id (string, lengths 1-8) | text (string, lengths 6-1.05M) | dataset_id (string, 1 class)
---|---|---
1764900
|
# Code
def rot13(x):
alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# Shift each letter 13 places through the alphabet; leave other characters unchanged.
return "".join(alpha[(alpha.index(c) + 13) % 26] if c in alpha else c for c in x.upper())
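# Quick check of the fixed implementation above (illustrative only): rot13("Hello!")
# returns "URYYB!", and applying rot13 twice restores the upper-cased input.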
|
StarcoderdataPython
|
3468879
|
<gh_stars>0
import pytest
import m26
def assert_almost_equal(x, y, threshold=0.0001):
assert(abs(x - y) < threshold)
def test_constructor_miles():
d = m26.Distance()
assert(d.value == 0)
assert(d.uom == 'm')
d = m26.Distance(26.2)
assert(d.value == 26.2)
assert(d.uom == 'm')
assert(d.is_miles())
assert(d.is_kilometers() == False)
assert(d.is_yards() == False)
assert_almost_equal(d.as_miles(), 26.2)
assert_almost_equal(d.as_kilometers(), 42.1648128)
assert_almost_equal(d.as_yards(), 46112.0)
def test_constructor_kilometers():
d = m26.Distance(50.0, 'k')
assert(d.value == 50.0)
assert(d.uom == 'k')
d = m26.Distance(10, ' K ')
assert(d.value == 10.0)
assert(d.uom == 'k')
assert(d.is_miles() == False)
assert(d.is_kilometers())
assert(d.is_yards() == False)
assert_almost_equal(d.as_miles(), 6.2137119223733395)
assert_almost_equal(d.as_kilometers(), 10.000000)
assert_almost_equal(d.as_yards(), 10936.132983377078)
def test_constructor_yards():
d = m26.Distance(3600.0, 'y')
assert(d.value == 3600.0)
assert(d.uom == 'y')
d = m26.Distance(1800.0, ' Y ')
assert(d.value == 1800.0)
assert(d.uom == 'y')
assert(d.is_miles() == False)
assert(d.is_kilometers() == False)
assert(d.is_yards())
assert_almost_equal(d.as_miles(), 1.0227272727272727)
assert_almost_equal(d.as_kilometers(), 1.64592)
assert_almost_equal(d.as_yards(), 1800.000000)
def test_add_to_miles():
d1 = m26.Distance(26.2, 'm')
d2 = m26.Distance(4.8, 'm')
d3 = m26.Distance(5.0, 'k')
d4 = m26.Distance(1800, 'y')
d1.add(d2)
assert_almost_equal(d1.value, 31.0)
assert(d1.uom == 'm')
d1.add(d3)
assert_almost_equal(d1.value, 34.10685596118667)
d1.add(d4)
assert_almost_equal(d1.value, 35.12958323391394)
assert(d1.uom == 'm')
def test_add_to_kilometers():
d1 = m26.Distance(10.0, 'k')
d2 = m26.Distance(3.1, 'm')
d1.add(d2)
assert(d1.uom == 'k')
assert_almost_equal(d1.value, 14.9889664)
def test_add_to_yards():
d1 = m26.Distance(1800, 'y')
d2 = m26.Distance(1.0, 'm')
d1.add(d2)
assert(d1.uom == 'y')
assert_almost_equal(d1.value, 3560.0)
def test_subtract_from_miles():
d1 = m26.Distance(26.2, 'm')
d2 = m26.Distance(4.8, 'm')
d3 = m26.Distance(5.0, 'k')
d4 = m26.Distance(1800, 'y')
d1.subtract(d2)
assert_almost_equal(d1.value, 21.4)
assert(d1.uom == 'm')
d1.subtract(d3)
assert_almost_equal(d1.value, 18.293144038813328)
d1.subtract(d4)
assert_almost_equal(d1.value, 17.270416766086054)
assert(d1.uom == 'm')
def test_subtract_from_kilometers():
d1 = m26.Distance(10.0, 'k')
d2 = m26.Distance(3.1, 'm')
d1.subtract(d2)
assert(d1.uom == 'k')
assert_almost_equal(d1.value, 5.011033599999999)
def test_subtract_from_yards():
d1 = m26.Distance(3600, 'y')
d2 = m26.Distance(1.0, 'm')
d1.subtract(d2)
assert(d1.uom == 'y')
assert_almost_equal(d1.value, 1840.0)
|
StarcoderdataPython
|
8008332
|
# -*- coding:utf-8 -*-
"""
@author: 古时月
@file: Fight.py
@time: 2021/5/8 13:37
"""
import win32com.client
import traceback
import pyautogui
from functions import *
pyautogui.FAILSAFE = True
class fight:
def __init__(self):
self._name = ""
self._hwnd = 0
self.hwnd2 = 0
self.hwnds = []
self.configs = load_json("configs.json")
self.loc = 0
self.count = 10000
self._num = 0
self.person = 1
def pos_flag(self, choice, hwnd, nimg):
jietu(hwnd, nimg)
if self.configs[choice + "_path"] != "0":
corr, pos = te_compare(self.configs[choice + "_path"], nimg=nimg)
if corr >= 0.6:
return pos
else:
return None
else:
return None
def counts(self, choice):
jietu(self._hwnd, "./nimg/nimg.bmp")
count = te_compares(self.configs[choice + "_path"], "./nimg/nimg.bmp")
return count
def sleep_time(self, times):
time.sleep(CheatTime(times))
def sleep_time2(self):
time.sleep(CheatTime(self.configs[self._name + "_wait_time"] - 6.5))
def sleep_time3(self):
time.sleep(CheatTime(self.configs[self._name + "_wait_time"]))
def click_point(self, position, hwnd, factor=6):
myclick(position, hwnd, factors=factor)
def end2(self, hwnd):
# tf = location(hwnd)
fac = bigOrSmall()
lt = [int(k * v) for k, v in zip(self.configs["END_TOP_LEFT"], fac)]
rb = [int(k * v) for k, v in zip(self.configs["END_BOTTOM_RIGHT"], fac)]
position = [random.randint(lt[0], rb[0]), random.randint(lt[1], rb[1])]
myclick(position, hwnd)
def focus(self):
fious(self.configs, self.loc, self._hwnd)
def yl_focus(self):
yuling_fious(self.configs, self.loc, self._hwnd)
def cT_focus(self):
climbTower_fious(self.configs, self.loc, self._hwnd)
def DriverRun(self):
try:
st = 0
while True:
choices = [self._name + "_teamwait", self._name + "_isstart", self._name + "_end", self._name + "_end2", self._name + "_tcg", self._name + "_defate"]
for choice in choices:
position = self.pos_flag(choice, self._hwnd, nimg="./nimg/nimg.bmp")
if choice == choices[0]:
person = self.counts(choice)
# logging.info(person)
if person > self.person:
flag1 = False
else:
flag1 = True
self.sleep_time(0.8)
elif choice == choices[1] and position != None and flag1:
st = 0
logging.info("match success,start challenge!")
self.click_point(position, self._hwnd)
if self.loc != 0:
self.sleep_time(6)
self.focus()
self.sleep_time2()
else:
self.sleep_time3()
elif choice == choices[2] and position != None:
# yuhun.click_point(position, factor=50)
self.end2(self._hwnd)
self.sleep_time(0.6)
elif choice == choices[3] and position != None:
if flag1:
self._num += 1
logging.info("challenge success!")
logging.info(f"{time.strftime('%H:%M:%S')} ~~~ end1 {self._num} ~~~")
flag1 = False
self.click_point(position, self._hwnd, factor=30)
self.sleep_time(1)
# yuhun.click_point(position,factor=70)
elif choice == choices[4] and position != None:
st += 1
if st >= 5 and flag1:
self._num += 1
logging.info("challenge success!")
logging.info(f"{time.strftime('%H:%M:%S')} ~~~ end2 {self._num} ~~~")
flag1 = False
self.click_point(position, self._hwnd, factor=20)
self.sleep_time(1)
elif st >= 6:
self.click_point(position, self._hwnd, factor=20)
self.sleep_time(1)
elif choice == choices[5] and position != None and flag1:
logging.warning("challenge defeat!")
self.click_point(position, self._hwnd, factor=80)
flag1 = False
else:
time.sleep(0.9)
if self._num >= self.count and self.count != 0:
break
except Exception:
# print(Exception)
print(traceback.format_exc())
input(f"{time.strftime('%Y %b %d %H:%M:%S')} Driver 请将此错误信息发给开发者(yl2)")
def passengerRun(self):
try:
while True:
choices = [self._name + "_teamwait", self._name + "_isstart", self._name + "_end", self._name + "_end2", self._name+ "_tcg", self._name + "_defate"]
for choice in choices:
position = self.pos_flag(choice, self.hwnd2, nimg="./nimg/nimg2.bmp")
if choice == choices[0]:
self.sleep_time(1.1)
st = 0
elif choice == choices[1] and position != None:
st = 0
# logging.info("match success,start challenge!")
self.sleep_time(6)
if self.loc != 0:
self.sleep_time(6)
self.focus()
self.sleep_time2()
else:
self.sleep_time3()
elif choice == choices[2] and position != None:
# yuhun.click_point(position, factor=50)
self.end2(self.hwnd2)
self.sleep_time(0.5)
elif choice == choices[3] and position != None:
self._num += 1
# logging.info("challenge success!")
# logging.info(f"~~~~~~~~~~~~~~~ {self._num} ~~~~~~~~~~~~~~~")
self.click_point(position, self.hwnd2, factor=30)
self.sleep_time(0.5)
# yuhun.click_point(position,factor=70)
elif choice == choices[4] and position != None:
st += 1
if st >= 4:
self._num += 1
logging.info("challenge success!")
logging.info(f"~~~~~~~~~~~~~~~ end2 {self._num} ~~~~~~~~~~~~~~~")
flag = False
self.click_point(position, self.hwnd2, factor=20)
self.sleep_time(1)
elif choice == choices[5] and position != None:
# logging.warning("challenge defeat!")
self.click_point(position, self.hwnd2, factor=80)
else:
time.sleep(1)
if self._num >= self.count and self.count != 0:
break
except pyautogui.FailSafeException:
input("单次运行时间过长,请重启软件")
pass
except Exception:
# print(Exception)
print(traceback.format_exc())
input(f"{time.strftime('%Y %b %d %H:%M:%S')} Driver 请将此错误信息发给开发者(yl2)")
def DoubleRun(self):
# TODO: put the while True loop before the try block and catch pyautogui or win32gui exceptions.
try:
st = 0
flag1 = False
choices = [self._name + "_teamwait", self._name + "_isstart", self._name + "_end", self._name + "_end2",
# self._name + "_tcg"
# self._name + "_defate"
]
choices1 = [self._name + "_teamwait", self._name + "_isstart"]
choices2 = [self._name + "_end", self._name + "_end2", self._name + "_tcg"]
while True:
for choice in choices:
position = self.pos_flag(choice, self._hwnd, nimg="./nimg/nimg.bmp")
if choice == choices[0]:
person = self.counts(choice)
# logging.info(person)
if person > self.person:
flag1 = False
else:
flag1 = True
self.sleep_time(0.8)
elif choice == choices[1] and position != None and flag1:
st = 0
self._num += 1
logging.info("match success,start challenge!")
self.click_point(position, self._hwnd)
if self.loc != 0:
self.sleep_time(7)
self.focus()
self.sleep_time2()
else:
self.sleep_time3()
elif choice == choices[2] and position != None:
# yuhun.click_point(position, factor=50)
self.end2(self._hwnd)
time.sleep(0.7)
self.end2(self.hwnd2)
time.sleep(1)
elif choice == choices[3] and position != None:
if flag1:
logging.info("challenge success!")
logging.info(f"{time.strftime('%H:%M:%S')} ~~~ end1 {self._num} ~~~")
flag1 = False
self.click_point(position, self.hwnd2, factor=30)
time.sleep(0.7)
self.click_point(position, self._hwnd, factor=30)
time.sleep(0.7)
self.click_point(position, self.hwnd2, factor=30)
time.sleep(0.2)
self.click_point(position, self.hwnd2, factor=30)
# self.sleep_time(1)
# yuhun.click_point(position,factor=70)
# elif choice == choices[4] and position!= None:
# st += 1
# if st >= 4 and flag1:
# logging.info("challenge success!")
# logging.info(f"{time.strftime('%H:%M:%S')} ~~~ end2 {self._num} ~~~")
# self.click_point(position, hwnd=self.hwnd2, factor=20)
# time.sleep(0.7)
# self.click_point(position, self._hwnd, factor=20)
# self.sleep_time(1)
# elif st >= 5:
# self.click_point(position, hwnd=self.hwnd2, factor=20)
# time.sleep(0.7)
# self.click_point(position, self._hwnd, factor=20)
# self.sleep_time(1)
# elif choice == choices[5] and position != None and flag:
# logging.warning("challenge defeat!")
# self.click_point(position, hwnd=self.hwnd2, factor=80)
# time.sleep(0.7)
# self.click_point(position, self._hwnd, factor=80)
# flag = False
else:
time.sleep(0.9)
if self._num >= self.count and self.count != 0:
break
except pyautogui.FailSafeException:
print(f"{time.strftime('%Y %b %d %H:%M:%S')} pyautogui error!")
except Exception:
# print(Exception)
print(traceback.format_exc())
input(f"{time.strftime('%Y %b %d %H:%M:%S')} Driver 请将此错误信息发给开发者(yl2)")
def singleRun(self):
try:
flag = False
while True:
choices = [self._name + "_isstart", self._name + "_end", self._name + "_end2",
self._name + "_defate"]
for choice in choices:
position = self.pos_flag(choice, self._hwnd, "./nimg/nimg.bmp")
if choice == choices[0] and position != None:
logging.info("match success,start challenge!")
self.click_point(position, self._hwnd)
if self.loc != 0:
self.sleep_time(6)
self.focus()
self.sleep_time2()
else:
self.sleep_time3()
flag = True
elif choice == choices[1] and position != None:
# yuhun.click_point(position, factor=50)
self.end2(self._hwnd)
self.sleep_time(0.8)
elif choice == choices[2] and position != None:
if flag:
self._num += 1
logging.info("challenge success!")
logging.info(f"~~~~~~~~~~~~~~~ {self._num} ~~~~~~~~~~~~~~~")
flag = False
self.click_point(position, self._hwnd, factor=70)
self.sleep_time(0.5)
# yuhun.click_point(position,factor=70)
elif choice == choices[3] and position != None:
logging.warning("challenge defeat!")
self.click_point(position, self._hwnd, factor=30)
flag = False
else:
self.sleep_time(1)
if self._num >= self.count and self.count != 0:
break
except Exception:
# print(Exception)
print(traceback.format_exc())
input(f"{time.strftime('%Y %b %d %H:%M:%S')} Driver 请将此错误信息发给开发者(yl2)")
def YulingRun(self):
try:
while True:
choices = ["yuling_isstart", "yuling_end", "yuling_end2", "yuling_defate"]
for choice in choices:
position = self.pos_flag(choice, self._hwnd, "./nimg/nimg.bmp")
if choice == choices[0] and position != None:
logging.info(f"match success,start challenge!")
self.click_point(position,self._hwnd)
if self.loc != 0:
self.sleep_time(5.8)
self.yl_focus()
self.sleep_time2()
else:
self.sleep_time3()
elif choice == choices[1] and position != None:
self._num += 1
logging.info("challenge success!")
logging.info(f"~~~~~~~~~~~~~~~ {self._num} ~~~~~~~~~~~~~~~")
self.click_point(position, self._hwnd, factor=50)
self.sleep_time(2)
elif choice == choices[2] and position != None:
pass
elif choice == choices[3] and position != None:
logging.warning("challenge defeat!")
self.click_point(position, self._hwnd,factor=50)
else:
self.sleep_time(2)
if self._num >= self.count and self.count != 0:
break
except Exception:
# print(Exception)
print(traceback.format_exc())
input(f"{time.strftime('%Y %b %d %H:%M:%S')} Driver 请将此错误信息发给开发者(yl2)")
def climbTowerRun(self):
choices = [self._name + "_isstart", self._name + "_end"]
try:
flag = False
while True:
for choice in choices:
position = self.pos_flag(choice, self._hwnd, "./nimg/nimg.bmp")
if choice == choices[0] and position != None:
# self._num += 1
startTime = time.time()
logging.info("match success,start challenge!")
self.click_point(position, self._hwnd)
self.sleep_time3()
flag = True
elif choice == choices[1] and position != None:
if flag:
self._num += 1
endTime = time.time()
logging.info(f"本次耗时{round(endTime-startTime,2)}")
logging.info("challenge success!")
logging.info(f"~~~~~~~~~~~~~~~ {self._num} ~~~~~~~~~~~~~~~")
flag = False
self.end2(self._hwnd)
self.sleep_time(2)
else:
self.sleep_time(1)
if self._num >= self.count and self.count != 0:
break
except Exception:
# print(Exception)
print(traceback.format_exc())
input(f"{time.strftime('%Y %b %d %H:%M:%S')} Driver 请将此错误信息发给开发者(yl2)")
def setClimbTower(self,hwnd='', loc=0, count=100000):
self._name = "climbTower"
if hwnd == "":
hwnd = find_window_by_title("阴阳师-网易游戏")
self._hwnd = hwnd
self.loc = loc
self.count = count
win32gui.SetForegroundWindow(self._hwnd)
win32gui.MoveWindow(self._hwnd, 384, 189, 1152, 679, True)
def setSingle(self, hwnd='', loc=0, count=10000):
self._name = "Syuhun"
if hwnd == "":
hwnd = find_window_by_title("阴阳师-网易游戏")
self._hwnd = hwnd
self.loc = loc
self.count = count
win32gui.SetForegroundWindow(self._hwnd)
win32gui.MoveWindow(self._hwnd, 384, 189, 1152, 679, True)
def setDriver(self, hwnd="", loc=0, count=10000):
self._name = "Tyuhun"
if hwnd == "":
hwnd = find_window_by_title("阴阳师-网易游戏")
self._hwnd = hwnd
self.loc = loc
self.count = count
shell = win32com.client.Dispatch("WScript.Shell")
shell.SendKeys('%')
win32gui.SetForegroundWindow(self._hwnd) # activate the window
win32gui.MoveWindow(self._hwnd, 10, 30, 864, 510, True)
def setPassenger(self, hwnd="", loc=0, count=10000):
self._name = "Tyuhun"
if hwnd == "":
hwnd = find_window_by_title("阴阳师-网易游戏")
self.hwnd2 = hwnd
self.loc = loc
self.count = count
shell = win32com.client.Dispatch("WScript.Shell")
shell.SendKeys('%')
win32gui.SetForegroundWindow(self.hwnd2) # activate the window
win32gui.MoveWindow(self.hwnd2, 900, 30, 864, 510, True)
def setDoublt(self, loc=0, count=10000):
self._name = "Tyuhun"
self.person = 1
self.loc = loc
self.count = count
hwnds = find_windows_by_title("阴阳师-网易游戏")
x1 = self.pos_flag(self._name + "_isstart", hwnds[0],"./nimg/nimg.bmp")
x2 = self.pos_flag(self._name + "_isstart", hwnds[-1], "./nimg/nimg2.bmp")
if x1 != None and x2 != None:
self.setDoublt(loc, count)
self.sleep_time(3)
elif x1 == None and x2 == None:
print('Please do not cover the game window')
self.setDoublt(loc, count)
self.sleep_time(3)
elif x1 != None:
self._hwnd = hwnds[0]
self.hwnd2 = hwnds[1]
else:
self._hwnd = hwnds[1]
self.hwnd2 = hwnds[0]
win32gui.SetForegroundWindow(self._hwnd) # activate the window
win32gui.MoveWindow(self._hwnd, 10, 30, 864, 510, True)
def setDoublt2(self, loc=0, count=10000):
self._name = "Tyuhun"
self.person = 1
self.loc = loc
self.count = count
hwnds = find_windows_by_title("阴阳师-网易游戏")
x1 = self.pos_flag(self._name + "_isstart", hwnds[0],"./nimg/nimg.bmp")
x2 = self.pos_flag(self._name + "_isstart", hwnds[-1], "./nimg/nimg2.bmp")
if x1 != None and x2 != None:
self.setDoublt2(loc, count)
self.sleep_time(3)
elif x1 == None and x2 == None:
print('Please do not cover the game window')
self.setDoublt2(loc, count)
self.sleep_time(3)
elif x1 != None:
self._hwnd = hwnds[0]
self.hwnd2 = hwnds[1]
else:
self._hwnd = hwnds[1]
self.hwnd2 = hwnds[0]
win32gui.SetForegroundWindow(self._hwnd) # activate the window
win32gui.MoveWindow(self._hwnd, 10, 30, 922, 543, True)
shell = win32com.client.Dispatch("WScript.Shell")
shell.SendKeys('%')
win32gui.SetForegroundWindow(self._hwnd) # activate the window
win32gui.MoveWindow(self.hwnd2, 940, 30, 922, 543, True)
def setYuling(self, hwnd='', loc=0, count=10000):
self._name = "yuling"
if hwnd == "":
hwnd = find_window_by_title("阴阳师-网易游戏")
self._hwnd = hwnd
self.loc = loc
self.count = count
shell = win32com.client.Dispatch("WScript.Shell")
shell.SendKeys('%')
win32gui.SetForegroundWindow(self._hwnd)
win32gui.MoveWindow(self._hwnd, 384, 189, 1152, 679, True)
|
StarcoderdataPython
|
5076201
|
<reponame>SimonBoothroyd/nagl
import abc
import dgl
import torch.nn
from pydantic import BaseModel
from typing_extensions import Literal
class PostprocessLayer(torch.nn.Module, abc.ABC):
"""A layer to apply to the final readout of a neural network."""
@classmethod
@abc.abstractmethod
def from_config(cls, config):
"""Create an instance of a post-process layer from its configuration."""
@abc.abstractmethod
def forward(self, graph: dgl.DGLGraph, x: torch.Tensor) -> torch.Tensor:
"""Returns the post-processed input vector."""
class ComputePartialCharges(PostprocessLayer):
"""A layer which will map an NN readout containing a set of atomic electronegativity
and hardness parameters to a set of partial charges [1].
References:
[1] Gilson, <NAME>., <NAME>, and <NAME>. "Fast
assignment of accurate partial atomic charges: an electronegativity
equalization method that accounts for alternate resonance forms." Journal of
chemical information and computer sciences 43.6 (2003): 1982-1997.
"""
class Config(BaseModel):
"""Configuration options for a ``ComputePartialCharges`` layer."""
type: Literal["ComputePartialCharges"] = "ComputePartialCharges"
@classmethod
def from_config(cls, config: "ComputePartialCharges.Config"):
"""Create an instance of a post-process layer from its configuration."""
return cls()
@classmethod
def atomic_parameters_to_charges(
cls,
electronegativity: torch.Tensor,
hardness: torch.Tensor,
total_charge: float,
) -> torch.Tensor:
"""Converts a set of atomic electronegativity and hardness parameters to a
set of partial atomic charges subject to a total charge constraint.
Args:
electronegativity: The electronegativity of atoms in a given molecule.
hardness: The hardness of atoms in a given molecule.
total_charge: The total charge on the molecule.
Returns:
The atomic partial charges.
"""
inverse_hardness = 1.0 / hardness
charges = (
-inverse_hardness * electronegativity
+ inverse_hardness
* torch.div(
torch.dot(inverse_hardness, electronegativity) + total_charge,
torch.sum(inverse_hardness),
)
).reshape(-1, 1)
return charges
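# Illustrative check (hypothetical numbers, not from the original source): for two atoms
# with electronegativities [2.0, 3.0], hardnesses [1.0, 1.0] and total_charge = 0, the
# expression above yields charges [+0.5, -0.5]; the more electronegative atom gains
# negative charge and the charges sum to the molecular total charge.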
def forward(self, graph: dgl.DGLGraph, x: torch.Tensor) -> torch.Tensor:
charges = []
counter = 0
for mol_graph in dgl.unbatch(graph):
total_charge = mol_graph.ndata["formal_charge"].sum()
charges.append(
self.atomic_parameters_to_charges(
x[counter : counter + len(mol_graph), 0],
x[counter : counter + len(mol_graph), 1],
total_charge,
)
)
counter += len(mol_graph)
return torch.vstack(charges)
|
StarcoderdataPython
|
8064055
|
<gh_stars>10-100
#!/usr/bin/env python
# coding: utf-8
import os
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
import lightgbm as lgb
class ModelExtractionCallback(object):
"""
original author : momijiame
ref : https://blog.amedama.jp/entry/lightgbm-cv-model
description : Class for callback to extract trained models from lightgbm.cv().
note: This class depends on private class '_CVBooster', so there are some future risks.
"""
def __init__(self):
self._model = None
def __call__(self, env):
# keep a reference to the _CVBooster
self._model = env.model
def _assert_called_cb(self):
if self._model is None:
# raise an exception if the callback has not been called yet
raise RuntimeError('callback has not been called yet')
@property
def boosters_proxy(self):
self._assert_called_cb()
# return a proxy object for the Boosters
return self._model
@property
def raw_boosters(self):
self._assert_called_cb()
# return the list of Boosters
return self._model.boosters
@property
def best_iteration(self):
self._assert_called_cb()
# return the boosting round at which early stopping occurred
return self._model.best_iteration
def arrange_dataset(df, cnum):
_dset = df.filter(regex=f'var_{cnum}$')
_dset.columns = list(range(_dset.shape[1]))
_dset = _dset.assign(var_num = cnum)
return _dset
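# Note added for clarity: arrange_dataset keeps every column of df whose name ends in
# f"var_{cnum}", renumbers those columns 0..k-1 and adds a constant var_num column, so
# the 200 per-variable frames built below can be stacked row-wise into one training set.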
def main():
model_output_dir = f'../processed/lgb_output/'
if not os.path.isdir(model_output_dir):
os.makedirs(model_output_dir)
dataset_dir = '../processed/dataset/'
X_train = pd.read_pickle(os.path.join(dataset_dir, 'X_train.pickle'))
y_train = pd.read_pickle(os.path.join(dataset_dir, 'y_train.pickle'))
X_test = pd.read_pickle(os.path.join(dataset_dir, 'X_test.pickle'))
params = {
'bagging_freq': 5,
'bagging_fraction': 0.95,
'boost_from_average':'false',
'boost': 'gbdt',
'feature_fraction': 1.0,
'learning_rate': 0.005,
'max_depth': -1,
'metric':'binary_logloss',
'min_data_in_leaf': 30,
'min_sum_hessian_in_leaf': 10.0,
'num_leaves': 64,
'num_threads': 32,
'tree_learner': 'serial',
'objective': 'binary',
'verbosity': 1}
dset_list = []
for cnum in range(200):
_dset = arrange_dataset(X_train, cnum)
dset_list.append(_dset)
concat_X_train = pd.concat(dset_list, axis=0)
concat_X_train['var_num'] = concat_X_train['var_num'].astype('category')
train_dset = lgb.Dataset(
concat_X_train,
pd.concat([y_train for c in range(200)], axis=0),
free_raw_data=False
)
for fold_set_number in range(10):
print('### start iter {} in 10 ###'.format(fold_set_number+1))
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=2019+fold_set_number)
folds = [
[
np.concatenate([_trn+i * X_train.shape[0] for i in range(200)]),
np.concatenate([_val+i * X_train.shape[0] for i in range(200)])
] for _trn, _val in skf.split(X_train, y_train)]
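# Note added for clarity: each fold's train/val indices from skf are replicated 200 times
# with an offset of i * X_train.shape[0], so they address the matching rows of the 200
# stacked copies that make up concat_X_train.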
extraction_cb = ModelExtractionCallback()
callbacks = [extraction_cb,]
print('start training. ')
cv_result = lgb.cv(params, train_set=train_dset, num_boost_round=100000,
early_stopping_rounds=100, verbose_eval=100, folds=folds, callbacks=callbacks)
bsts = extraction_cb.raw_boosters
best_iteration = extraction_cb.best_iteration
print('training end. ')
print('start predicting. ')
oof_pred_array = np.ones((X_train.shape[0], 200))
test_pred_array = np.ones((X_test.shape[0], 5, 200))
for cnum in tqdm(range(200)):
for i, bst in enumerate(bsts):
cv_valid_index = bst.valid_sets[0].used_indices
cv_valid_index = cv_valid_index[:int(cv_valid_index.shape[0]/200)]
# out-of-fold (oof) predictions
cv_valid_data = arrange_dataset(X_train, cnum).iloc[cv_valid_index].values
oof_pred_array[cv_valid_index, cnum] = bst.predict(cv_valid_data, num_iteration=best_iteration)
# test-set predictions
test_pred_array[:, i, cnum] = bst.predict(arrange_dataset(X_test, cnum).values, num_iteration=best_iteration)
print('prediction end. ')
print('start postprocess. ')
thr = 0.500
oof_pred_odds_prod = np.ones((X_train.shape[0]))
test_pred_odds_prod = np.ones((X_test.shape[0], 5))
for cnum in tqdm(range(200)):
tmp_auc = roc_auc_score(y_train, oof_pred_array[:, cnum])
if tmp_auc >= thr:
oof_pred_odds_prod *= oof_pred_array[:, cnum] / (1 - oof_pred_array[:, cnum])
test_pred_odds_prod *= test_pred_array[:,:, cnum] / (1 - test_pred_array[:,:, cnum])
print('postprocess end. auc : {0:.6f}'.format(roc_auc_score(y_train, oof_pred_odds_prod)))
print('save iteration results')
pd.DataFrame(oof_pred_odds_prod, index=X_train.index, columns=['pred'])\
.to_pickle(os.path.join(model_output_dir, f'oof_preds_{fold_set_number}.pkl.gz'), compression='gzip')
for fold_num in range(5):
model_management_num = fold_num + fold_set_number*5
pd.DataFrame(test_pred_odds_prod[:, fold_num], index=X_test.index, columns=['pred'])\
.to_pickle(os.path.join(model_output_dir, f'test_preds_{model_management_num}.pkl.gz'), compression='gzip')
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
47455
|
<gh_stars>0
# Generated by Django 2.2.13 on 2020-09-27 03:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('shows', '0005_auto_20200923_0900'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, help_text='Date time on which the object was created.', verbose_name='created at')),
('modified', models.DateTimeField(auto_now=True, help_text='Date time on which the object was last modified.', verbose_name='modified at')),
('active', models.BooleanField(default=True, verbose_name='Activo')),
('comment', models.CharField(blank=True, max_length=210, null=True, verbose_name='comment')),
('subject', models.CharField(blank=True, max_length=50, null=True, verbose_name='subject')),
('rate', models.SmallIntegerField(default=1, verbose_name='rate')),
('ip', models.CharField(blank=True, max_length=20, null=True, verbose_name='ip')),
('status', models.CharField(choices=[('New', 'New'), ('True', 'True'), ('False', 'False')], default='New', max_length=10, verbose_name='status')),
('show', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shows.Show')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
|
StarcoderdataPython
|
125693
|
<filename>PyScraper/module1.py
import glob
import imageio
import matplotlib.pyplot as plt
from matplotlib.image import imread
import numpy as np
import os
import PIL
from tensorflow.keras import layers
import time
from IPython import display
from PIL import Image, ImageOps
IMG_DIR = './data/eyes/'
image_list = []
image_gray_list = []
save_images = True
# Grab all of our images
imcount = 0
for filename in glob.glob(IMG_DIR + "*.jpg"):
if imcount == 16:
break
imcount += 1
im=Image.open(filename)
print(f"Loading {filename}")
resize_image = im.resize((64, 64))
#ImageOps.invert(resize_image)
gray_image = resize_image.convert("RGB")
image_list.append(gray_image)
image_gray_list.append(np.array(gray_image))
print("Loaded " + str(len(image_list)) + " images")
np_image_list = np.asarray(image_gray_list)
result = Image.new("RGB", (256, 256))
i = 0
for y in range(0, 4):
for x in range(0, 4):
img = (np_image_list[i])
result.paste(Image.fromarray(img), (y * 64, x * 64))
i += 1
result.save("___test.png")
|
StarcoderdataPython
|
11340847
|
import numpy as np
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
import os
from six import string_types
from xcs_soxs.utils import mylog, parse_value, get_rot_mat, \
downsample
from xcs_soxs.instrument_registry import instrument_registry
from tqdm import tqdm
def wcs_from_event_file(f):
h = f["EVENTS"].header
w = pywcs.WCS(naxis=2)
w.wcs.crval = [h["TCRVL2"], h["TCRVL3"]]
w.wcs.crpix = [h["TCRPX2"], h["TCRPX3"]]
w.wcs.cdelt = [h["TCDLT2"], h["TCDLT3"]]
w.wcs.ctype = [h["TCTYP2"], h["TCTYP3"]]
w.wcs.cunit = [h["TCUNI2"], h["TCUNI3"]]
return w
def write_event_file(events, parameters, filename, overwrite=False):
from astropy.time import Time, TimeDelta
mylog.info("Writing events to file %s." % filename)
t_begin = Time.now()
dt = TimeDelta(parameters["exposure_time"], format='sec')
t_end = t_begin + dt
col_x = pyfits.Column(name='X', format='D', unit='pixel', array=events["xpix"])
col_y = pyfits.Column(name='Y', format='D', unit='pixel', array=events["ypix"])
col_e = pyfits.Column(name='ENERGY', format='E', unit='eV', array=events["energy"]*1000.)
col_dx = pyfits.Column(name='DETX', format='D', unit='pixel', array=events["detx"])
col_dy = pyfits.Column(name='DETY', format='D', unit='pixel', array=events["dety"])
col_id = pyfits.Column(name='CCD_ID', format='D', unit='pixel', array=events["ccd_id"])
chantype = parameters["channel_type"]
if chantype == "PHA":
cunit = "adu"
elif chantype == "PI":
cunit = "Chan"
col_ch = pyfits.Column(name=chantype.upper(), format='1J', unit=cunit, array=events[chantype])
col_t = pyfits.Column(name="TIME", format='1D', unit='s', array=events['time'])
cols = [col_e, col_x, col_y, col_ch, col_t, col_dx, col_dy, col_id]
coldefs = pyfits.ColDefs(cols)
tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
tbhdu.name = "EVENTS"
tbhdu.header["MTYPE1"] = "sky"
tbhdu.header["MFORM1"] = "x,y"
tbhdu.header["MTYPE2"] = "EQPOS"
tbhdu.header["MFORM2"] = "RA,DEC"
tbhdu.header["TCTYP2"] = "RA---TAN"
tbhdu.header["TCTYP3"] = "DEC--TAN"
tbhdu.header["TCRVL2"] = parameters["sky_center"][0]
tbhdu.header["TCRVL3"] = parameters["sky_center"][1]
tbhdu.header["TCDLT2"] = -parameters["plate_scale"]
tbhdu.header["TCDLT3"] = parameters["plate_scale"]
tbhdu.header["TCRPX2"] = parameters["pix_center"][0]
tbhdu.header["TCRPX3"] = parameters["pix_center"][1]
tbhdu.header["TCUNI2"] = "deg"
tbhdu.header["TCUNI3"] = "deg"
tbhdu.header["TLMIN2"] = 0.5
tbhdu.header["TLMIN3"] = 0.5
tbhdu.header["TLMAX2"] = 2.0*parameters["num_pixels"]+0.5
tbhdu.header["TLMAX3"] = 2.0*parameters["num_pixels"]+0.5
tbhdu.header["TLMIN4"] = parameters["chan_lim"][0]
tbhdu.header["TLMAX4"] = parameters["chan_lim"][1]
tbhdu.header["TLMIN6"] = -0.5*parameters["num_pixels"]
tbhdu.header["TLMAX6"] = 0.5*parameters["num_pixels"]
tbhdu.header["TLMIN7"] = -0.5*parameters["num_pixels"]
tbhdu.header["TLMAX7"] = 0.5*parameters["num_pixels"]
tbhdu.header["EXPOSURE"] = parameters["exposure_time"]
tbhdu.header["TSTART"] = 0.0
tbhdu.header["TSTOP"] = parameters["exposure_time"]
tbhdu.header["HDUVERS"] = "1.1.0"
tbhdu.header["RADECSYS"] = "FK5"
tbhdu.header["EQUINOX"] = 2000.0
tbhdu.header["HDUCLASS"] = "OGIP"
tbhdu.header["HDUCLAS1"] = "EVENTS"
tbhdu.header["HDUCLAS2"] = "ACCEPTED"
tbhdu.header["DATE"] = t_begin.tt.isot
tbhdu.header["DATE-OBS"] = t_begin.tt.isot
tbhdu.header["DATE-END"] = t_end.tt.isot
tbhdu.header["RESPFILE"] = os.path.split(parameters["rmf"])[-1]
tbhdu.header["PHA_BINS"] = parameters["nchan"]
tbhdu.header["ANCRFILE"] = os.path.split(parameters["arf"])[-1]
tbhdu.header["CHANTYPE"] = parameters["channel_type"]
tbhdu.header["MISSION"] = parameters["mission"]
tbhdu.header["TELESCOP"] = parameters["telescope"]
tbhdu.header["INSTRUME"] = parameters["instrument"]
tbhdu.header["RA_PNT"] = parameters["sky_center"][0]
tbhdu.header["DEC_PNT"] = parameters["sky_center"][1]
tbhdu.header["ROLL_PNT"] = parameters["roll_angle"]
tbhdu.header["AIMPT_X"] = parameters["aimpt_coords"][0]
tbhdu.header["AIMPT_Y"] = parameters["aimpt_coords"][1]
if parameters["dither_params"]["dither_on"]:
tbhdu.header["DITHXAMP"] = parameters["dither_params"]["x_amp"]
tbhdu.header["DITHYAMP"] = parameters["dither_params"]["y_amp"]
tbhdu.header["DITHXPER"] = parameters["dither_params"]["x_period"]
tbhdu.header["DITHYPER"] = parameters["dither_params"]["y_period"]
start = pyfits.Column(name='START', format='1D', unit='s',
array=np.array([0.0]))
stop = pyfits.Column(name='STOP', format='1D', unit='s',
array=np.array([parameters["exposure_time"]]))
tbhdu_gti = pyfits.BinTableHDU.from_columns([start,stop])
tbhdu_gti.name = "STDGTI"
tbhdu_gti.header["TSTART"] = 0.0
tbhdu_gti.header["TSTOP"] = parameters["exposure_time"]
tbhdu_gti.header["HDUCLASS"] = "OGIP"
tbhdu_gti.header["HDUCLAS1"] = "GTI"
tbhdu_gti.header["HDUCLAS2"] = "STANDARD"
tbhdu_gti.header["RADECSYS"] = "FK5"
tbhdu_gti.header["EQUINOX"] = 2000.0
tbhdu_gti.header["DATE"] = t_begin.tt.isot
tbhdu_gti.header["DATE-OBS"] = t_begin.tt.isot
tbhdu_gti.header["DATE-END"] = t_end.tt.isot
hdulist = [pyfits.PrimaryHDU(), tbhdu, tbhdu_gti]
pyfits.HDUList(hdulist).writeto(filename, overwrite=overwrite)
def parse_region_args(rtype, args, dx, dy):
if rtype == "Box":
xctr, yctr, xw, yw = args
new_args = [xctr + dx, yctr + dy, xw, yw]
elif rtype == "Circle":
xctr, yctr, radius = args
new_args = [xctr + dx, yctr + dy, radius]
elif rtype == "Polygon":
new_args = [[x + dx for x in args[0]],
[y + dy for y in args[1]]]
else:
raise NotImplementedError
return new_args
def make_exposure_map(event_file, expmap_file, energy, weights=None,
asol_file=None, normalize=True, overwrite=False,
reblock=1, nhistx=16, nhisty=16, order=1):
"""
Make an exposure map for a SOXS event file, and optionally write
an aspect solution file. The exposure map will be created by
binning an aspect histogram over the range of the aspect solution.
Parameters
----------
event_file : string
The path to the event file to use for making the exposure map.
expmap_file : string
The path to write the exposure map file to.
energy : float, (value, unit) tuple, or :class:`~astropy.units.Quantity`, or NumPy array
The energy in keV to use when computing the exposure map, or
a set of energies to be used with the *weights* parameter. If
providing a set, it must be in keV.
weights : array-like, optional
The weights to use with a set of energies given in the
*energy* parameter. Used to create a more accurate exposure
map weighted by a range of energies. Default: None
asol_file : string, optional
The path to write the aspect solution file to, if desired.
Default: None
normalize : boolean, optional
If True, the exposure map will be divided by the exposure time
so that the map's units are cm**2. Default: True
overwrite : boolean, optional
Whether or not to overwrite an existing file. Default: False
reblock : integer, optional
Supply an integer power of 2 here to make an exposure map
with a different binning. Default: 1
nhistx : integer, optional
The number of bins in the aspect histogram in the DETX
direction. Default: 16
nhisty : integer, optional
The number of bins in the aspect histogram in the DETY
direction. Default: 16
order : integer, optional
The interpolation order to use when making the exposure map.
Default: 1
"""
import pyregion._region_filter as rfilter
from scipy.ndimage.interpolation import rotate, shift
from xcs_soxs.instrument import AuxiliaryResponseFile, perform_dither
if isinstance(energy, np.ndarray) and weights is None:
raise RuntimeError("Must supply a single value for the energy if "
"you do not supply weights!")
if not isinstance(energy, np.ndarray):
energy = parse_value(energy, "keV")
f_evt = pyfits.open(event_file)
hdu = f_evt["EVENTS"]
arf = AuxiliaryResponseFile(hdu.header["ANCRFILE"])
exp_time = hdu.header["EXPOSURE"]
nx = int(hdu.header["TLMAX2"]-0.5)//2
ny = int(hdu.header["TLMAX3"]-0.5)//2
ra0 = hdu.header["TCRVL2"]
dec0 = hdu.header["TCRVL3"]
xdel = hdu.header["TCDLT2"]
ydel = hdu.header["TCDLT3"]
x0 = hdu.header["TCRPX2"]
y0 = hdu.header["TCRPX3"]
xdet0 = 0.5*(2*nx+1)
ydet0 = 0.5*(2*ny+1)
xaim = hdu.header.get("AIMPT_X", 0.0)
yaim = hdu.header.get("AIMPT_Y", 0.0)
roll = hdu.header["ROLL_PNT"]
instr = instrument_registry[hdu.header["INSTRUME"].lower()]
dither_params = {}
if "DITHXAMP" in hdu.header:
dither_params["x_amp"] = hdu.header["DITHXAMP"]
dither_params["y_amp"] = hdu.header["DITHYAMP"]
dither_params["x_period"] = hdu.header["DITHXPER"]
dither_params["y_period"] = hdu.header["DITHYPER"]
dither_params["plate_scale"] = ydel*3600.0
dither_params["dither_on"] = True
else:
dither_params["dither_on"] = False
f_evt.close()
# Create time array for aspect solution
dt = 1.0 # Seconds
t = np.arange(0.0, exp_time+dt, dt)
# Construct WCS
w = pywcs.WCS(naxis=2)
w.wcs.crval = [ra0, dec0]
w.wcs.crpix = [x0, y0]
w.wcs.cdelt = [xdel, ydel]
w.wcs.ctype = ["RA---TAN","DEC--TAN"]
w.wcs.cunit = ["deg"]*2
# Create aspect solution if we had dithering.
# otherwise just set the offsets to zero
if dither_params["dither_on"]:
x_off, y_off = perform_dither(t, dither_params)
# Make the aspect histogram
x_amp = dither_params["x_amp"]/dither_params["plate_scale"]
y_amp = dither_params["y_amp"]/dither_params["plate_scale"]
x_edges = np.linspace(-x_amp, x_amp, nhistx+1, endpoint=True)
y_edges = np.linspace(-y_amp, y_amp, nhisty+1, endpoint=True)
asphist = np.histogram2d(x_off, y_off, (x_edges, y_edges))[0]
asphist *= dt
x_mid = 0.5*(x_edges[1:]+x_edges[:-1])/reblock
y_mid = 0.5*(y_edges[1:]+y_edges[:-1])/reblock
# Determine the effective area
eff_area = arf.interpolate_area(energy).value
if weights is not None:
eff_area = np.average(eff_area, weights=weights)
if instr["chips"] is None:
rtypes = ["Box"]
args = [[0.0, 0.0, instr["num_pixels"], instr["num_pixels"]]]
else:
rtypes = []
args = []
for i, chip in enumerate(instr["chips"]):
rtypes.append(chip[0])
args.append(np.array(chip[1:]))
tmpmap = np.zeros((2*nx, 2*ny))
for rtype, arg in zip(rtypes, args):
rfunc = getattr(rfilter, rtype)
new_args = parse_region_args(rtype, arg, xdet0-xaim-1.0, ydet0-yaim-1.0)
r = rfunc(*new_args)
tmpmap += r.mask(tmpmap).astype("float64")
tmpmap = downsample(tmpmap, reblock)
if dither_params["dither_on"]:
expmap = np.zeros(tmpmap.shape)
niter = nhistx*nhisty
pbar = tqdm(leave=True, total=niter, desc="Creating exposure map ")
for i in range(nhistx):
for j in range(nhisty):
expmap += shift(tmpmap, (x_mid[i], y_mid[j]), order=order)*asphist[i, j]
pbar.update(nhisty)
pbar.close()
else:
expmap = tmpmap*exp_time
expmap *= eff_area
if normalize:
expmap /= exp_time
if roll != 0.0:
rotate(expmap, roll, output=expmap, reshape=False)
expmap[expmap < 0.0] = 0.0
map_header = {"EXPOSURE": exp_time,
"MTYPE1": "EQPOS",
"MFORM1": "RA,DEC",
"CTYPE1": "RA---TAN",
"CTYPE2": "DEC--TAN",
"CRVAL1": ra0,
"CRVAL2": dec0,
"CUNIT1": "deg",
"CUNIT2": "deg",
"CDELT1": xdel*reblock,
"CDELT2": ydel*reblock,
"CRPIX1": 0.5*(2.0*nx//reblock+1),
"CRPIX2": 0.5*(2.0*ny//reblock+1)}
map_hdu = pyfits.ImageHDU(expmap, header=pyfits.Header(map_header))
map_hdu.name = "EXPMAP"
map_hdu.writeto(expmap_file, overwrite=overwrite)
if asol_file is not None:
if dither_params["dither_on"]:
det = np.array([x_off, y_off])
pix = np.dot(get_rot_mat(roll).T, det)
ra, dec = w.wcs_pix2world(pix[0,:]+x0, pix[1,:]+y0, 1)
col_t = pyfits.Column(name='time', format='D', unit='s', array=t)
col_ra = pyfits.Column(name='ra', format='D', unit='deg', array=ra)
col_dec = pyfits.Column(name='dec', format='D', unit='deg', array=dec)
coldefs = pyfits.ColDefs([col_t, col_ra, col_dec])
tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
tbhdu.name = "ASPSOL"
tbhdu.header["EXPOSURE"] = exp_time
hdulist = [pyfits.PrimaryHDU(), tbhdu]
pyfits.HDUList(hdulist).writeto(asol_file, overwrite=overwrite)
else:
mylog.warning("Refusing to write an aspect solution file because "
"there was no dithering.")
def _write_spectrum(bins, spec, exp_time, spectype, parameters,
specfile, overwrite=False):
col1 = pyfits.Column(name='CHANNEL', format='1J', array=bins)
col2 = pyfits.Column(name=spectype.upper(), format='1D', array=bins.astype("float64"))
col3 = pyfits.Column(name='COUNTS', format='1J', array=spec.astype("int32"))
col4 = pyfits.Column(name='COUNT_RATE', format='1D', array=spec/exp_time)
coldefs = pyfits.ColDefs([col1, col2, col3, col4])
tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
tbhdu.name = "SPECTRUM"
tbhdu.header["DETCHANS"] = spec.size
tbhdu.header["TOTCTS"] = spec.sum()
tbhdu.header["EXPOSURE"] = exp_time
tbhdu.header["LIVETIME"] = exp_time
tbhdu.header["CONTENT"] = spectype
tbhdu.header["HDUCLASS"] = "OGIP"
tbhdu.header["HDUCLAS1"] = "SPECTRUM"
tbhdu.header["HDUCLAS2"] = "TOTAL"
tbhdu.header["HDUCLAS3"] = "TYPE:I"
tbhdu.header["HDUCLAS4"] = "COUNT"
tbhdu.header["HDUVERS"] = "1.1.0"
tbhdu.header["HDUVERS1"] = "1.1.0"
tbhdu.header["CHANTYPE"] = spectype
tbhdu.header["BACKFILE"] = "none"
tbhdu.header["CORRFILE"] = "none"
tbhdu.header["POISSERR"] = True
for key in ["RESPFILE", "ANCRFILE", "MISSION", "TELESCOP", "INSTRUME"]:
tbhdu.header[key] = parameters[key]
tbhdu.header["AREASCAL"] = 1.0
tbhdu.header["CORRSCAL"] = 0.0
tbhdu.header["BACKSCAL"] = 1.0
hdulist = pyfits.HDUList([pyfits.PrimaryHDU(), tbhdu])
hdulist.writeto(specfile, overwrite=overwrite)
def write_spectrum(evtfile, specfile, overwrite=False):
r"""
Bin event energies into a spectrum and write it to
a FITS binary table. Does not do any grouping of
channels, and will automatically determine PI or PHA.
Parameters
----------
evtfile : string
The name of the event file to read the events from.
specfile : string
The name of the spectrum file to be written.
overwrite : boolean, optional
Whether or not to overwrite an existing file with
the same name. Default: False
"""
from xcs_soxs.instrument import RedistributionMatrixFile
parameters = {}
if isinstance(evtfile, string_types):
f = pyfits.open(evtfile)
spectype = f["EVENTS"].header["CHANTYPE"]
rmf = f["EVENTS"].header["RESPFILE"]
p = f["EVENTS"].data[spectype]
exp_time = f["EVENTS"].header["EXPOSURE"]
for key in ["RESPFILE", "ANCRFILE", "MISSION", "TELESCOP", "INSTRUME"]:
parameters[key] = f["EVENTS"].header[key]
f.close()
else:
rmf = evtfile["rmf"]
spectype = evtfile["channel_type"]
p = evtfile[spectype]
parameters["RESPFILE"] = os.path.split(rmf)[-1]
parameters["ANCRFILE"] = os.path.split(evtfile["arf"])[-1]
parameters["TELESCOP"] = evtfile["telescope"]
parameters["INSTRUME"] = evtfile["instrument"]
parameters["MISSION"] = evtfile["mission"]
exp_time = evtfile["exposure_time"]
rmf = RedistributionMatrixFile(rmf)
minlength = rmf.n_ch
if rmf.cmin == 1:
minlength += 1
spec = np.bincount(p, minlength=minlength)
if rmf.cmin == 1:
spec = spec[1:]
bins = (np.arange(rmf.n_ch)+rmf.cmin).astype("int32")
_write_spectrum(bins, spec, exp_time, spectype, parameters,
specfile, overwrite=overwrite)
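# Minimal usage sketch (hypothetical file names, illustrative only):
# write_spectrum("obs_evt.fits", "obs_spec.pha", overwrite=True)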
def write_radial_profile(evt_file, out_file, ctr, rmin,
rmax, nbins, ctr_type="celestial",
emin=None, emax=None, expmap_file=None,
overwrite=False):
r"""
Bin up events into a radial profile and write them to a FITS
table.
Parameters
----------
evt_file : string
Input event file.
out_file : string
The output file to write the profile to.
ctr : array-like
The central coordinate of the profile. Can either be in
celestial coordinates (the default) or "physical" pixel
coordinates. If the latter, the ``ctr_type`` keyword
argument must be explicitly set to "physical".
rmin : float, (value, unit) tuple, or :class:`~astropy.units.Quantity`
The minimum radius of the profile, in arcseconds.
rmax : float, (value, unit) tuple, or :class:`~astropy.units.Quantity`
The maximum radius of the profile, in arcseconds.
nbins : integer
The number of bins in the profile.
ctr_type : string, optional
The type of center coordinate. Either "celestial" for
(RA, Dec) coordinates (the default), or "physical" for
pixel coordinates.
emin : float, (value, unit) tuple, or :class:`~astropy.units.Quantity`, optional
The minimum energy of the events to be binned in keV.
Default is the lowest energy available.
emax : float, (value, unit) tuple, or :class:`~astropy.units.Quantity`, optional
The maximum energy of the events to be binned in keV.
Default is the highest energy available.
overwrite : boolean, optional
Whether or not to overwrite an existing file with the
same name. Default: False
expmap_file : string, optional
Supply an exposure map file to determine fluxes.
Default: None
"""
import astropy.wcs as pywcs
rmin = parse_value(rmin, "arcsec")
rmax = parse_value(rmax, "arcsec")
f = pyfits.open(evt_file)
hdu = f["EVENTS"]
orig_dx = hdu.header["TCDLT3"]
e = hdu.data["ENERGY"]
if emin is None:
emin = e.min()
else:
emin = parse_value(emin, "keV")
emin *= 1000.
if emax is None:
emax = e.max()
else:
emax = parse_value(emax, "keV")
emax *= 1000.
idxs = np.logical_and(e > emin, e < emax)
x = hdu.data["X"][idxs]
y = hdu.data["Y"][idxs]
exp_time = hdu.header["EXPOSURE"]
w = wcs_from_event_file(f)
dtheta = np.abs(w.wcs.cdelt[1])*3600.0
f.close()
if ctr_type == "celestial":
ctr = w.all_world2pix(ctr[0], ctr[1], 1)
r = np.sqrt((x-ctr[0])**2+(y-ctr[1])**2)
rr = np.linspace(rmin/dtheta, rmax/dtheta, nbins+1)
C = np.histogram(r, bins=rr)[0]
rbin = rr*dtheta
rmid = 0.5*(rbin[1:]+rbin[:-1])
A = np.pi*(rbin[1:]**2-rbin[:-1]**2)
Cerr = np.sqrt(C)
R = C/exp_time
Rerr = Cerr/exp_time
S = R/A
Serr = Rerr/A
col1 = pyfits.Column(name='RLO', format='D', unit='arcsec', array=rbin[:-1])
col2 = pyfits.Column(name='RHI', format='D', unit='arcsec', array=rbin[1:])
col3 = pyfits.Column(name='RMID', format='D', unit='arcsec', array=rmid)
col4 = pyfits.Column(name='AREA', format='D', unit='arcsec**2', array=A)
col5 = pyfits.Column(name='NET_COUNTS', format='D', unit='count', array=C)
col6 = pyfits.Column(name='NET_ERR', format='D', unit='count', array=Cerr)
col7 = pyfits.Column(name='NET_RATE', format='D', unit='count/s', array=R)
col8 = pyfits.Column(name='ERR_RATE', format='D', unit='count/s', array=Rerr)
col9 = pyfits.Column(name='SUR_BRI', format='D', unit='count/s/arcsec**2', array=S)
col10 = pyfits.Column(name='SUR_BRI_ERR', format='1D', unit='count/s/arcsec**2', array=Serr)
coldefs = [col1, col2, col3, col4, col5, col6, col7, col8, col9, col10]
if expmap_file is not None:
f = pyfits.open(expmap_file)
ehdu = f["EXPMAP"]
wexp = pywcs.WCS(header=ehdu.header)
cel = w.all_pix2world(ctr[0], ctr[1], 1)
ectr = wexp.all_world2pix(cel[0], cel[1], 1)
exp = ehdu.data[:,:]
nx, ny = exp.shape
reblock = ehdu.header["CDELT2"]/orig_dx
x, y = np.mgrid[1:nx+1,1:ny+1]
r = np.sqrt((x-ectr[0])**2 + (y-ectr[1])**2)
f.close()
E = np.histogram(r, bins=rr/reblock, weights=exp)[0] / np.histogram(r, bins=rr/reblock)[0]
with np.errstate(invalid='ignore', divide='ignore'):
F = R/E
Ferr = Rerr/E
SF = F/A
SFerr = Ferr/A
col11 = pyfits.Column(name='MEAN_SRC_EXP', format='D', unit='cm**2', array=E)
col12 = pyfits.Column(name='NET_FLUX', format='D', unit='count/s/cm**2', array=F)
col13 = pyfits.Column(name='NET_FLUX_ERR', format='D', unit='count/s/cm**2', array=Ferr)
col14 = pyfits.Column(name='SUR_FLUX', format='D', unit='count/s/cm**2/arcsec**2', array=SF)
col15 = pyfits.Column(name='SUR_FLUX_ERR', format='D', unit='count/s/cm**2/arcsec**2', array=SFerr)
coldefs += [col11, col12, col13, col14, col15]
tbhdu = pyfits.BinTableHDU.from_columns(pyfits.ColDefs(coldefs))
tbhdu.name = "PROFILE"
hdulist = pyfits.HDUList([pyfits.PrimaryHDU(), tbhdu])
hdulist.writeto(out_file, overwrite=overwrite)
coord_types = {"sky": ("X", "Y", 2, 3),
"det": ("DETX", "DETY", 6, 7)}
def write_image(evt_file, out_file, coord_type='sky', emin=None, emax=None,
overwrite=False, expmap_file=None, reblock=1):
r"""
Generate a image by binning X-ray counts and write
it to a FITS file.
Parameters
----------
evt_file : string
The name of the input event file to read.
out_file : string
The name of the image file to write.
coord_type : string, optional
The type of coordinate to bin into an image.
Can be "sky" or "det". Default: "sky"
emin : float, (value, unit) tuple, or :class:`~astropy.units.Quantity`, optional
The minimum energy of the photons to put in the
image, in keV.
emax : float, (value, unit) tuple, or :class:`~astropy.units.Quantity`, optional
The maximum energy of the photons to put in the
image, in keV.
overwrite : boolean, optional
Whether or not to overwrite an existing file with
the same name. Default: False
expmap_file : string, optional
Supply an exposure map file to divide this image by
to get a flux map. Default: None
reblock : integer, optional
Change this value to reblock the image to larger
pixel sizes (reblock >= 1). Only supported for
sky coordinates. Default: 1
"""
if coord_type == "det" and reblock > 1:
raise RuntimeError("Reblocking images is not supported "
"for detector coordinates!")
f = pyfits.open(evt_file)
e = f["EVENTS"].data["ENERGY"]
if emin is None:
emin = e.min()
else:
emin = parse_value(emin, "keV")
emin *= 1000.
if emax is None:
emax = e.max()
else:
emax = parse_value(emax, "keV")
emax *= 1000.
idxs = np.logical_and(e > emin, e < emax)
xcoord, ycoord, xcol, ycol = coord_types[coord_type]
x = f["EVENTS"].data[xcoord][idxs]
y = f["EVENTS"].data[ycoord][idxs]
exp_time = f["EVENTS"].header["EXPOSURE"]
xmin = f["EVENTS"].header["TLMIN%d" % xcol]
ymin = f["EVENTS"].header["TLMIN%d" % ycol]
xmax = f["EVENTS"].header["TLMAX%d" % xcol]
ymax = f["EVENTS"].header["TLMAX%d" % ycol]
if coord_type == 'sky':
xctr = f["EVENTS"].header["TCRVL%d" % xcol]
yctr = f["EVENTS"].header["TCRVL%d" % ycol]
xdel = f["EVENTS"].header["TCDLT%d" % xcol]*reblock
ydel = f["EVENTS"].header["TCDLT%d" % ycol]*reblock
f.close()
nx = int(xmax-xmin)//reblock
ny = int(ymax-ymin)//reblock
xbins = np.linspace(xmin, xmax, nx+1, endpoint=True)
ybins = np.linspace(ymin, ymax, ny+1, endpoint=True)
H, xedges, yedges = np.histogram2d(x, y, bins=[xbins, ybins])
if expmap_file is not None:
if coord_type == "det":
raise RuntimeError("Cannot divide by an exposure map for images "
"binned in detector coordinates!")
f = pyfits.open(expmap_file)
if f["EXPMAP"].shape != (nx, ny):
raise RuntimeError("Exposure map and image do not have the same shape!!")
with np.errstate(invalid='ignore', divide='ignore'):
H /= f["EXPMAP"].data.T
H[np.isinf(H)] = 0.0
H = np.nan_to_num(H)
H[H < 0.0] = 0.0
f.close()
hdu = pyfits.PrimaryHDU(H.T)
if coord_type == 'sky':
hdu.header["MTYPE1"] = "EQPOS"
hdu.header["MFORM1"] = "RA,DEC"
hdu.header["CTYPE1"] = "RA---TAN"
hdu.header["CTYPE2"] = "DEC--TAN"
hdu.header["CRVAL1"] = xctr
hdu.header["CRVAL2"] = yctr
hdu.header["CUNIT1"] = "deg"
hdu.header["CUNIT2"] = "deg"
hdu.header["CDELT1"] = xdel
hdu.header["CDELT2"] = ydel
hdu.header["CRPIX1"] = 0.5*(nx+1)
hdu.header["CRPIX2"] = 0.5*(ny+1)
else:
hdu.header["CUNIT1"] = "pixel"
hdu.header["CUNIT2"] = "pixel"
hdu.header["EXPOSURE"] = exp_time
hdu.writeto(out_file, overwrite=overwrite)
def plot_spectrum(specfile, plot_energy=True, lw=2, xmin=None, xmax=None,
ymin=None, ymax=None, xscale=None, yscale=None,
label=None, fontsize=18, fig=None, ax=None,
plot_counts=False, **kwargs):
"""
Make a quick Matplotlib plot of a convolved spectrum
from a file. A Matplotlib figure and axis is returned.
Parameters
----------
specfile : string
The file to be opened for plotting.
plot_energy : boolean, optional
Whether to plot in energy or channel space. Default is
to plot in energy, unless the RMF for the spectrum
cannot be found.
lw : float, optional
The width of the lines in the plots. Default: 2.0 px.
xmin : float, optional
The left-most energy (in keV) or channel to plot. Default is the
minimum value in the spectrum.
xmax : float, optional
The right-most energy (in keV) or channel to plot. Default is the
maximum value in the spectrum.
ymin : float, optional
The lower extent of the y-axis. By default it is set automatically.
ymax : float, optional
The upper extent of the y-axis. By default it is set automatically.
xscale : string, optional
The scaling of the x-axis of the plot. Default: "log"
yscale : string, optional
The scaling of the y-axis of the plot. Default: "log"
label : string, optional
The label of the spectrum. Default: None
fontsize : int
Font size for labels and axes. Default: 18
fig : :class:`~matplotlib.figure.Figure`, optional
A Figure instance to plot in. Default: None, one will be
created if not provided.
ax : :class:`~matplotlib.axes.Axes`, optional
An Axes instance to plot in. Default: None, one will be
created if not provided.
plot_counts : boolean, optional
If set to True, the counts instead of the count rate will
be plotted. Default: False
Returns
-------
A tuple of the :class:`~matplotlib.figure.Figure` and the :class:`~matplotlib.axes.Axes` objects.
"""
import matplotlib.pyplot as plt
from xcs_soxs.instrument import RedistributionMatrixFile
f = pyfits.open(specfile)
hdu = f["SPECTRUM"]
chantype = hdu.header["CHANTYPE"]
rmf = hdu.header.get("RESPFILE", None)
xerr = None
if plot_energy:
if rmf is not None:
rmf = RedistributionMatrixFile(rmf)
x = 0.5*(rmf.ebounds_data["E_MIN"]+rmf.ebounds_data["E_MAX"])
xerr = 0.5*(rmf.ebounds_data["E_MAX"]-rmf.ebounds_data["E_MIN"])
xlabel = "Energy (keV)"
else:
raise RuntimeError("Cannot find the RMF associated with this "
"spectrum, so I cannot plot in energy!")
else:
x = hdu.data[chantype]
xlabel = "Channel (%s)" % chantype
if plot_counts:
y = hdu.data["COUNTS"].astype("float64")
yerr = np.sqrt(y)
else:
if "COUNT_RATE" in hdu.columns.names:
y = hdu.data["COUNT_RATE"]
else:
y = hdu.data["COUNTS"]/hdu.header["EXPOSURE"]
yerr = np.sqrt(hdu.data["COUNTS"])/hdu.header["EXPOSURE"]
if plot_energy:
yunit = "keV"
y /= 2.0*xerr
yerr /= 2.0*xerr
else:
yunit = "bin"
f.close()
if fig is None:
fig = plt.figure(figsize=(10, 10))
if xscale is None:
if ax is None:
xscale = "log"
else:
xscale = ax.get_xscale()
if yscale is None:
if ax is None:
yscale = "log"
else:
yscale = ax.get_yscale()
if ax is None:
ax = fig.add_subplot(111)
ax.errorbar(x, y, yerr=yerr, xerr=xerr, lw=lw, label=label, **kwargs)
ax.set_xscale(xscale)
ax.set_yscale(yscale)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_xlabel(xlabel, fontsize=fontsize)
if plot_counts:
ylabel = "Counts (counts/%s)"
else:
ylabel = "Count Rate (counts/s/%s)"
ax.set_ylabel(ylabel % yunit, fontsize=fontsize)
ax.tick_params(axis='both', labelsize=fontsize)
return fig, ax
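# Minimal usage sketch (hypothetical file name; assumes the RMF named in the spectrum
# header can be located so the energy axis can be constructed):
# fig, ax = plot_spectrum("obs_spec.pha", xmin=0.5, xmax=7.0, label="source")
# fig.savefig("obs_spec.png")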
|
StarcoderdataPython
|
1637637
|
<gh_stars>0
import os
import numpy as np
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"] = ''
# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path="saved_models/CNN1d_CTC_PinYin_Sample_lessDropout/MagicData/(gpu_n=1)(feature_name=mel)(label_type=pinyin)/best_val_loss(epoch=70)(loss=7.7)(val_loss=10.5)converted_predict_model.tflite")
interpreter.allocate_tensors()
|
StarcoderdataPython
|
6703672
|
class ContextualHelp(object):
"""
Contains the details for how Revit should allow invocation of contextual help for an item added by an application.
ContextualHelp(helpType: ContextualHelpType, helpPath: str)
"""
def Launch(self):
"""
Launch(self: ContextualHelp)
Launches and displays the help topic specified by the contents of this
ContextualHelp object.
"""
pass
@staticmethod
def __new__(self, helpType, helpPath):
""" __new__(cls: type,helpType: ContextualHelpType,helpPath: str) """
pass
HelpPath = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""The context id,help URL,or help file path.
Get: HelpPath(self: ContextualHelp) -> str
Set: HelpPath(self: ContextualHelp)=value
"""
HelpTopicUrl = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The help topic URL.
Get: HelpTopicUrl(self: ContextualHelp) -> str
Set: HelpTopicUrl(self: ContextualHelp)=value
"""
HelpType = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""The contextual help type.
Get: HelpType(self: ContextualHelp) -> ContextualHelpType
Set: HelpType(self: ContextualHelp)=value
"""
|
StarcoderdataPython
|
6450066
|
from setuptools import setup, find_packages
from os.path import join, dirname
import pyrecsys
with open('requirements.txt') as f:
reqs = f.read().splitlines()
setup(
name='pyrecsys',
version=pyrecsys.__version__,
packages=['pyrecsys', 'pyrecsys._polara.lib'],
author = "<NAME>",
author_email = "<EMAIL>",
description = "Collaborative filtering recommender system",
license = "MIT",
url = "https://github.com/vlarine/pyrecsys",
long_description=open(join(dirname(__file__), 'README.md')).read(),
install_requires=reqs,
)
|
StarcoderdataPython
|
6618288
|
<reponame>Sudani-Coder/python
## project: 9
# text analyzer
vowels = ("a", "e", "i", "o", "u")
def count_char(text, char):
count = 0
for each in text:
if each == char:
count += 1
return count
def count_vowels(text):
count = 0
for each in text:
if each in vowels:
count += 1
return count
def perc_char(text):
for char in "abcdefghijklmnopqrstuvwxyz":
perc = 100 * count_char(text, char) / len(text)
print("{0} - {1}%".format(char, round(perc, 2)))
filename = input("Please Enter The File Name -->")
thechar = input("Please Enter The char -->").lower()
with open(filename, "rt") as myfile:
text = myfile.read()
print("the char {} repeted {} number of times".format(thechar ,count_char(text, thechar)))
print("the number of vowels letters is {}".format(count_vowels(text)))
perc_char(text)
|
StarcoderdataPython
|
3378971
|
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
class Phones4uSpider(BaseSpider):
name = 'phones4u.co.uk'
allowed_domains = ['phones4u.co.uk']
start_urls = ['http://www.phones4u.co.uk/shop/shop_payg_main.asp?intcid=PAYG%20Phones']
def parse(self, response):
hxs = HtmlXPathSelector(response)
categories = hxs.select('//*[@id="secBody"]/table/tbody/tr/td/a/@href').extract()
for category in categories:
url = urljoin_rfc(response.url, category.strip(), response.encoding)
yield Request(url, callback=self.parse_pages)
def parse_pages(self, response):
hxs = HtmlXPathSelector(response)
products = hxs.select('//*[@id="manphones"]/table/tbody/tr[not(@class="netempty")]/td[not(@class="manempty")]')
for product in products:
BASE_URL = 'http://www.phones4u.co.uk/'
loader = ProductLoader(item=Product(), selector=product)
loader.add_xpath('name', 'a/text()')
relative_url = product.select('a/@href').extract()[0]
url = urljoin_rfc(BASE_URL, relative_url, response.encoding)
loader.add_value('url', url)
loader.add_xpath('price', 'text()')
yield loader.load_item()
|
StarcoderdataPython
|
5127894
|
# -*- coding: utf-8 -*-
"""
server.common.validators
~~~~~~~~~~~~~~~~~~~~~~~~
"""
class ValidationError(Exception):
    """Validation failure. Defined here as a stand-in; the project may import its own exception type."""


def validate_phone_number(phone_number: str):
    """Validates phone number strings."""
    if not phone_number.isdecimal() or len(phone_number) != 10:
        raise ValidationError("Value is not a 10-digit phone number!")
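# Minimal usage sketch (illustrative values, not part of the original module):
# validate_phone_number("0123456789")   # passes silently
# validate_phone_number("12345")        # raises ValidationError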
|
StarcoderdataPython
|
1888559
|
class Caesar:
    @staticmethod
    def caesar(cleartext, key, alphabet, cipher):
        """Shift `cleartext` by `key` positions within `alphabet`; encrypt if `cipher` is True, decrypt otherwise."""
        result = ""
        cleartext = cleartext.lower()
        if not cipher:
            # Decryption is encryption with the complementary key.
            key = len(alphabet) - key
        for x in cleartext:
            if x not in alphabet:
                # Characters outside the alphabet pass through unchanged.
                result += x
            else:
                i = alphabet.index(x)
                j = (i + key) % len(alphabet)
                result += alphabet[j]
        return result
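# Minimal usage sketch (illustrative alphabet and key; not part of the original module):
# alphabet = "abcdefghijklmnopqrstuvwxyz"
# encrypted = Caesar.caesar("hello", 3, alphabet, True)     # -> "khoor"
# decrypted = Caesar.caesar(encrypted, 3, alphabet, False)  # -> "hello"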
|
StarcoderdataPython
|
1744726
|
from __future__ import annotations
from typing import Any, Iterable, Literal, Sequence
import attr
import networkx as nx
__all__ = ["PoSet", "Pair", "Chain", "CMP"]
Pair = tuple[Any, Any]
Chain = Sequence[Any]
CMP = Literal["<", ">", "||", "="]
@attr.frozen
class PoSet:
"""Hasse diagram representation of partially ordered set.
"""
hasse: nx.DiGraph = attr.ib(factory=nx.DiGraph)
def __len__(self) -> int:
return len(self.hasse)
def __iter__(self) -> Iterable[Any]:
yield from self.hasse.nodes
def compare(self, left: Any, right: Any) -> CMP:
if left == right:
return "="
elif nx.has_path(self.hasse, left, right):
return "<"
elif nx.has_path(self.hasse, right, left):
return ">"
return "||"
def __contains__(self, elem: Any) -> bool:
return elem in self.hasse.nodes
def add(self, chain: Chain) -> PoSet:
hasse = nx.DiGraph(self.hasse)
nx.add_path(hasse, chain)
return attr.evolve(self, hasse=nx.transitive_reduction(hasse))
@staticmethod
def from_chains(*chains: list[Chain]) -> PoSet:
hasse = nx.DiGraph()
for chain in chains:
nx.add_path(hasse, chain)
return PoSet(nx.transitive_reduction(hasse))
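# Minimal usage sketch (illustrative chains; not part of the original module):
# poset = PoSet.from_chains([1, 2, 4], [1, 3, 4])
# poset.compare(1, 4)   # "<"  : a path 1 -> ... -> 4 exists in the Hasse diagram
# poset.compare(2, 3)   # "||" : 2 and 3 are incomparable
# poset = poset.add([4, 5])  # returns a new PoSet with the chain 4 < 5 merged in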
|
StarcoderdataPython
|
29630
|
import unittest
from unittest.mock import patch, Mock
from werkzeug.datastructures import FileStorage
import io
import json
from app import app
from app.models.base import db
from app.models.user import User
from app.auth.views import UserPassportphotoView
from app.auth import views
class AuthUploadPassportPhotoTestCase(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
app.testing = True
self.user_data = {
"username": "john123",
"email": "<EMAIL>",
"password": "<PASSWORD>",
}
with app.app_context():
db.drop_all()
db.create_all()
# create admin user
user = User(
username="john123",
email="<EMAIL>",
password="<PASSWORD>",
role=True,
)
user.save()
@patch.object(views.UserPassportphotoView, "post")
def test_upload_passport_photo(self, mock_post):
upload = UserPassportphotoView()
mock_post.return_value.status_code = 200
res = upload.post(
"/api/v1/auth/upload",
data=dict(file=(io.BytesIO(b"abcdef"), "test.jpg")),
headers={"Content-Type": "multipart/form-data"},
)
self.assertEqual(res.status_code, 200)
def test_upload_photo_with_non_allowed_ext(self):
res = self.app.post(
"/api/v1/auth/login",
data=json.dumps(self.user_data),
headers={"Content-Type": "application/json"},
)
token = json.loads(res.data.decode())["access_token"]
data = {"file": (io.BytesIO(b'my file contents'), 'hello.txt')}
result = self.app.post(
"/api/v1/auth/upload", buffered=True,
headers={
"Authorization": token,
"Content-Type" : 'multipart/form-data',
},
data=data,
)
self.assertEqual(result.status_code, 400)
def test_no_photo_upload(self):
res = self.app.post(
"/api/v1/auth/login",
data=json.dumps(self.user_data),
headers={"Content-Type": "application/json"},
)
token = json.loads(res.data.decode())["access_token"]
result = self.app.post(
"/api/v1/auth/upload", buffered=True,
headers={
"Authorization": token,
"Content-Type" : 'multipart/form-data',
},
data={},
)
self.assertEqual(result.status_code, 400)
|
StarcoderdataPython
|
1738398
|
<reponame>naderm/django_rest_omics
from proteomics import models
from rest_framework import serializers
# Serializers define the API representation.
class PeptideMethodSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.PeptideMethod
class MSMethodSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.MSMethod
class PeptideDataSetSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.PeptideDataSet
class PeptideSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Peptide
class ProteinSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Protein
|
StarcoderdataPython
|
1664612
|
#!/usr/bin/env python3
import socketserver, socket
import threading
from os.path import exists, isfile
from time import sleep, strftime, time, localtime
import signal
from aes import myAES
from os import system, fork, kill
from sys import argv
from random import shuffle, randrange
HOST = ''
PORT = 10007
aes = myAES('test', 'test')
pidfile = '.chatserverP.pid'
dummy_proc = '0000'
VER = '1.4 Public'
class UserManager:
def __init__(self):
self.users = {}
self.members = []
self.lock = threading.Lock()
def addUser(self, username, conn, addr):
global WELCOME_EMOTI
if username in self.users:
            msg = aes.enc('이미 등록된 사용자입니다.\n'.encode())  # "You are already a registered user."
conn.send(msg)
return None
if len(self.users) > 5:
            msg = aes.enc('사용자수 초과입니다.\n'.encode())  # "The user limit has been exceeded."
conn.send(msg)
return None
with self.lock:
self.users[username] = (conn, addr)
self.members.append(username)
try:
welcome_msg = '/welcome;%s' %username
welcome_msg = aes.enc(welcome_msg.encode())
conn.send(welcome_msg)
memberlist = '#'.join(self.members)
            self.sendMessageToAll('/addmember;%s;[%s]님이 입장했습니다.\n\n' %(memberlist, username))  # "[username] has entered."
except Exception as e:
writeLog('Error-1: ' + str(e))
pass
return username
def removeUser(self, username):
if username not in self.users:
return
with self.lock:
del self.users[username]
if username in self.members:
self.members.remove(username)
if username+'(X)' in self.members:
self.members.remove(username+'(X)')
memberlist = '#'.join(self.members)
        self.sendMessageToAll('/delmember;%s;[%s]님이 퇴장했습니다.\n\n' %(memberlist, username))  # "[username] has left."
def messageHandler(self, username, msg):
msg = msg.strip()
try:
if msg[0] != '/':
self.sendMessageToAll('/msgbody;%s;%s' %(username, msg))
return
if msg == '/absence':
pos = self.members.index(username)
self.members[pos] = username + '(X)'
memberlist = '#'.join(self.members)
self.sendMessageToAll('/updatepresence;%s' %memberlist)
return
if msg == '/presence':
pos = self.members.index(username+'(X)')
self.members[pos] = username
memberlist = '#'.join(self.members)
self.sendMessageToAll('/updatepresence;%s' %memberlist)
return
if '/emoticon' in msg:
self.sendMessageToAll(msg)
return
if msg == '/quit':
msg = aes.enc('/bye'.encode())
self.users[username][0].send(msg)
self.removeUser(username)
return -1
if msg == '/keypressed':
self.sendMessageWithoutMe(username, 0)
return
if msg == '/emptymsg':
self.sendMessageWithoutMe(username, 1)
return
if '/personalchat' in msg:
tmp = msg.split(';')
self.sendMessage2Users(username, tmp[1], ';'.join(tmp[2:]))
return
except Exception as e:
writeLog('Error-1-1: '+str(e))
def sendMessageToAll(self, msg):
try:
msg = aes.enc(msg.encode())
for conn, addr in self.users.values():
conn.send(msg)
#print('SENDINGALL')
except Exception as e:
writeLog('Error-2: ' + str(e))
pass
def sendMessageWithoutMe(self, username, mode):
try:
if mode == 0:
msg = '/keypressed;%s' %username
else:
msg = '/emptymsg;%s' %username
msg = aes.enc(msg.encode())
for user, val in self.users.items():
if user == username:
continue
val[0].send(msg)
except Exception as e:
writeLog('Error-3: ' + str(e))
pass
def sendMessage2Users(self, username, yourname, msg):
try:
conn1 = self.users[username][0]
conn2 = self.users[yourname][0]
header = '/personalchat#%s#%s#' %(username, yourname)
msg = header + msg
msg = aes.enc(msg.encode())
conn1.send(msg)
conn2.send(msg)
except Exception as e:
writeLog('Error-4: '+str(e))
class MyTcpHandler(socketserver.BaseRequestHandler):
userman = UserManager()
heartbits = [chr(x) for x in range(40, 120)]
def handle(self):
self.request.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
username = ''
buf = b''
try:
while True:
msg = self.request.recv(65565)
if not msg:
break
if len(msg)%16 != 0:
#writeLog('INCOMMING MSG LENGTH [%d]' %len(msg))
buf += msg
if len(buf)%16 != 0:
continue
else:
msg = buf
buf = b''
msg = aes.dec(msg)
if msg == None:
continue
msg = msg.strip()
#print(msg)
#writeLog('INCOMING MSG [%s]: %s' %(username, msg))
if msg == '/login':
username = self.registerUsername()
continue
if '$%#' in msg:
shuffle(self.heartbits)
randstr = ''.join(self.heartbits)
scope1 = randrange(1, max(2,len(randstr)-1))
scope2 = randrange(1, max(2,scope1))
if scope1 == scope2:
scope1 += 1
msg = randstr[:scope2] + '$%#' + randstr[scope2:scope1]
msg = aes.enc(msg.encode())
self.request.send(msg)
#writeLog('INCOMING HEARTBIT[%s]' %username)
continue
ret = self.userman.messageHandler(username, msg)
#writeLog('CALL Message Handler')
if ret == -1:
self.request.close()
break
except Exception as e:
log = 'Error-5: [%s] %s' %(username, str(e))
writeLog(log)
self.userman.removeUser(username)
pass
def registerUsername(self):
try:
flag = True
try:
with open('notice.txt', 'r') as f:
notice = f.read()
except:
notice = ''
while True:
if flag:
                    msg = '끼리서버 ver. %s\n\n%s\n[대화명을 입력하세요]\n' %(VER, notice)  # "Kkiri server ver. %s ... [Enter a nickname]"
msg = aes.enc(msg.encode())
self.request.send(msg)
flag = False
username = self.request.recv(65565)
username = aes.dec(username)
if username[0] == '/':
continue
#writeLog('Call Add Username: %s' %username)
if self.userman.addUser(username, self.request, self.client_address):
return username
except Exception as e:
writeLog('Error-6: '+str(e))
self.userman.removeUser(username)
pass
def writeLog(log):
tstr = strftime('%Y-%m-%d %H:%M:%S;', localtime())
log = tstr + log + '\n'
with open('chat2.log', 'a') as f:
f.writelines(log)
class ChatingServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
def runServer(args):
if len(args) == 0:
        print('사용법: ./runchat -s or -x')  # "Usage: ./runchat -s or -x"
return
if args[0] == '-s':
if isfile(pidfile):
            print('서버가 이미 구동중입니다.')  # "The server is already running."
return
try:
with open(pidfile, 'w') as h:
h.write(dummy_proc)
except Exception as e:
print(str(e))
return
pid = fork()
if pid == 0: # Child Process
try:
server = ChatingServer((HOST, PORT), MyTcpHandler)
server.serve_forever()
except KeyboardInterrupt:
server.shutdown()
server.server_close()
else:
            print('+++ 채팅 서버를 시작합니다.')  # "+++ Starting the chat server."
with open(pidfile, 'w') as h:
h.write('%d' %pid)
elif args[0] == '-x':
try:
with open(pidfile, 'r') as h:
pid = h.readline()
except Exception as e:
            print('.chatserver.pid 파일을 읽지 못했습니다.')  # "Could not read the .chatserver.pid file."
return
try:
kill(int(pid), signal.SIGINT)
            print('--- 채팅 서버를 종료중입니다..')  # "--- Shutting down the chat server..."
sleep(1)
kill(int(pid), signal.SIGINT)
except:
pass
cmd = 'rm -rf %s' %pidfile
system(cmd)
        print('--- 채팅 서버를 종료했습니다.')  # "--- The chat server has been stopped."
else:
        print('사용법: ./runchat -s or -x')  # "Usage: ./runchat -s or -x"
if __name__ == '__main__':
runServer(argv[1:])
|
StarcoderdataPython
|
9668882
|
from jinja2.utils import soft_unicode
'''
USAGE:
- debug:
msg: "{{ vpc.subnets | get_public_subnets_ids('Type','Public') }}"
'''
class FilterModule(object):
def filters(self):
return {
'get_public_subnets_ids': get_public_subnets_ids,
}
def get_public_subnets_ids(subnets, tag_key, tag_value):
    subnets_ids = []
    for item in subnets:
        # `iteritems()` is Python 2 only; `items()` keeps the filter working on Python 3.
        for key, value in item['resource_tags'].items():
            if key == tag_key and value == tag_value:
                subnets_ids.append(item['id'])
    return subnets_ids
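# Minimal sketch of calling the filter directly outside Ansible (illustrative data): each
# item carries an 'id' and a 'resource_tags' mapping, mirroring the vpc facts shape.
# subnets = [
#     {'id': 'subnet-1', 'resource_tags': {'Type': 'Public'}},
#     {'id': 'subnet-2', 'resource_tags': {'Type': 'Private'}},
# ]
# get_public_subnets_ids(subnets, 'Type', 'Public')  # -> ['subnet-1']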
|
StarcoderdataPython
|
3224623
|
<reponame>andraantariksa/code-exercise-answer
def convertTabs(code, x):
return code.replace("\t", " " * x)
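# Minimal example (illustrative): convertTabs("a\tb", 4) -> "a    b"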
|
StarcoderdataPython
|
5026495
|
<reponame>NunoEdgarGFlowHub/poptorch
#!/usr/bin/env python3
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import torch
import poptorch
import pytest
# Tensors
# Creation ops (we don't support many of these)
# torch.numel, torch.tensor, torch.sparse_coo_tensor, torch.as_tensor, torch.as_strided, torch.from_numpy, torch.zeros,
# torch.zeros_like, torch.ones, torch.ones_like, torch.arange, torch.range, torch.linspace, torch.logspace, torch.eye,
# torch.empty, torch.empty_like, torch.empty_strided, torch.full, torch.full_like, torch.quantize_per_tensor, torch.quantize_per_channel,
# Indexing, Slicing, Joining, Mutating Ops
# torch.cat, torch.chunk, torch.gather, torch.index_select, torch.masked_select, torch.narrow, torch.nonzero, torch.reshape, torch.split,
# torch.squeeze, torch.stack, torch.t, torch.take, torch.transpose, torch.unbind, torch.unsqueeze, torch.where, torch._C.Generator,
# torch._C.Generator.device,
@pytest.mark.parametrize("dtype", (torch.float16, torch.float32, torch.int32))
def test_zeros_and_ones(dtype):
class Model(torch.nn.Module):
def forward(self, z):
x = torch.zeros(3, 5, 1, dtype=dtype)
y = torch.ones(3, 5, 1, dtype=dtype)
return (x * y) + z, (y + x) + z
model = Model()
# Run on CPU.
tensor_in = torch.tensor([1.0], dtype=dtype)
nativeOut = model(tensor_in)
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(tensor_in)
assert torch.equal(nativeOut[0], poptorch_out[0])
assert torch.equal(nativeOut[1], poptorch_out[1])
def test_cat():
class Model(torch.nn.Module):
def forward(self, x):
return torch.cat((x, x, x), 0)
model = Model()
x = torch.randn(2, 3)
# Run on CPU.
nativeOut = model(x)
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(x)
assert nativeOut.size() == poptorch_out.size()
assert torch.equal(nativeOut, poptorch_out)
def test_chunk():
class Model(torch.nn.Module):
def forward(self, x):
return torch.chunk(x, 5)
model = Model()
x = torch.randn(20, 10)
# Run on CPU.
nativeOut = model(x)
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(x)
for native, pop in zip(nativeOut, poptorch_out):
assert torch.equal(native, pop)
def test_reshape():
class Model(torch.nn.Module):
def forward(self, x):
return torch.reshape(x, (1, 1, 2, 2))
model = Model()
x = torch.arange(4.)
# Run on CPU.
nativeOut = model(x)
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(x)
assert nativeOut.size() == poptorch_out.size()
assert torch.equal(nativeOut, poptorch_out)
@pytest.mark.parametrize("split_size_or_sections",
(1, 5, 6, 20, [10, 10], [19, 1]))
def test_split(split_size_or_sections):
class Model(torch.nn.Module):
def forward(self, x):
return torch.split(x, split_size_or_sections)
model = Model()
x = torch.randn(20, 10)
# Run on CPU.
native_out = model(x)
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(x)
for native, pop in zip(native_out, poptorch_out):
assert native.size() == pop.size()
assert torch.equal(native, pop)
def test_squeeze():
class Model(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x)
model = Model()
x = torch.randn(1, 1, 20, 1, 10, 1)
# Run on CPU.
nativeOut = model(x)
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(x)
assert nativeOut.size() == poptorch_out.size()
assert torch.equal(nativeOut, poptorch_out)
def test_t():
class Model(torch.nn.Module):
def forward(self, x):
return torch.t(x)
model = Model()
x = torch.randn(20, 10)
# Run on CPU.
nativeOut = model(x)
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(x)
assert nativeOut.size() == poptorch_out.size()
assert torch.equal(nativeOut, poptorch_out)
def test_transpose():
class Model(torch.nn.Module):
def forward(self, x):
return torch.transpose(x, 3, 0)
model = Model()
x = torch.randn(3, 2, 5, 10)
# Run on CPU.
nativeOut = model(x)
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(x)
assert nativeOut.size() == poptorch_out.size()
assert torch.equal(nativeOut, poptorch_out)
def test_unsqueeze():
class Model(torch.nn.Module):
def forward(self, x):
return torch.unsqueeze(x, 1)
model = Model()
x = torch.randn(3, 2, 5, 10)
# Run on CPU.
nativeOut = model(x)
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(x)
assert nativeOut.size() == poptorch_out.size()
assert torch.equal(nativeOut, poptorch_out)
def test_expand():
class Model(torch.nn.Module):
def forward(self, x):
return x.expand(3, 4)
model = Model()
x = torch.randn(3, 1)
# Run on CPU.
nativeOut = model(x)
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(x)
assert nativeOut.size() == poptorch_out.size()
assert torch.equal(nativeOut, poptorch_out)
def test_expand_as():
class Model(torch.nn.Module):
def forward(self, x, y):
return x.expand_as(y)
model = Model()
x = torch.randn(3, 1)
y = torch.randn(3, 4)
# Run on CPU.
nativeOut = model(x, y)
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(x, y)
assert nativeOut.size() == poptorch_out.size()
assert torch.equal(nativeOut, poptorch_out)
def test_flatten():
class Model(torch.nn.Module):
def forward(self, x):
return torch.flatten(x)
model = Model()
x = torch.randn(3, 1)
# Run on CPU.
nativeOut = model(x)
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(x)
assert nativeOut.size() == poptorch_out.size()
assert torch.equal(nativeOut, poptorch_out)
def test_view():
class Model(torch.nn.Module):
def forward(self, x):
return x.view((15, 2, 5))
model = Model()
x = torch.randn(30, 5)
# Run on CPU.
nativeOut = model(x)
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(x)
assert nativeOut.size() == poptorch_out.size()
assert torch.equal(nativeOut, poptorch_out)
@pytest.mark.parametrize("input_shapes", [(1, ), (2, ), (2, 2), (2, 3, 4)])
def test_size(input_shapes):
class Model(torch.nn.Module):
def forward(self, x):
# Use size as input to another operation to workaround pruning error
return x.view(x.size())
model = Model()
x = torch.ones(*input_shapes)
# Run on CPU.
native_out = model(x)
assert torch.equal(x, native_out)
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(x)
assert native_out.size() == poptorch_out.size()
assert torch.equal(native_out, poptorch_out)
input_shapes = [(1, 4, 5), (2, ), (2, 2), (2, 3, 4, 1, 3, 4)]
dtypes = [torch.float, torch.float16, torch.int32]
@pytest.mark.parametrize("input_shapes", input_shapes)
@pytest.mark.parametrize("t", dtypes)
def test_fill(input_shapes, t):
float_test_num = 1.9375
class Model(torch.nn.Module):
def forward(self, x):
value = 42 if x.dtype == torch.int32 else float_test_num
return x.fill_(value), torch.full_like(x, value), torch.full(
input_shapes, value, dtype=x.dtype)
model = Model()
x = torch.ones(*input_shapes, dtype=t)
# Run on CPU.
if t != torch.float16:
native_out = model(x)
else:
native_out = (torch.full(input_shapes, float_test_num),
torch.full(input_shapes, float_test_num),
torch.full(input_shapes, float_test_num))
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(x)
for native, pop in zip(native_out, poptorch_out):
if t == torch.float16:
pop = pop.float()
assert native.size() == pop.size()
assert torch.equal(native, pop)
assert native.dtype == pop.dtype
@pytest.mark.parametrize("input_shapes", input_shapes)
@pytest.mark.parametrize("value", [0.666, -4.32, float("Inf"), float("-Inf")])
def test_masked_fill(input_shapes, value):
torch.manual_seed(42)
class Model(torch.nn.Module):
def forward(self, x):
fill_result = x.masked_fill(x > 0.5, value)
where_result = torch.where(x > 0.5, x, torch.tensor(value))
return fill_result, where_result
model = Model()
x = torch.randn(*input_shapes)
# Run on CPU.
native_out = model(x)
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(x)
for pop, native in zip(poptorch_out, native_out):
assert native.size() == pop.size()
assert torch.equal(native, pop)
@pytest.mark.parametrize("input_shapes", [(1, ), (2, ), (3, 4), (1, 3, 4)])
@pytest.mark.parametrize("dim", [0, 1, 2])
def test_stack(input_shapes, dim):
if dim > len(input_shapes):
pytest.skip()
torch.manual_seed(42)
class Model(torch.nn.Module):
def forward(self, x, y, z):
return torch.stack([x, y, z], dim=dim)
model = Model()
a = torch.randn(*input_shapes)
b = torch.randn(*input_shapes)
c = torch.randn(*input_shapes)
# Run on CPU.
native_out = model(a, b, c)
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(a, b, c)
for pop, native in zip(poptorch_out, native_out):
assert native.size() == pop.size()
assert torch.equal(native, pop)
@pytest.mark.parametrize("input_shapes", [(1, ), (2, ), (2, 3), (1, 3, 4)])
@pytest.mark.parametrize("dims",
[[1], [3], [2, 1], [2, 3], [1, 1, 1], [3, 2, 4]])
def test_repeat(input_shapes, dims):
if len(dims) < len(input_shapes):
pytest.skip(
"Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor."
)
torch.manual_seed(42)
class Model(torch.nn.Module):
def forward(self, x):
return x.repeat(dims)
model = Model()
a = torch.randn(*input_shapes)
# Run on CPU.
native_out = model(a)
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(a)
for pop, native in zip(poptorch_out, native_out):
assert native.size() == pop.size()
assert torch.equal(native, pop)
@pytest.mark.parametrize("input_shapes", [(1, ), (2, ), (2, 3), (1, 3, 4)])
@pytest.mark.parametrize("dtype", [torch.float, torch.int])
def test_copy_(input_shapes, dtype):
torch.manual_seed(42)
class Model(torch.nn.Module):
def forward(self, x, y):
return y.copy_(x)
model = Model()
x = torch.randn(*input_shapes)
y = torch.empty_like(x, dtype=dtype)
# Run on CPU.
native_out = model(x, y)
# Run on IPU.
poptorch_model = poptorch.inferenceModel(model)
poptorch_out = poptorch_model(x, y)
for pop, native in zip(poptorch_out, native_out):
assert native.size() == pop.size()
assert native.dtype == pop.dtype
assert torch.equal(native, pop)
def test_detach():
torch.manual_seed(42)
class Model(torch.nn.Module):
def forward(self, x):
return x.detach()
model = Model()
poptorch_model = poptorch.inferenceModel(model)
x = torch.tensor([1.0], requires_grad=True)
# Run on IPU.
poptorch_out = poptorch_model(x)
assert not poptorch_out.requires_grad
|
StarcoderdataPython
|
8081570
|
# noqa: D100
from birdy.client import notebook
def test_is_notebook(): # noqa: D103
# we expect True or False but no exception
notebook.is_notebook()
|
StarcoderdataPython
|
1948987
|
from esahub import scihub, utils, checksum, check, main
import unittest
import contextlib
import logging
import re
import datetime as DT
import pytz
import os
import sys
import subprocess
from shapely.wkt import loads as wkt_loads
from esahub.tests import config as test_config
from esahub import config
logger = logging.getLogger('esahub')
PY2 = sys.version_info < (3, 0)
SMALL_SIZE_QUERY = 'size: ???.* KB'
if hasattr(unittest.TestCase, 'subTest'):
class TestCase(unittest.TestCase):
pass
else:
class TestCase(unittest.TestCase):
@contextlib.contextmanager
def subTest(self, msg='', **params):
"""Mock subTest method so no exception is raised under Python2."""
utils.eprint('subTest:', msg, params)
yield
return
# -----------------------------------------------------------------------------
# TEST SETUP
# -----------------------------------------------------------------------------
def setUpModule():
test_config.set_test_config()
test_config.prepare()
def tearDownModule():
test_config.cleanup()
# -----------------------------------------------------------------------------
# SCIHUB
# -----------------------------------------------------------------------------
class ScihubTestCase(TestCase):
@classmethod
def setUpClass(cls):
test_config.set_test_config()
# def setUp(self):
def test_servers(self):
for name in scihub._get_available_servers():
cfg = config.CONFIG['SERVERS'][name]
with self.subTest(server_name=name):
url = '{}/search?q=*:*'.format(cfg['host'])
response = scihub.get_response(url)
#
# Assert that the HTML response has status code 200 (OK)
#
self.assertEqual(response.status, 200)
def test__generate_next_url(self):
# _generate_next_url(url, total=None)
pass
def test__parse_page(self):
# _parse_page(url, first=False)
pass
def test__get_file_list_from_url(self):
# _get_file_list_from_url(url, limit=None)
pass
def test__callback(self):
# _callback(result)
pass
def test__build_query(self):
# _build_query(query={})
pass
def test__build_url(self):
# _build_url(query, server)
pass
def test__download(self):
# _download(url, destination, quiet=None, queue=None)
pass
def test__get_file_list_wrapper(self):
# _get_file_list_wrapper(url)
pass
def test__ping_single(self):
# _ping_single(servername)
pass
def test__auto_detect_server_from_query(self):
queries = [
# (query, server)
({'mission': 'Sentinel-1'},
config.CONFIG['SATELLITES']['S1A']['source']),
({'mission': 'Sentinel-2'},
config.CONFIG['SATELLITES']['S2A']['source']),
({'mission': 'Sentinel-3'},
config.CONFIG['SATELLITES']['S3A']['source']),
({'satellite': 'S1A'},
config.CONFIG['SATELLITES']['S1A']['source']),
({'satellite': 'S3A'},
config.CONFIG['SATELLITES']['S3A']['source']),
({'satellite': 'S2B'},
config.CONFIG['SATELLITES']['S2B']['source']),
({'identifier': "S1A_IW_OCN__2SDV_20160924T181320_"
"20160924T181345_013198_014FDF_6692.zip"},
config.CONFIG['SATELLITES']['S1A']['source'])
]
for query, server in queries:
with self.subTest(query=query):
self.assertEqual(
scihub._auto_detect_server_from_query(query), server
)
def test__uuid_from_identifier(self):
products = scihub.search({}, limit=1)
for product in products:
with self.subTest(product=product):
self.assertEqual(
scihub.block(scihub._uuid_from_identifier,
product['title']),
product['uuid']
)
# def test__download_url_from_identifier(self):
# # _download_url_from_identifier(identifier)
# pass
# def test__checksum_url_from_identifier(self):
# # _checksum_url_from_identifier(identifier)
# pass
# def test__preview_url_from_identifier(self):
# # _preview_url_from_identifier(identifier)
# pass
# def test__download_url_from_uuid(self):
# # _download_url_from_uuid(uuid, host=None)
# pass
# def test__checksum_url_from_uuid(self):
# # _checksum_url_from_uuid(uuid, host=None)
# pass
# def test__preview_url_from_uuid(self):
# # _preview_url_from_uuid(uuid, host=None)
# pass
def test_get_response(self):
for name in scihub._get_available_servers():
with self.subTest(server_name=name):
response = scihub.get_response(
scihub._build_url({'query': '*:*'}, name)
)
self.assertEqual(response.status, 200)
def test_md5_from_file(self):
for f in utils.ls(config.CONFIG['GENERAL']['DATA_DIR']):
with self.subTest(file=f):
#
# Assert that the md5 sum computed from the local file is equal
# to the md5 sum obtained from the remote server.
#
try:
remote_md5 = scihub.md5(f)
self.assertEqual(
checksum.md5(f), remote_md5
)
except Exception as e:
self.fail('Remote MD5 could not be obtained: {}'.format(e))
def test_exists_true(self):
existing = scihub.search({}, limit=1)
for e in existing:
with self.subTest(product=e['filename']):
self.assertTrue(scihub.exists(e['filename']))
def test_exists_false(self):
not_existing = 'this_is_not_on_scihub'
self.assertFalse(scihub.exists(not_existing))
# -----------------------------------------------------------------------------
# SCIHUB SEARCH
# -----------------------------------------------------------------------------
class ScihubSearchTestCase(TestCase):
@classmethod
def setUpClass(cls):
test_config.set_test_config()
def test_query_entries(self):
query = {'mission': 'Sentinel-3'}
server = scihub._auto_detect_server_from_query(query,
available_only=True)[0]
url = scihub._build_url(query, server)
html = scihub.resolve(url)
#
# Assert that the number of entries found on the page matches the
# number of entries requested per page.
#
self.assertEqual(html.count('<entry>'),
config.CONFIG['GENERAL']['ENTRIES'])
def test_orbit_query(self):
for search_str, orbit in [
('ASC', 'ASCENDING'),
('DESC', 'DESCENDING')
]:
query = {'orbit': search_str}
result = scihub.search(query, limit=20)
for prod in result:
self.assertEqual(prod['orbit_direction'], orbit)
def test_id_query(self):
prod = scihub.search({}, limit=5)[-1]
query = {'id': prod['title']}
result = scihub.search(query)
self.assertEqual(len(result), 1)
self.assertEqual(result[0], prod)
def test_queries(self):
queries = [
# (name, query)
('S3', {'mission': 'Sentinel-3'}),
]
for name, q in queries:
with self.subTest(name=name):
server = scihub._auto_detect_server_from_query(
q, available_only=True)[0]
url = scihub._build_url(q, server=server)
response = scihub.get_response(url)
#
# Assert that queries for each mission return a
# status code 200 (OK)
#
self.assertEqual(response.status, 200)
with self.subTest('count entries'):
q = {'mission': 'Sentinel-3'}
server = scihub._auto_detect_server_from_query(
q, available_only=True)[0]
url = scihub._build_url(q, server=server)
html = scihub.resolve(url)
#
# Assert that the number of entries found on the page matches the
# number of entries requested per page.
#
self.assertEqual(html.count('<entry>'),
config.CONFIG['GENERAL']['ENTRIES'])
def test_temporal_queries(self):
with self.subTest('yesterday'):
file_list = scihub.search({'mission': 'Sentinel-3',
'time': 'yesterday'},
limit=200)
yesterday = DT.datetime.now(pytz.utc)-DT.timedelta(1)
today = DT.datetime.now(pytz.utc)
start = DT.datetime(yesterday.year, yesterday.month, yesterday.day,
tzinfo=pytz.utc)
end = DT.datetime(today.year, today.month, today.day,
tzinfo=pytz.utc)
for f in file_list:
#
# Assert that the ingestiondate of each entry was yesterday.
#
self.assertGreaterEqual(f['ingestiondate'], start)
self.assertLessEqual(f['ingestiondate'], end)
with self.subTest('today'):
file_list = scihub.search({'mission': 'Sentinel-3',
'time': 'today'},
limit=200)
today = DT.datetime.now(pytz.utc)
start = DT.datetime(today.year, today.month, today.day,
tzinfo=pytz.utc)
for f in file_list:
#
# Assert that the ingestiondate of each entry is today.
#
self.assertGreaterEqual(f['ingestiondate'], start)
#
    # NOTE: This test presently fails because apparently,
# SciHub's `intersects` parameter does not work reliably.
#
def test_spatial_queries(self):
loc, ref_coords = next(iter(config.CONFIG['LOCATIONS'].items()))
with self.subTest(location=loc):
file_list = scihub.search(
{'location': [loc], 'time': 'to 2017-09-01T00:00:00Z'},
server='S3', limit=20)
for f in file_list:
with self.subTest(product=f['filename']):
#
# Assert that the products indeed intersect the
# requested location.
#
distance = wkt_loads(f['coords']).distance(
wkt_loads(ref_coords))
utils.eprint('Distance: {}'.format(distance))
self.assertLessEqual(distance, 0.5)
def test_get_file_list(self):
q = {'mission': 'Sentinel-3'}
limit = 107
file_list = scihub.search(q, limit=limit)
#
# Assert that only `limit` entries are returned.
#
self.assertEqual(limit, len(file_list))
for f in file_list:
#
# Assert that each entry contains the attributes `url`, `uuid` and
# `filename`.
#
self.assertIn('url', f)
self.assertIn('uuid', f)
self.assertIn('filename', f)
# -----------------------------------------------------------------------------
# SCIHUB DOWNLOAD
# -----------------------------------------------------------------------------
class ScihubDownloadTestCase(TestCase):
@classmethod
def setUpClass(cls):
test_config.set_test_config()
def setUp(self):
test_config.clear_test_data()
def tearDown(self):
test_config.clear_test_data()
def test_download(self):
file_list = scihub.search({'query': SMALL_SIZE_QUERY}, limit=1)
for f in file_list:
with self.subTest(url=f['url']):
result = scihub.download(f)
#
# Assert that the download didn't fail and that
# the returned file path exists.
#
self.assertNotEqual(result, False)
self.assertTrue(os.path.isfile(result))
def test_download_many(self):
file_list = scihub.search({'query': SMALL_SIZE_QUERY},
limit=2)
scihub.download(file_list)
#
# Assert that all downloads were successful.
#
local_files = utils.ls(config.CONFIG['GENERAL']['DATA_DIR'])
local_files_identifiers = [os.path.splitext(os.path.split(_)[1])[0]
for _ in local_files]
for f in file_list:
self.assertIn(f['filename'], local_files_identifiers)
for f in local_files:
with self.subTest(file=f):
_, healthy, msg = check.check_file(f, mode='file')
utils.eprint(msg)
self.assertTrue(healthy)
def test_redownload(self):
test_config.copy_corrupt_data()
local_files = utils.ls(config.CONFIG['GENERAL']['DATA_DIR'])
scihub.redownload(local_files)
new_local_files = utils.ls(config.CONFIG['GENERAL']['DATA_DIR'])
self.assertEqual(set(local_files), set(new_local_files))
for f in local_files:
with self.subTest(file=f):
_, healthy, msg = check.check_file(f, mode='file')
utils.eprint(msg)
self.assertTrue(healthy)
# -----------------------------------------------------------------------------
# CHECK
# -----------------------------------------------------------------------------
class CheckTestCase(TestCase):
@classmethod
def setUpClass(cls):
test_config.set_test_config()
def setUp(self):
test_config.copy_test_data()
def tearDown(self):
test_config.clear_test_data()
def test_check_file_md5_healthy(self):
for f in utils.ls(config.CONFIG['GENERAL']['DATA_DIR']):
with self.subTest(file=f):
#
# Assert that the files check out in `md5` mode.
#
try:
file_path, healthy, message = \
check.check_file(f, mode='md5')
self.assertTrue(healthy)
except Exception as e:
self.fail('File check failed: {}'.format(e))
def test_check_file_zip_healthy(self):
for f in utils.ls(config.CONFIG['GENERAL']['DATA_DIR']):
with self.subTest(file=f):
#
# Assert that the files check out in `file` mode.
#
try:
file_path, healthy, message = \
check.check_file(f, mode='file')
self.assertTrue(healthy)
except Exception as e:
self.fail('File check failed: {}'.format(e))
def test_check_file_md5_corrupt(self):
test_config.clear_test_data()
test_config.copy_corrupt_data()
for f in utils.ls(config.CONFIG['GENERAL']['DATA_DIR']):
with self.subTest(file=f):
#
# Assert that the files are detected as corrupt in `md5` mode.
#
try:
file_path, healthy, message = \
check.check_file(f, mode='md5')
self.assertFalse(healthy)
except Exception as e:
self.fail('File check failed: {}'.format(e))
def test_check_file_zip_corrupt(self):
test_config.clear_test_data()
test_config.copy_corrupt_data()
for f in utils.ls(config.CONFIG['GENERAL']['DATA_DIR']):
with self.subTest(file=f):
#
# Assert that the files are detected as corrupt in `file` mode.
#
try:
file_path, healthy, message = \
check.check_file(f, mode='file')
self.assertFalse(healthy)
except Exception as e:
self.fail('File check failed: {}'.format(e))
# -----------------------------------------------------------------------------
# CHECKSUM
# -----------------------------------------------------------------------------
class ChecksumTestCase(TestCase):
@classmethod
def setUpClass(cls):
test_config.set_test_config()
def setUp(self):
test_config.copy_test_data()
def tearDown(self):
test_config.clear_test_data()
def test_md5(self):
for f in utils.ls(config.CONFIG['GENERAL']['DATA_DIR']):
with self.subTest(file=f):
#
# Assert that the md5 checksum returned by checksum.md5() is
# equal to the md5 sum returned by bash md5 or md5sum tool.
#
for exe in ['md5', 'md5sum']:
if utils._which(exe) is not None:
bash_output = subprocess.check_output([exe, f])
if not PY2:
bash_output = bash_output.decode()
bash_md5 = re.search('[a-zA-Z0-9]{32}',
bash_output).group()
break
self.assertEqual(
checksum.md5(f), bash_md5
)
def test_etag_small_files(self):
for f in utils.ls(config.CONFIG['GENERAL']['DATA_DIR']):
with self.subTest(file=f):
#
# Assert that the computed etag is equal to the md5
# checksum for files smaller than the chunksize.
#
size_mb = max(10, int(os.path.getsize(f) / 1024**2))
self.assertEqual(
checksum.md5(f), checksum.etag(f, chunksize=2 * size_mb)
)
# def test_etag_large_files(self):
# pass
# -----------------------------------------------------------------------------
# MAIN
# -----------------------------------------------------------------------------
class MainTestCase(TestCase):
@classmethod
def setUpClass(cls):
test_config.set_test_config()
cls.check_mode = config.CONFIG['GENERAL']['CHECK_MODE']
config.CONFIG['GENERAL']['CHECK_MODE'] = 'file'
@classmethod
def tearDownClass(cls):
config.CONFIG['GENERAL']['CHECK_MODE'] = cls.check_mode
def setUp(self):
test_config.copy_test_data()
def tearDown(self):
test_config.clear_all()
def test_ls(self):
q = {'time': 'today', 'satellite': 'S3A',
'location': ['Ireland_Mace_Head']}
files = scihub.search(q)
result = main.ls(q)
self.assertEqual(len(result), len(files))
def test_get(self):
test_config.clear_test_data()
q = {'satellite': 'S3A', 'query': SMALL_SIZE_QUERY}
files = scihub.search(q, limit=2)
main.get(q, limit=2)
for f in files:
ext = '.zip'
with self.subTest(product=f['filename']):
self.assertTrue(
os.path.isfile(os.path.join(
config.CONFIG['GENERAL']['DATA_DIR'],
f['filename']) + ext)
)
def test_doctor(self):
test_config.copy_corrupt_data()
corrupt_files = utils.ls(test_config.TEST_DATA_DIR_CORRUPT,
path=False)
# healthy_files = utils.ls(test_config.TEST_DATA_DIR_ORIGINAL,
# path=False)
result = main.doctor()
bad_files = [os.path.split(status[0])[1]
for status in result if status[1] is False]
#
# Assert that the number of healthy/corrupt files detected are correct
#
self.assertEqual(len(bad_files), len(corrupt_files))
for corrupt_file in corrupt_files:
#
# Assert that each corrupt file has been registered.
#
self.assertIn(corrupt_file, bad_files)
def test_doctor_delete(self):
test_config.copy_corrupt_data()
corrupt_files = utils.ls(test_config.TEST_DATA_DIR_CORRUPT,
path=False)
healthy_files = utils.ls(test_config.TEST_DATA_DIR_ORIGINAL,
path=False)
main.doctor(delete=True)
#
# Assert that the corrupt files have been deleted.
#
for f in corrupt_files:
self.assertFalse(os.path.isfile(os.path.join(
config.CONFIG['GENERAL']['DATA_DIR'], f)))
#
# Assert that the healthy files have not been deleted.
#
for f in healthy_files:
self.assertTrue(os.path.isfile(os.path.join(
config.CONFIG['GENERAL']['DATA_DIR'], f)))
def test_doctor_repair(self):
test_config.copy_corrupt_data()
corrupt_files = utils.ls(test_config.TEST_DATA_DIR_CORRUPT,
path=False)
# healthy_files = utils.ls(test_config.TEST_DATA_DIR_ORIGINAL,
# path=False)
main.doctor(repair=True)
for f in corrupt_files:
repaired_f = os.path.join(config.CONFIG['GENERAL']['DATA_DIR'], f)
with self.subTest(file=repaired_f):
#
# Assert that each corrupt file has been repaired.
#
_, healthy, msg = check.check_file(repaired_f, mode='file')
utils.eprint(msg)
self.assertTrue(healthy)
# -----------------------------------------------------------------------------
# utils
# -----------------------------------------------------------------------------
class UtilsTestCase(TestCase):
def test_parse_datetime(self):
_dt = DT.datetime
dates = [
('Sep 5, 2016', (_dt(2016, 9, 5, 0, 0, 0),
_dt(2016, 9, 6, 0, 0, 0))),
('5 Sep 2016', (_dt(2016, 9, 5, 0, 0, 0),
_dt(2016, 9, 6, 0, 0, 0))),
('06/1998', (_dt(1998, 6, 1, 0, 0, 0),
_dt(1998, 7, 1, 0, 0, 0))),
('Jan 2018 to Oct 2018', (_dt(2018, 1, 1, 0, 0, 0),
_dt(2018, 11, 1, 0, 0, 0))),
('1 Jan 2018 to 30 Sep 2018', (_dt(2018, 1, 1, 0, 0, 0),
_dt(2018, 10, 1, 0, 0, 0))),
('12/2017', (_dt(2017, 12, 1, 0, 0, 0),
_dt(2018, 1, 1, 0, 0, 0))),
('2017/12', (_dt(2017, 12, 1, 0, 0, 0),
_dt(2018, 1, 1, 0, 0, 0))),
('2017/12 to 2018/12', (_dt(2017, 12, 1, 0, 0, 0),
_dt(2019, 1, 1, 0, 0, 0))),
('Jan 1, 2017, Jan 1, 2018', (_dt(2017, 1, 1, 0, 0, 0),
_dt(2018, 1, 2, 0, 0, 0))),
('to Jan 2018', (None, _dt(2018, 2, 1, 0, 0, 0))),
('2015 -', (_dt(2015, 1, 1, 0, 0, 0), None)),
('to 2017-09-01T00:00:00', (None, _dt(2017, 9, 1, 0, 0, 0)))
]
for date_str, date_obj in dates:
with self.subTest(date_str=date_str):
self.assertEqual(
utils.parse_datetime(date_str),
date_obj
)
|
StarcoderdataPython
|
4873691
|
from random import Random
from dataclasses import dataclass, InitVar
from typing import List, Dict, Optional, Set, Tuple, Iterable
from bson import ObjectId
from game.pkchess.character import Character
from game.pkchess.exception import (
MapTooFewPointsError, MapDimensionTooSmallError, MapShapeMismatchError, MapTooManyPlayersError,
MapPointUnspawnableError, SpawnPointOutOfMapError, NoPlayerSpawnPointError, UnknownResourceTypeError,
CoordinateOutOfBoundError, GamePlayerNotFoundError, MoveDestinationOutOfMapError, CenterOutOfMapError,
PathNotFoundError, PathSameDestinationError, PathEndOutOfMapError
)
from game.pkchess.flags import MapPointStatus, MapPointResource
from game.pkchess.objbase import BattleObject
from .mixin import ConvertibleMapMixin
__all__ = ("MapPoint", "MapCoordinate", "MapTemplate", "Map",)
@dataclass
class MapCoordinate:
"""Represents the coordinate of a map point."""
X: int
Y: int
def apply_offset(self, x_offset: int, y_offset: int) -> 'MapCoordinate':
"""
Return a new coordinate object which the offsets are applied.
Note that ``y_offset`` will be reversed to reflect the correct coordinate on the map.
> Map coordinate starts from left-top side. Right to increase X; **DOWN** to increase Y.
> However, offset starts from center. Right to increase X; **UP** to increase Y.
:param x_offset: offset for X
:param y_offset: offset for Y
:return: new offset-applied coordinate
:raises CoordinateOutOfBoundError: if the new coordinate will be negative value after applying the offsets
"""
new_obj = MapCoordinate(self.X + x_offset, self.Y - y_offset)
if new_obj.X < 0 or new_obj.Y < 0:
raise CoordinateOutOfBoundError()
return new_obj
def distance(self, other: 'MapCoordinate'):
return abs(self.X - other.X) + abs(self.Y - other.Y)
def __hash__(self):
return hash((self.X, self.Y))
def __eq__(self, other):
if not isinstance(other, MapCoordinate):
return False
return self.X == other.X and self.Y == other.Y
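# Illustrative example of the offset convention documented in `apply_offset` (added for
# clarity, not part of the original module): map Y grows downward while the offset's Y
# grows upward, so a positive y_offset moves the coordinate up.
# MapCoordinate(3, 5).apply_offset(1, 2)   # -> MapCoordinate(X=4, Y=3)
# MapCoordinate(0, 0).apply_offset(0, 1)   # raises CoordinateOutOfBoundError (Y would be -1)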
@dataclass
class MapPoint:
"""Represents a map point."""
status: MapPointStatus
coord: MapCoordinate
obj: Optional[BattleObject] = None
@dataclass
class MapTemplate(ConvertibleMapMixin):
"""
Map template.
This could be converted to :class:`MapModel` and
store to the database (initialize a game) by calling `to_model()`.
Set ``bypass_map_chack`` to ``True`` to bypass the available map point check and the size check.
This should be used only in tests.
"""
MIN_WIDTH = 9
MIN_HEIGHT = 9
MIN_AVAILABLE_POINTS = 81
width: int
height: int
points: List[List[MapPointStatus]]
resources: Dict[MapPointResource, List[MapCoordinate]]
bypass_map_chack: InitVar[bool] = False
def _check_map_dimension(self):
if self.width < MapTemplate.MIN_WIDTH or self.height < MapTemplate.MIN_HEIGHT:
raise MapDimensionTooSmallError()
def _check_available_points(self):
available = sum(sum(1 if p.is_map_point else 0 for p in row) for row in self.points)
if available < MapTemplate.MIN_AVAILABLE_POINTS:
raise MapTooFewPointsError(MapTemplate.MIN_AVAILABLE_POINTS, available)
def _check_player_spawn_point(self):
if not any(any(p == MapPointStatus.PLAYER for p in row) for row in self.points):
raise NoPlayerSpawnPointError()
def _check_resource_points(self):
for coords in self.resources.values():
for coord in coords:
x = coord.X
y = coord.Y
# Check out of map
if x >= self.width or y >= self.height:
raise SpawnPointOutOfMapError()
if not self.points[x][y].is_map_point:
raise MapPointUnspawnableError()
def _check_dimension_point_matrix(self):
try:
self.points[self.width - 1][self.height - 1]
except IndexError:
raise MapShapeMismatchError()
def __post_init__(self, bypass_map_chack: bool):
if bypass_map_chack:
return
self._check_map_dimension()
self._check_available_points()
self._check_player_spawn_point()
self._check_resource_points()
self._check_dimension_point_matrix()
def respawn(self):
pass # DRAFT: Game - game respawn object
def to_map(self, *,
players: Dict[ObjectId, Character] = None,
player_location: Dict[ObjectId, MapCoordinate] = None) \
-> 'Map':
pts = []
for x, pts_arr in enumerate(self.points):
arr = []
for y, pt in enumerate(pts_arr):
arr.append(MapPoint(pt, MapCoordinate(x, y)))
pts.append(arr)
return Map(self.width, self.height, pts, self.resources, self,
players=players, player_location=player_location)
@staticmethod
def load_from_file(path: str) -> Optional['MapTemplate']:
"""
Load the template from a map file.
This parsing method checks logic error, but not the format error.
If the map template is not found, return ``None``.
.. seealso::
See `doc/spec/map.md` for the specification of the map file.
:param path: path of the file
:return: a parsed `MapTemplate` or `None`
"""
try:
with open(path) as f:
lines = f.read().split("\n")
except FileNotFoundError:
return None
# Parse dimension
width, height = [int(n) for n in lines.pop(0).split(" ", 2)]
# Parse initial map points
points: List[List[MapPointStatus]] = [[] for _ in range(width)]
for y in range(height):
for x, elem in zip(range(width), lines.pop(0)):
points[x].append(MapPointStatus.cast(elem))
# parse resource spawning location
res_dict: Dict[MapPointResource, List[MapCoordinate]] = {}
for line in lines:
type_int, *coords = line.split(" ")
try:
res_type = MapPointResource.cast(type_int)
except ValueError:
raise UnknownResourceTypeError()
coords = [coord.split(",", 2) for coord in coords]
res_dict[res_type] = [MapCoordinate(int(x), int(y)) for x, y in coords]
return MapTemplate(width, height, points, res_dict)
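# Sketch of the file layout implied by the parser above (values illustrative; see
# doc/spec/map.md for the authoritative spec):
#   line 1: "<width> <height>"
#   next <height> lines: one map row each, <width> characters castable to MapPointStatus
#   remaining lines: "<resource type int> <x>,<y> <x>,<y> ..." for each resource type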
@dataclass
class Map:
"""
Represents the map.
If ``players`` has any element in it and ``player_location`` is empty,
players will be randomly deployed to the point where the status is :class:`MapPointStatus.PLAYER`.
The rest of the deployable points will be replaced with :class:`MapPointStatus.EMPTY`.
If both ``player_location`` and ``players`` are given, ``players`` will be ignored.
"""
RANDOM = Random()
width: int
height: int
points: List[List[MapPoint]]
resources: Dict[MapPointResource, List[MapCoordinate]]
template: MapTemplate
player_location: Dict[ObjectId, MapCoordinate] = None
players: InitVar[Optional[Set[ObjectId]]] = None
def __post_init__(self, players: Dict[ObjectId, Character]):
if not self.player_location:
self.player_location = {}
else:
for coord in self.player_location.values():
self.points[coord.X][coord.Y].status = MapPointStatus.PLAYER
if not self.player_location and players:
player_coords: Set[MapCoordinate] = {pt.coord for pt in self.points_flattened
if pt.status == MapPointStatus.PLAYER}
player_actual_count = len(players)
player_deployable_count = len(player_coords)
if player_actual_count > player_deployable_count:
raise MapTooManyPlayersError(player_deployable_count, player_actual_count)
# Randomly deploy player to deployable location
# --- Cast to list to use `random.shuffle()` because dict and set usually is somehow ordered
players = list(players.items())
player_coords: List[MapCoordinate] = list(player_coords)
self.RANDOM.shuffle(player_coords)
self.RANDOM.shuffle(players)
while players:
player_oid, player_character = players.pop()
coord = player_coords.pop()
self.player_location[player_oid] = coord
self.points[coord.X][coord.Y].status = MapPointStatus.PLAYER
self.points[coord.X][coord.Y].obj = player_character
# Fill the rest of the deployable location to be empty spot
if self.player_location:
occupied_coords: Set[MapCoordinate] = set(self.player_location.values())
empty_player_coords: Set[MapCoordinate] = {pt.coord for pt in self.points_flattened
if pt.status == MapPointStatus.PLAYER}
empty_player_coords.difference_update(occupied_coords)
for coord in empty_player_coords:
self.points[coord.X][coord.Y].status = MapPointStatus.EMPTY
def player_move(self, player_oid: ObjectId, x_offset: int, y_offset: int, max_move: float) -> bool:
"""
Move the player using the given coordinate offset.
Returns ``False`` if the destination is not an empty spot. Otherwise, move the player and return ``True``.
``max_move`` will be rounded.
:param player_oid: OID of the player
:param x_offset: offset of X
:param y_offset: offset of Y
:param max_move: maximum count of the moves allowed
:return: if the movement succeed
:raises GamePlayerNotFoundError: the player to be moved not found
:raises MoveDestinationOutOfMapError: movement destination is out of map
:raises PathNotFoundError: path from the player's location to the destination not found
"""
# Check player existence
if player_oid not in self.player_location:
raise GamePlayerNotFoundError()
# Apply the movement offset
try:
origin = self.player_location[player_oid]
destination = self.player_location[player_oid].apply_offset(x_offset, y_offset)
except CoordinateOutOfBoundError:
raise MoveDestinationOutOfMapError()
# Check if the new coordinate is out of map
if destination.X >= self.width or destination.Y >= self.height:
raise MoveDestinationOutOfMapError()
original_point = self.points[origin.X][origin.Y]
new_point = self.points[destination.X][destination.Y]
# Check if the new coordinate is not empty
if new_point.status != MapPointStatus.EMPTY:
return False
# Check if the path is connected and the destination is empty
if not self.get_shortest_path(origin, destination, round(max_move)):
raise PathNotFoundError(origin, destination)
# Move the player & update related variables
self.player_location[player_oid] = destination
new_point.obj = original_point.obj
new_point.status = MapPointStatus.PLAYER
original_point.obj = None
original_point.status = self.template.points[origin.X][origin.Y]
if original_point.status == MapPointStatus.PLAYER:
original_point.status = MapPointStatus.EMPTY
return True
def get_shortest_path(self, origin: MapCoordinate, destination: MapCoordinate, max_length: int) \
-> Optional[List[MapCoordinate]]:
"""
Get the first-found shortest path from ``origin`` to ``destination``.
``max_length`` must be a positive integer. (No runtime check)
Returns ``None`` if
- the point status of ``destination`` is not ``MapPointStatus.EMPTY``.
- the path is not found.
- the path length is longer than ``max_length`` but not yet reached the ``destination``.
:param origin: origin of the path
:param destination: desired destination
:param max_length: max length of the path
:return: path from `origin` to `destination` if found. `None` on not found
:raises PathSameDestinationError: if `origin` and `destination` are the same
"""
if origin == destination:
raise PathSameDestinationError()
        # Y is checked with >= like X: valid indices run from 0 to height - 1.
        origin_out_of_map = origin.X >= self.width or origin.Y >= self.height
        destination_out_of_map = destination.X >= self.width or destination.Y >= self.height
if origin_out_of_map or destination_out_of_map:
raise PathEndOutOfMapError(origin, destination, self.width, self.height)
# Check if the destination point is not empty
if self.points[destination.X][destination.Y].status != MapPointStatus.EMPTY:
return None
return self._get_shortest_path(origin, destination, max_length, [[origin]])
def _path_constructible(self, path: List[MapCoordinate], new_path_tail: MapCoordinate, destination: MapCoordinate):
if new_path_tail.X >= self.width or new_path_tail.Y >= self.height:
return False
tail = path[-1]
original_distance = tail.distance(destination)
new_distance = new_path_tail.distance(destination)
not_on_path = new_path_tail not in path
distance_le = new_distance <= original_distance
point_empty = self.points[new_path_tail.X][new_path_tail.Y].status == MapPointStatus.EMPTY
return not_on_path and distance_le and point_empty
def _get_shortest_path(self, origin: MapCoordinate, destination: MapCoordinate, max_length: int,
paths: List[List[MapCoordinate]] = None) -> Optional[List[MapCoordinate]]:
"""Helper method of ``self.get_shortest_path()``."""
new_paths = []
for path in paths:
# Extend every path to left, right, up, down
# Terminate if the current path without adding the new point is longer than allowed
if len(path) > max_length:
return None
tail = path[-1]
new_pts = [
tail.apply_offset(1, 0), # right
tail.apply_offset(-1, 0), # left
tail.apply_offset(0, 1), # up
tail.apply_offset(0, -1) # down
]
for new_pt in new_pts:
if self._path_constructible(path, new_pt, destination):
if new_pt == destination:
return path + [new_pt]
new_paths.append(path + [new_pt])
if not new_paths:
# Dead-end, returns `None`
return None
else:
# Search deeper
return self._get_shortest_path(origin, destination, max_length, new_paths)
def get_points(self, center: MapCoordinate, offsets: Iterable[Tuple[int, int]]) -> List[MapPoint]:
if center.X >= self.width or center.X < 0 or center.Y >= self.height or center.Y < 0:
raise CenterOutOfMapError()
pt_coord = []
for offset_x, offset_y in offsets:
try:
new_coord = center.apply_offset(offset_x, offset_y)
except CoordinateOutOfBoundError:
continue
if new_coord.X >= self.width or new_coord.Y >= self.height:
continue
pt_coord.append(new_coord)
pts = [self.points[coord.X][coord.Y] for coord in pt_coord]
return pts
@property
def points_flattened(self) -> List[MapPoint]:
"""
Get the 1D array of the points flattened from ``self.points``.
:return: flattened array of `self.points`
.. seealso::
https://stackoverflow.com/a/29244327/11571888
"""
return sum(self.points, [])
|
StarcoderdataPython
|
6622140
|
import capture
import getmap
import cutmap
if __name__ == '__main__':
    # 59.9055,24.7385,60.3133,25.2727 Helsinki
# 60.1607,24.9191,60.1739,24.9700
# 60.16446,24.93824,60.16776,24.95096
# 60.1162,24.7522,60.3041,25.2466
name = "Helsinki"
tif_file = "google_17m.tif"
tfw_file = "google_17m.tfw"
# lat1, lon1, lat2, lon2 = 60.1162,24.7522,60.3041,25.2466
key_list = {
"landuse": ["residential"]
}
# # get tif
# x = getmap.getpic(lat1, lon1, lat2, lon2,
# 17, source='google', style='s', outfile=tif_file)
# getmap.my_file_out(x, tfw_file, "keep")
# get aoi and poi
capture.get_poi_aoi(name, key_list)
# cut tif
cutmap.cut_aoi(name + "_aoi.csv", name, tfw_file, tif_file)
|
StarcoderdataPython
|
3432851
|
# Copyright (c) 2018 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
EC2.TransitGateway
~~~~~~~~~~~~~~
AWS EC2 Transit Gateway interface
'''
# Third Party imports
from botocore.exceptions import ClientError
from cloudify.exceptions import NonRecoverableError, OperationRetry
# Local imports
from cloudify_aws.ec2 import EC2Base
from cloudify_aws.common import constants, decorators, utils
RESOURCE_TYPE = 'EC2 Transit Gateway'
TG = 'TransitGateway'
TGS = 'TransitGateways'
TG_ID = 'TransitGatewayId'
TG_IDS = 'TransitGatewayIds'
TG_ATTACHMENT = 'TransitGatewayVpcAttachment'
TG_ATTACHMENTS = 'TransitGatewayVpcAttachments'
TG_ATTACHMENT_ID = 'TransitGatewayAttachmentId'
TG_ATTACHMENT_IDS = 'TransitGatewayAttachmentIds'
FAILED = ['failed', 'failing']
AVAILABLE = ['available', 'pendingAcceptance']
PENDING = ['initiatingRequest', 'pending', 'modifying']
UNAVAILABLE = ['deleted',
'deleting',
'rollingBack',
'rejected',
'rejecting']
class EC2TransitGateway(EC2Base):
'''
EC2 Transit Gateway
'''
def __init__(self, ctx_node, resource_id=None, client=None, logger=None):
EC2Base.__init__(self, ctx_node, resource_id, client, logger)
self.type_name = RESOURCE_TYPE
@property
def properties(self):
'''Gets the properties of an external resource'''
params = {TG_IDS: [self.resource_id]}
try:
resources = \
self.client.describe_transit_gateways(**params)
except ClientError:
resources = None
return resources if not resources else resources.get(
TGS, [None])[0]
@property
def status(self):
'''Gets the status of an external resource'''
props = self.properties
if not props:
return None
return props['State']
def create(self, params):
'''
Create a new AWS EC2 Transit Gateway.
'''
return self.make_client_call('create_transit_gateway', params)
def delete(self, params=None):
'''
Deletes an existing AWS EC2 Transit Gateway.
'''
self.logger.debug('Deleting %s with parameters: %s'
% (self.type_name, params))
res = self.client.delete_transit_gateway(**params)
self.logger.debug('Response: %s' % res)
return res
class EC2TransitGatewayAttachment(EC2Base):
'''
EC2 Transit Gateway Attachment
'''
def __init__(self, ctx_node, resource_id=None, client=None, logger=None):
EC2Base.__init__(self, ctx_node, resource_id, client, logger)
self.type_name = RESOURCE_TYPE
@property
def properties(self):
'''Gets the properties of an external resource'''
params = {TG_ATTACHMENT_IDS: [self.resource_id]}
try:
resources = \
self.client.describe_transit_gateway_vpc_attachments(**params)
except ClientError:
pass
else:
return None if not resources else resources.get(
TG_ATTACHMENTS, [None])[0]
return None
@property
def status(self):
'''Gets the status of an external resource'''
props = self.properties
if not props:
return None
return props['State']
def create(self, params):
'''
Create a new AWS EC2 Transit Gateway Attachment.
'''
return self.make_client_call(
'create_transit_gateway_vpc_attachment', params)
def accept(self, params):
'''
Accept a pending AWS EC2 Transit Gateway Attachment request.
'''
return self.make_client_call(
'accept_transit_gateway_vpc_attachment', params)
def delete(self, params=None):
'''
Deletes an existing AWS EC2 Transit Gateway Attachment.
'''
return self.make_client_call(
'delete_transit_gateway_vpc_attachment', params)
@decorators.aws_resource(EC2TransitGateway, resource_type=RESOURCE_TYPE)
def prepare(ctx, iface, resource_config, **_):
'''Prepares an AWS EC2 Transit Gateway'''
# Save the parameters
ctx.instance.runtime_properties['resource_config'] = resource_config
@decorators.aws_resource(EC2TransitGateway, RESOURCE_TYPE)
@decorators.wait_for_status(status_good=['available'],
status_pending=['pending'])
@decorators.tag_resources
def create(ctx, iface, resource_config, **_):
'''Creates an AWS EC2 Transit Gateway'''
params = utils.clean_params(
dict() if not resource_config else resource_config.copy())
# Actually create the resource
create_response = iface.create(params)[TG]
ctx.instance.runtime_properties['create_response'] = \
utils.JsonCleanuper(create_response).to_dict()
transit_gateway_id = create_response.get(TG_ID, '')
iface.update_resource_id(transit_gateway_id)
utils.update_resource_id(ctx.instance, transit_gateway_id)
@decorators.aws_resource(EC2TransitGateway, RESOURCE_TYPE,
ignore_properties=True)
@decorators.untag_resources
def delete(iface, resource_config, **_):
'''Deletes an AWS EC2 Transit Gateway'''
params = dict() if not resource_config else resource_config.copy()
if TG_ID not in params:
params.update({TG_ID: iface.resource_id})
iface.delete(params)
@decorators.aws_relationship(EC2TransitGatewayAttachment, RESOURCE_TYPE)
def request_vpc_attachment(ctx,
iface,
transit_gateway_id=None,
vpc_id=None,
subnet_ids=None,
**_):
transit_gateway_id = transit_gateway_id or \
ctx.source.instance.runtime_properties.get(
constants.EXTERNAL_RESOURCE_ID)
vpc_id = vpc_id or ctx.target.instance.runtime_properties.get(
constants.EXTERNAL_RESOURCE_ID)
subnet_ids = subnet_ids or ctx.target.instance.runtime_properties.get(
'subnets')
transit_gateway_attachment_id = \
get_attachment_id_from_runtime_props(ctx)
if not transit_gateway_id or not vpc_id:
raise NonRecoverableError(
'The "cloudify.relationships.aws.ec2.'
'attach_transit_gateway_to_vpc" relationship operation did not '
'receive a value for transit_gateway_id '
'({tgi}) or for vpc_id ({vi}).'.format(
tgi=transit_gateway_id, vi=vpc_id))
# If we are retrying then we have this ID.
# Normally, we could use the @decorators.wait_for_status decorator.
# However, because this is a relationship operation, neither the source
# node nor the target node is an attachment type.
if transit_gateway_attachment_id:
iface = EC2TransitGatewayAttachment(
ctx.source.node,
transit_gateway_attachment_id,
iface.client,
ctx.logger)
if iface.status in AVAILABLE:
return
if iface.status in PENDING:
raise OperationRetry(
'The {r} creation request '
'has been received and is processing. State: {s}.'.format(
r=TG_ATTACHMENT, s=iface.status))
elif iface.status in UNAVAILABLE + FAILED:
raise NonRecoverableError(
'The {r} creation request '
'results in a fatal error: {s}'.format(
r=TG_ATTACHMENT,
s=iface.status))
else:
request = {
TG_ATTACHMENT_ID: transit_gateway_attachment_id
}
try:
iface.accept(request)
except (NonRecoverableError, ClientError) as e:
raise OperationRetry(
'Waiting for {t} to be in valid state: {s}. '
'Error={e}'.format(t=transit_gateway_attachment_id,
s=iface.status,
e=e))
request = {
TG_ID: transit_gateway_id,
'VpcId': vpc_id,
'SubnetIds': subnet_ids
}
response = iface.create(request)
ctx.logger.info('Sent the {r} creation request.'.format(
r=TG_ATTACHMENT))
ctx.source.instance.runtime_properties[TG_ATTACHMENTS][vpc_id] = \
utils.JsonCleanuper(response).to_dict()
@decorators.aws_relationship(EC2TransitGatewayAttachment, RESOURCE_TYPE)
def delete_vpc_attachment(ctx, iface, transit_gateway_attachment_id=None, **_):
transit_gateway_attachment_id = transit_gateway_attachment_id or \
get_attachment_id_from_runtime_props(ctx)
if not transit_gateway_attachment_id:
ctx.logger.error('No transit_gateway_attachment_id was provided. '
'Skipping delete attachment.')
return
iface = EC2TransitGatewayAttachment(
ctx.source.node,
transit_gateway_attachment_id,
iface.client,
ctx.logger)
request = {
TG_ATTACHMENT_ID: transit_gateway_attachment_id
}
if iface.status == 'deleting':
raise OperationRetry(
'The {r} deletion request has been received and is processing. '
'State: {s}.'.format(r=TG_ATTACHMENT, s=iface.status))
elif iface.status in UNAVAILABLE:
ctx.logger.info('The {r} has been deleted.'.format(
r=TG_ATTACHMENT))
return
iface.delete(request)
raise OperationRetry(
'Sent the {r} deletion request.'.format(r=TG_ATTACHMENT))
def get_attachment_id(props):
attachment = props.get(TG_ATTACHMENT, {})
return attachment.get(TG_ATTACHMENT_ID)
def get_attachment_id_from_runtime_props(ctx):
vpc_id = ctx.target.instance.runtime_properties.get(
constants.EXTERNAL_RESOURCE_ID)
if TG_ATTACHMENTS in ctx.source.instance.runtime_properties:
if vpc_id in ctx.source.instance.runtime_properties[TG_ATTACHMENTS]:
return ctx.source.instance.runtime_properties[
TG_ATTACHMENTS][vpc_id][TG_ATTACHMENT][TG_ATTACHMENT_ID]
else:
ctx.source.instance.runtime_properties[TG_ATTACHMENTS] = {}
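# Illustrative shape of the runtime properties this module writes in
# request_vpc_attachment and reads back here (all IDs below are hypothetical):
# ctx.source.instance.runtime_properties['TransitGatewayVpcAttachments'] = {
#     'vpc-0123456789abcdef0': {
#         'TransitGatewayVpcAttachment': {
#             'TransitGatewayAttachmentId': 'tgw-attach-0123456789abcdef0',
#             'State': 'pending',
#         }
#     }
# }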
|
StarcoderdataPython
|
6605313
|
#!/usr/bin/env python
import torch
import torchvision
import base64
import cupy
import cv2
import flask
import getopt
import gevent
import gevent.pywsgi
import glob
import h5py
import io
import math
import moviepy
import moviepy.editor
import numpy
import os
import random
import re
import scipy
import scipy.io
import shutil
import sys
import tempfile
import time
import zipfile
##########################################################
assert(int(str('').join(torch.__version__.split('.')[0:2])) >= 12) # requires at least pytorch version 1.2.0
torch.set_grad_enabled(False) # make sure to not compute gradients for computational performance
torch.backends.cudnn.enabled = True # make sure to use cudnn for computational performance
##########################################################
objCommon = {}
exec(open('./common.py', 'r').read())
exec(open('./models/disparity-estimation.py', 'r').read())
exec(open('./models/disparity-adjustment.py', 'r').read())
exec(open('./models/disparity-refinement.py', 'r').read())
exec(open('./models/pointcloud-inpainting.py', 'r').read())
##########################################################
print('large parts of this benchmark were adapted from <NAME>')
print('this implementation first downloads the official evaluation scripts')
print('the depth boundary error is currently different from the paper')
print('this is due to the official evaluation scripts being outdated')
##########################################################
abs_rel = [ numpy.nan ] * 1000
sq_rel = [ numpy.nan ] * 1000
rms = [ numpy.nan ] * 1000
log10 = [ numpy.nan ] * 1000
thr1 = [ numpy.nan ] * 1000
thr2 = [ numpy.nan ] * 1000
thr3 = [ numpy.nan ] * 1000
dde_0 = [ numpy.nan ] * 1000
dde_m = [ numpy.nan ] * 1000
dde_p = [ numpy.nan ] * 1000
dbe_acc = [ numpy.nan ] * 1000
dbe_com = [ numpy.nan ] * 1000
pe_fla = []
pe_ori = []
##########################################################
torch.hub.download_url_to_file('ftp://m1455541:<EMAIL>/evaluation_scripts.zip', './benchmark-ibims-scripts.zip')
objZip = zipfile.ZipFile('./benchmark-ibims-scripts.zip', 'r')
strScript = objZip.read('evaluation_scripts/evaluate_ibims_error_metrics.py').decode('utf-8')
strScript = strScript.replace('# exclude masked invalid and missing measurements', 'idx = gt!=0')
strScript = strScript.replace('gt=gt[gt!=0]', 'gt=gt[idx]')
strScript = strScript.replace('pred=pred[pred!=0]', 'pred=pred[idx]')
exec(strScript)
objZip.close()
##########################################################
torch.hub.download_url_to_file('ftp://m1455541:<EMAIL>/ibims1_core_mat.zip', './benchmark-ibims-data.zip')
objZip = zipfile.ZipFile('./benchmark-ibims-data.zip', 'r')
for intMat, strMat in enumerate([ strFile for strFile in objZip.namelist() if strFile.endswith('.mat') ]):
print(intMat, strMat)
objMat = scipy.io.loadmat(io.BytesIO(objZip.read(strMat)))['data']
tenImage = torch.FloatTensor(numpy.ascontiguousarray(objMat['rgb'][0][0][:, :, ::-1].transpose(2, 0, 1)[None, :, :, :].astype(numpy.float32) * (1.0 / 255.0))).cuda()
tenDisparity = disparity_estimation(tenImage)
tenDisparity = disparity_refinement(torch.nn.functional.interpolate(input=tenImage, size=(tenDisparity.shape[2] * 4, tenDisparity.shape[3] * 4), mode='bilinear', align_corners=False), tenDisparity)
tenDisparity = torch.nn.functional.interpolate(input=tenDisparity, size=(tenImage.shape[2], tenImage.shape[3]), mode='bilinear', align_corners=False) * (max(tenImage.shape[2], tenImage.shape[3]) / 256.0)
tenDepth = 1.0 / tenDisparity
valid = objMat['mask_transp'][0][0] * objMat['mask_invalid'][0][0] * (objMat['depth'][0][0] != 0.0)
pred = tenDepth[0, 0, :, :].cpu().numpy()
npyLstsqa = numpy.stack([pred[valid == 1.0].flatten(), numpy.full([int((valid == 1.0).sum().item())], 1.0, numpy.float32)], 1)
npyLstsqb = objMat['depth'][0][0][valid == 1.0].flatten()
npyScalebias = numpy.linalg.lstsq(npyLstsqa, npyLstsqb, None)[0]
pred = (pred * npyScalebias[0]) + npyScalebias[1]
abs_rel[intMat], sq_rel[intMat], rms[intMat], log10[intMat], thr1[intMat], thr2[intMat], thr3[intMat] = compute_global_errors((objMat['depth'][0][0] * valid).flatten(), (pred * valid).flatten())
dde_0[intMat], dde_m[intMat], dde_p[intMat] = compute_directed_depth_error((objMat['depth'][0][0] * valid).flatten(), (pred * valid).flatten(), 3.0)
dbe_acc[intMat], dbe_com[intMat] = compute_depth_boundary_error(objMat['edges'][0][0], pred)
if objMat['mask_wall_paras'][0][0].size > 0:
pe_fla_wall, pe_ori_wall = compute_planarity_error(objMat['depth'][0][0] * valid, pred * valid, objMat['mask_wall_paras'][0][0], objMat['mask_wall'][0][0] * valid, objMat['calib'][0][0])
pe_fla.extend(pe_fla_wall.tolist())
pe_ori.extend(pe_ori_wall.tolist())
# end
if objMat['mask_table_paras'][0][0].size > 0:
pe_fla_table, pe_ori_table = compute_planarity_error(objMat['depth'][0][0] * valid, pred * valid, objMat['mask_table_paras'][0][0], objMat['mask_table'][0][0] * valid, objMat['calib'][0][0])
pe_fla.extend(pe_fla_table.tolist())
pe_ori.extend(pe_ori_table.tolist())
# end
if objMat['mask_floor_paras'][0][0].size > 0:
pe_fla_floor, pe_ori_floor = compute_planarity_error(objMat['depth'][0][0] * valid, pred * valid, objMat['mask_floor_paras'][0][0], objMat['mask_floor'][0][0] * valid, objMat['calib'][0][0])
pe_fla.extend(pe_fla_floor.tolist())
pe_ori.extend(pe_ori_floor.tolist())
# end
# end
objZip.close()
##########################################################
print('abs_rel = ', numpy.nanmean(abs_rel))
print('sq_rel = ', numpy.nanmean(sq_rel))
print('rms = ', numpy.nanmean(rms))
print('log10 = ', numpy.nanmean(log10))
print('thr1 = ', numpy.nanmean(thr1))
print('thr2 = ', numpy.nanmean(thr2))
print('thr3 = ', numpy.nanmean(thr3))
print('dde_0 = ', numpy.nanmean(dde_0))
print('dde_m = ', numpy.nanmean(dde_m))
print('dde_p = ', numpy.nanmean(dde_p))
print('dbe_acc = ', numpy.nanmean(dbe_acc))
print('dbe_com = ', numpy.nanmean(dbe_com))
print('pe_fla = ', numpy.nanmean(pe_fla))
print('pe_ori = ', numpy.nanmean(pe_ori))
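##########################################################
# Note on the per-image alignment used above: npyLstsqa stacks [pred, 1], so
# numpy.linalg.lstsq solves pred * scale + bias ~= gt over the valid pixels
# (an affine fit) before the error metrics are computed. Toy illustration with
# hypothetical numbers: pred = [1, 2, 3], gt = [2.1, 4.0, 6.2] gives
# scale = 2.05 and bias = 0.0, so the aligned prediction is [2.05, 4.10, 6.15].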
|
StarcoderdataPython
|
11322070
|
import datetime
import simplejson as json
import peewee
import redis
import os
from app.database import db
from app.settings import AppConfig
from app.models.FeeModel import FeeModel
from app.util.nanote import Nanote
from app.util.dateutil import format_js_iso
# Redis stores message counts, because postgres count is a slow operation
RD_COUNT_KEY = 'mtcount'
rd = redis.Redis(host=os.getenv('REDIS_HOST', 'localhost'))
class Message(db.Model):
block_hash = peewee.CharField(max_length=64, index=True)
address = peewee.CharField(max_length=64)
message_in_raw = peewee.CharField()
created_at = peewee.DateTimeField(default=datetime.datetime.utcnow, index=True)  # pass the callable so each row gets its own timestamp
premium = peewee.BooleanField(default=False)
hidden = peewee.BooleanField(default=False, index=True)
destination = peewee.CharField(max_length=64)
class Meta:
db_table = 'messages'
@staticmethod
def validate_block(block: dict) -> tuple:
"""Ensure a block is to the appropriate destination, of the minimum amount, etc."""
block_contents = json.loads(block['contents'])
if block_contents['link_as_account'] != AppConfig.MONKEYTALKS_ACCOUNT:
return (False, "Transaction wasn't sent to MonkeyTalks account")
elif int(block['amount']) - FeeModel.get_fee() <= 0:
return (False, "Transaction amount wasn't enough to cover fee")
elif not Nanote().validate_message(block['amount']):
return (False, "Message has invalid checksum - can't be decoded")
return (True, "Valid")
@classmethod
def save_block_as_message(cls, block: dict):
block_contents = json.loads(block['contents'])
return cls.save_as_message(block['amount'], block['hash'], block_contents['account'], block_contents['link_as_account'])
@classmethod
def save_as_message(cls, amount: str, block_hash: str, account: str, destination: str):
premium = False
if int(amount) - FeeModel().get_premium_fee() > 0:
premium = True
message = Message(
block_hash=block_hash,
destination=destination,
message_in_raw=str(int(amount)),
created_at=datetime.datetime.utcnow(),
premium=premium,
address=account
)
if message.save() > 0:
message.inc_message_count()
return message
return None
def inc_message_count(self) -> int:
"""Increment message count for a particular account and
return the new count"""
old_count = rd.hget(self.address, RD_COUNT_KEY)
if old_count is None:
rd.hset(self.address, RD_COUNT_KEY, '1')
return 1
else:
old_count = int(old_count.decode('utf-8'))
old_count += 1
rd.hset(self.address, RD_COUNT_KEY, str(old_count))
return old_count
def get_message_count(self) -> int:
"""Retrieve message count for a particular account"""
count = rd.hget(self.address, RD_COUNT_KEY)
if count is None:
return 0
return int(count.decode('utf-8'))
@staticmethod
def format_message(message) -> dict:
"""Format a message in json to send to the UI"""
message_json = {
'id': message.id,
'content': message.message_in_raw,
'date': format_js_iso(message.created_at),
'premium': message.premium,
'address': message.address,
'count': message.get_message_count()
}
return message_json
@staticmethod
def format_message_advanced(message) -> dict:
"""Format a message in json to send to the UI"""
message_json = {
'id': message.id,
'content': message.message_in_raw,
'date': format_js_iso(message.created_at),
'premium': message.premium,
'address': message.address,
'count': message.get_message_count(),
'block_hash': message.block_hash
}
return message_json
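# Hedged usage sketch (field values are illustrative): a callback block that
# passes validate_block is persisted with save_block_as_message, which bumps
# the per-address Redis counter; format_message then exposes that count.
# ok, reason = Message.validate_block(block)
# if ok:
#     msg = Message.save_block_as_message(block)
#     payload = Message.format_message(msg)   # includes the Redis-backed 'count'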
|
StarcoderdataPython
|
6529685
|
from argparse import Namespace
def namespace(d):
assert isinstance(d, dict)
return Namespace(**d)
class FeedArgsDict:
def __init__(self, func, args=None, force_return=None):
assert callable(func)
args = namespace(args if args is not None else {})  # avoid a mutable default argument
self.func = func
self.args = args
self.force_return = force_return
def __call__(self, *args_ignore, **kwargs_ignore):
func = self.func
args = self.args
force_return = self.force_return
output = func(args)
if force_return is not None:
return force_return
return output
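# Minimal usage sketch (function and argument names are hypothetical): the
# wrapper ignores whatever it is called with and always feeds the stored args.
if __name__ == "__main__":
    def _area(ns):
        return ns.width * ns.height

    feed = FeedArgsDict(_area, args={"width": 3, "height": 4})
    assert feed() == 12
    assert feed("ignored", key="also ignored") == 12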
|
StarcoderdataPython
|
5114391
|
import numpy as np
import colorsys
def get_colors(num_colors):
colors=[]
for i in np.arange(0., 360., 360. / num_colors):
hue = i/360.
lightness = (50 + np.random.rand() * 10)/100.
saturation = (90 + np.random.rand() * 10)/100.
colors.append(colorsys.hls_to_rgb(hue, lightness, saturation))
return colors
color_list = [
# some example colors
'#F4561D',
'#F1911E',
'#F1BD1A',
# 16 color list
'#AD2323',
'#2A4BD7',
'#1D6914',
'#814A19',
'#8126C0',
'#A0A0A0',
'#81C57A',
'#9DAFFF',
'#29D0D0',
'#FF9233',
'#FFEE33',
'#E9DEBB',
'#FFCDF3',
'#FFFFFF',
'#575757',
'#000000'
]
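# Illustrative usage sketch: build a small palette of evenly spaced hues and
# pair it with the first few fixed hex colors above.
# random_palette = get_colors(5)          # five (r, g, b) tuples in [0, 1]
# combined = list(zip(random_palette, color_list[:5]))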
|
StarcoderdataPython
|
6683310
|
import datetime
import os
from django.test import Client, TestCase
from django.db import transaction
from django.db.utils import IntegrityError
from django.contrib.auth import get_user_model
from django.urls import reverse
from decimal import Decimal
from formsaurus.models import (
Survey, Submission, FileUploadAnswer)
User = get_user_model()
class FileUploadTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(
'john',
'<EMAIL>',
'johnpassword')
self.client = Client()
def test_required_file_upload(self):
survey = Survey.objects.create(
name='Test Survey',
user=self.user,
published=True,
)
survey.add_file_upload(
'Photo?',
required=True,
)
# Get submission
response = self.client.get(
reverse('formsaurus:survey', args=[survey.id]))
self.assertEqual(response.status_code, 302)
submission = Submission.objects.get(survey=survey)
self.assertIsNotNone(submission)
question = survey.first_question
self.assertIsNotNone(question)
response = self.client.get(reverse('formsaurus:question', args=[
survey.id, question.id, submission.id]))
self.assertEqual(response.status_code, 200)
# Empty
response = self.client.post(reverse('formsaurus:question', args=[
survey.id, question.id, submission.id]))
self.assertEqual(response.status_code, 200)
answers = submission.answers()
self.assertEqual(0, len(answers))
# Send File
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
with open(os.path.join(BASE_DIR, 'tests/yes_no.py'), 'rb') as fp:  # open in binary mode for upload
response = self.client.post(reverse('formsaurus:question', args=[
survey.id, question.id, submission.id]), {'file': fp})
self.assertEqual(response.status_code, 302)
answers = submission.answers()
self.assertEqual(1, len(answers))
answer = answers[0]
self.assertTrue(isinstance(answer, FileUploadAnswer))
self.assertIsNotNone(answer.file)
self.assertEqual(answer.file, f'files/survey/{survey.id}/{submission.id}/yes_no.py')
|
StarcoderdataPython
|
127395
|
from PIL import ImageDraw, Image
import numpy as np
import hashlib
import random
# array_list = [1]
background_color = '#F2F1F2'
colors = ['#CD00CD', 'Red', 'Orange', "#66FF00", "#2A52BE"]
def generate_array(bytes):
## Generate a 12 x 12 boolean array from the md5 digest bytes
# 6 * 12 array built from 9 digest bytes (72 bits)
need_array = np.array([bit == '1' for byte in bytes[3:3 + 9] for bit in bin(byte)[2:].zfill(8)]).reshape(6, 12)
# Mirror it to get the full, vertically symmetric 12 * 12 array
need_array = np.concatenate((need_array, need_array[::-1]), axis=0)
# Blank out the one-pixel border
for i in range(12):
need_array[0, i] = 0
need_array[11, i] = 0
need_array[i, 0] = 0
need_array[i, 11] = 0
return need_array
def generate_pyxies(pyxie_size: int, s: str) -> None:
bytes = hashlib.md5(s.encode('utf-8')).digest()
need_color = generate_array(bytes)
## Draw image
img_size = (pyxie_size, pyxie_size)
block_size = pyxie_size // 12 # Size
img = Image.new('RGB', img_size, background_color)
draw = ImageDraw.Draw(img)
for x in range(pyxie_size):
for y in range(pyxie_size):
need_to_paint = need_color[x // block_size, y // block_size]
if need_to_paint:
draw.point((x, y), random.choice(colors))
format = 'jpeg'
path = f'CryptoPyxie_{s}.{format}'
img.save(path, format)
if __name__ == "__main__":
# argv[1] - size of pictures (pixels)
# argv[2] - int - hash generate
from sys import argv
cryptopyxie_size = int(argv[1])
cryptopyxie_name = argv[2]
generate_pyxies(int(cryptopyxie_size // 12 * 12), cryptopyxie_name)  # size is rounded down to a multiple of 12
|
StarcoderdataPython
|
8013526
|
from datetime import datetime
import merra_urls
import threaded_downloader
urls = merra_urls.url_generator(
time_interval=(datetime(2020, 3, 30), datetime(2020, 3, 31)),
lat_interval=(26, 37),
lon_interval=(-107, -93),
collections=[
{
"collection": "tavg1_2d_slv_Nx",
"short_name": "M2T1NXSLV",
"fields": ["PS", "T10M", "U50M", "V50M"],
}
],
)
dl = threaded_downloader.DownloadManager()
dl.download_path = "./sample_downloads/"
dl.download_urls = list(urls)
dl.start_download()
|
StarcoderdataPython
|
3566733
|
import unittest
from orderedattrdict import AttrDict
from gramex.handlers.basehandler import check_membership
def check(auth, **kwargs):
return
class TestMembership(unittest.TestCase):
'''Test check_membership'''
def check(self, condition, **kwargs):
user = AttrDict(current_user=AttrDict(kwargs))
self.assertEqual(all(self.auth(user)), condition)
def test_no_condition(self):
self.auth = check_membership([])
# No conditions actually will NOT allow any user through.
# If there are no conditions in gramex.yaml, we shouldn't be adding a check!
self.check(False)
def test_single_key(self):
self.auth = check_membership([{'name': 'a'}])
self.check(True, name='a')
self.check(True, name=['x', 'a', 'y'])
self.check(False, name='b')
self.check(False)
def test_nested_key(self):
self.auth = check_membership([{'name.first': 'a'}])
self.check(True, name={'first': 'a', 'last': 'b'})
self.check(True, name={'first': ['x', 'a', 'y'], 'last': 'b'})
self.check(False)
self.check(False, name={})
self.check(False, name={'first': ''})
self.check(False, name={'first': 'b'})
self.check(False, name={'last': 'a'})
def test_multi_key(self):
self.auth = check_membership([{'name': 'a', 'gender': 'm'}])
self.check(True, name='a', gender='m')
self.check(True, name=['x', 'a', 'y'], gender=['', 'x', 'm', 'f'])
self.check(False, name='b', gender='m')
self.check(False, name='b', gender=['m', ''])
self.check(False, name='a', gender='f')
self.check(False, name=['a', ''], gender='f')
self.check(False, name='b', gender='f')
self.check(False, name=[''], gender=[''])
def test_multi_cond(self):
self.auth = check_membership([{'name': 'a', 'gender': 'm'}, {'name': 'b', 'gender': 'f'}])
self.check(True, name='a', gender='m')
self.check(True, name=['a', 'b'], gender=['m', 'f'])
self.check(True, name='b', gender='f')
self.check(True, name=['a', 'x'], gender=['m', ''])
self.check(True, name=['x', 'b'], gender='f')
self.check(False)
self.check(False, name='a', gender='f')
self.check(False, name='b', gender='m')
self.check(False, name='', gender='')
|
StarcoderdataPython
|
6501707
|
# Generated by Django 2.1.7 on 2019-02-23 03:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('beers', '0015_auto_20190221_2323'),
]
operations = [
migrations.AddField(
model_name='beer',
name='stem_and_stein_pk',
field=models.PositiveIntegerField(blank=True, null=True, unique=True),
),
]
|
StarcoderdataPython
|
241452
|
<filename>draft/wifi/client_udp_video.py<gh_stars>0
#!/usr/bin/env python3
import cv2, socket, base64, numpy as np
server_address = ("127.0.0.1", 9999)
buff_size = 65536 # max buffer size
client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, buff_size)
init_msg = b"Init video transmission from server by this message"
client_socket.sendto(init_msg, server_address)
while True:
msg = client_socket.recv(buff_size)
# data = base64.b64decode(msg, ' /')
data = msg
npdata = np.frombuffer(data, dtype=np.uint8)  # np.fromstring is deprecated for binary data
frame = cv2.imdecode(npdata, 1) # 1 == cv2.IMREAD_COLOR: decode as a 3-channel BGR image
cv2.imshow("RECEIVING VIDEO", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
client_socket.close()
break
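# Hedged sketch of a matching server loop (not part of this file; names are
# illustrative): it waits for the init datagram, then streams JPEG-encoded
# frames back to the client's address. Each encoded frame must stay under the
# UDP datagram/buffer size used above.
# server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# server.bind(server_address)
# _, client_addr = server.recvfrom(buff_size)
# cap = cv2.VideoCapture(0)
# while True:
#     ok, frame = cap.read()
#     if not ok:
#         break
#     ok, jpeg = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, 70])
#     server.sendto(jpeg.tobytes(), client_addr)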
|
StarcoderdataPython
|
5131659
|
import logging
import requests
from base64 import b64decode
from hashlib import sha1
from onelogin.saml2.utils import OneLogin_Saml2_Utils
from onelogin.saml2.xml_templates import OneLogin_Saml2_Templates
from onelogin.saml2.constants import OneLogin_Saml2_Constants
from onelogin.saml2.ssl_adapter import SSLAdapter
from .errors import OneLogin_Saml2_ValidationError
logger = logging.getLogger(__name__)
def parse_saml2_artifact(artifact):
#
# SAMLBind - See 3.6.4 Artifact Format, for SAMLart format.
#
decoded = b64decode(artifact)
type_code = b'\x00\x04'
if decoded[:2] != type_code:
raise OneLogin_Saml2_ValidationError(
"The received Artifact does not have the correct header.",
OneLogin_Saml2_ValidationError.WRONG_ARTIFACT_FORMAT
)
index = str(int.from_bytes(decoded[2:4], byteorder="big"))
sha1_entity_id = decoded[4:24]
message_handle = decoded[24:44]
return index, sha1_entity_id, message_handle
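# Hedged round-trip sketch: build a synthetic SAMLart value with the layout the
# parser above expects (0x0004 type code, 2-byte endpoint index, 20-byte SHA-1
# of the IdP entityId, 20-byte message handle) and parse it back. The entityId
# below is hypothetical.
# from base64 import b64encode
# import os
# entity_id = 'https://idp.example.org/metadata'
# raw = b'\x00\x04' + (0).to_bytes(2, 'big') + sha1(entity_id.encode()).digest() + os.urandom(20)
# index, entity_hash, handle = parse_saml2_artifact(b64encode(raw))
# assert index == '0' and entity_hash == sha1(entity_id.encode()).digest()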
class Artifact_Resolve_Request:
def __init__(self, settings, saml_art):
self._settings = settings
self.soap_endpoint = self.find_soap_endpoint(saml_art)
self.saml_art = saml_art
sp_data = self._settings.get_sp_data()
uid = OneLogin_Saml2_Utils.generate_unique_id()
self._id = uid
issue_instant = OneLogin_Saml2_Utils.parse_time_to_SAML(OneLogin_Saml2_Utils.now())
request = OneLogin_Saml2_Templates.ARTIFACT_RESOLVE_REQUEST % \
{
'id': uid,
'issue_instant': issue_instant,
'entity_id': sp_data['entityId'],
'artifact': saml_art
}
self.__artifact_resolve_request = request
def find_soap_endpoint(self, saml_art):
idp = self._settings.get_idp_data()
index, sha1_entity_id, message_handle = parse_saml2_artifact(saml_art)
if sha1_entity_id != sha1(idp['entityId'].encode('utf-8')).digest():
raise OneLogin_Saml2_ValidationError(
f"The sha1 hash of the entityId returned in the SAML Artifact ({sha1_entity_id})"
f"does not match the sha1 hash of the configured entityId ({idp['entityId']})"
)
for ars_node in idp['artifactResolutionService']:
if ars_node['binding'] != "urn:oasis:names:tc:SAML:2.0:bindings:SOAP":
continue
if ars_node['index'] == index:
return ars_node
return None
def get_soap_request(self):
request = OneLogin_Saml2_Templates.SOAP_ENVELOPE % \
{
'soap_body': self.__artifact_resolve_request
}
return OneLogin_Saml2_Utils.add_sign(
request,
self._settings.get_sp_key(), self._settings.get_sp_cert(),
key_passphrase=self._settings.get_sp_key_passphrase(),
sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA256,
digest_algorithm=OneLogin_Saml2_Constants.SHA256,
)
def send(self):
security_data = self._settings.get_security_data()
headers = {"content-type": "application/soap+xml"}
url = self.soap_endpoint['url']
data = self.get_soap_request()
logger.debug(
"Doing a ArtifactResolve (POST) request to %s with data %s",
url, data
)
session = requests.session()
ssl_adapter = SSLAdapter(
security_data['soapClientCert'],
security_data['soapClientKey'],
security_data.get('soapClientPassphrase', None),
)
session.mount(url, ssl_adapter)
return session.post(
url=url,
data=data,
headers=headers,
)
def get_id(self):
"""
Returns the ArtifactResolve ID.
:return: ArtifactResolve ID
:rtype: string
"""
return self._id
|
StarcoderdataPython
|
1955650
|
<filename>main.py<gh_stars>1-10
# Importing necessary modules
import time
import pyttsx3
import os
import itertools
# Setting up a few properties
ROOT_DIR = os.getcwd()
POEM_DIR = ROOT_DIR + "\\poems"
os.chdir(POEM_DIR)
FILES_LIST = os.listdir()
speaker = pyttsx3.init()
speaker.setProperty("rate",150)
# An important function
def print_speak(msg, newline=True):
if not newline:
print(msg, end="")
else:
print(msg)
speaker.say(msg)
speaker.runAndWait()
# Pre Tasks
msg = "Hey there!\n"
print_speak(msg)
msg = "Choose a poem from below:"
print_speak(msg)
time.sleep(0.4)
num = itertools.count()
for poem in FILES_LIST:
print("Poem " + str(next(num)) + ": " + poem[:-4])
time.sleep(0.1)
msg = "Input the poem number: "
print_speak(msg, newline=False)
POEM_NUM = input()
# Main Tasks
while True:
print("\n")
FILE_NAME = FILES_LIST[int(POEM_NUM)]
temp = POEM_NUM
with open(FILE_NAME, "r") as rfile:
content = rfile.read()
line_list = content.split("\n")
for line in line_list:
print(line)
speaker.say(line)
speaker.runAndWait()
print("\n")
time.sleep(2)
msg = "Input 'n' if you want to hear the next poem"
print_speak(msg)
msg = "Input 'a' if you want to quit"
print_speak(msg)
msg = "Input the poem number: "
print_speak(msg, newline=False)
POEM_NUM = input().lower()
if POEM_NUM == "n":
POEM_NUM = (int(temp) + 1) % len(FILES_LIST)
if POEM_NUM == "a":
break
# Post Tasks
print("\n")
time.sleep(2)
msg = "Thanks for listening."
print_speak(msg)
msg = "Created by <NAME> & <NAME>"
print_speak(msg)
time.sleep(3)
|
StarcoderdataPython
|
9774598
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET # Use cElementTree or lxml if too slow
import audit as project_audit
import data as project_data
from collections import defaultdict
def get_element(osm_file, tags=('node', 'way', 'relation')):
"""Yield element if it is the right type of tag
Reference:
http://stackoverflow.com/questions/3095434/inserting-newlines-in-xml-file-generated-via-xml-etree-elementtree-in-python
"""
context = ET.iterparse(osm_file, events=('start', 'end'))
_, root = next(context)
for event, elem in context:
if event == 'end' and elem.tag in tags:
yield elem
root.clear()
# Get a sample from the full OSM file by writing one of every 20 elements
def get_sample(input_file, output_file):
with open(output_file, 'wb') as output:
output.write(b'<?xml version="1.0" encoding="UTF-8"?>\n')
output.write(b'<osm>\n ')
# Write every 20th top level element
for i, element in enumerate(get_element(input_file)):
if i % 20 == 0:
output.write(ET.tostring(element, encoding='utf-8'))
output.write(b'</osm>')
# Audit the OSM file.
# This function returns:
# - a dictionary of street names keyed by street type, and
# - a dictionary of counts keyed by street type
def audit2(osmfile):
osm_file = open(osmfile, "r")
street_types = defaultdict(set)
for event, elem in ET.iterparse(osm_file, events=("start",)):
if elem.tag == "node" or elem.tag == "way":
for tag in elem.iter("tag"):
if project_audit.is_street_name(tag):
project_audit.audit_street_type(street_types, tag.attrib['v'])
type_count = {}
for s in street_types:
type_count[s] = len(street_types[s])
return street_types, type_count
# Convert the input OSM file to JSON file.
# The process will replace abbreviated street types to full name.
# For example, 'Taikoo Shing Rd' -> 'Taikoo Shing Road'
def convert_map(input_file):
project_data.process_map(input_file, True)
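# Hedged end-to-end sketch (file names are illustrative):
# get_sample('city.osm', 'city_sample.osm')            # keep 1 element in 20
# street_types, type_count = audit2('city_sample.osm') # street names grouped by type
# convert_map('city_sample.osm')                       # write JSON via process_map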
|
StarcoderdataPython
|
1809080
|
# Generated by Django 1.9.5 on 2016-04-23 13:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("djangocms_page_meta", "0004_auto_20160409_1852"),
]
operations = [
migrations.AddField(
model_name="pagemeta",
name="fb_pages",
field=models.CharField(blank=True, default="", max_length=255, verbose_name="Facebook Pages ID"),
),
]
|
StarcoderdataPython
|
1919742
|
<gh_stars>1-10
import numpy as np
from collections import Counter
from pathlib import Path
from typing import Iterable, List
Sample_Input = """3,4,3,1,2
"""
def parse_input(input: str) -> tuple:
lines = input.strip().split(",")
return list(map(int, lines))
def simulate(fish: Iterable[int], days: int) -> int:
"""Don't brute force it, use math
Resources
https://numpy.org/doc/stable/reference/routines.linalg.html
https://numpy.org/doc/stable/reference/generated/numpy.linalg.matrix_power.html#numpy.linalg.matrix_power
"""
cache = Counter(fish)
init = np.array([cache[i] for i in range(9)])
A = np.array(
[
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
]
)
B = np.linalg.matrix_power(A, days)
return B.dot(init).sum()
def brute_force_simulate(fish: Iterable[int], days: int) -> int:
d = np.array(fish)
for _ in range(days):
d = brute_force_simulate_one_day(d)
return d.size  # return the fish count to match the annotation and simulate()
def brute_force_simulate_one_day(fish: Iterable[int]) -> List[int]:
new_fish = fish.size - np.count_nonzero(fish)
fish[fish == 0] = 7
fish = np.subtract(fish, 1)
fish = np.append(fish, [8] * new_fish)
return fish
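def sanity_check(days: int = 18) -> None:
    """Hedged cross-check sketch: both approaches should agree on the sample
    input (the well-known AoC 2021 day 6 example gives 26 fish after 18 days)."""
    sample = parse_input(Sample_Input)
    assert simulate(sample, days) == brute_force_simulate(list(sample), days) == 26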
if __name__ == "__main__":
fish_input = (Path.cwd() / "2021" / "data" / f"{Path(__file__).stem}_input.txt").read_text()
initial_fish = parse_input(fish_input)
print("========= NEW ============")
days = 18
simulated_fish = simulate(list(initial_fish), days=days)
print(f"Starting with {len(initial_fish):,} fish, after {days} days the projection is {simulated_fish:,}")
days = 256
simulated_fish = simulate(list(initial_fish), days=days)
print(f"Starting with {len(initial_fish):,} fish, after {days} days the projection is {simulated_fish:,}")
|
StarcoderdataPython
|
3232313
|
<filename>functions.py
# file with functions used in Main.py
import pycountry, sys
def date_change(date):
'''This function creates a new date in expected output format
and detects errors in given "date" parameter.
Example: MM/DD/YYYY to YYYY-MM-DD'''
if not date: # detect empty 'date' parameter
sys.stderr.write('STDERR: Date not found. Script will continue.\n')
return 'XXXX-XX-XX'
elif len(date) != 10: # detect date format
sys.stderr.write('STDERR: Date format invalid. Script will continue.\n')
return 'XXXX-XX-XX'
else: # change date format
new_date = date.split('/')
new_date.insert(0, new_date.pop(-1)) # change position of the year to 0 position in list
new_date = '-'.join(new_date) # create YYYY-MM-DD format
return new_date
def get_country_code(name):
'''This function searches for aplha_3 country code (DEU, POL, GIN)
by using name of subdivision as a parameter, and detects errors.
Example: Berlin to DEU'''
if not name: # detect empty 'name' parameter.
sys.stderr.write('STDERR: Error: subdivision name field is empty.\n')
return 'XXX'
else: # search for country code
for subdiv in list(pycountry.subdivisions): # iterate trough subdivision list
if name in subdiv.name:
code = subdiv.country_code # code is also alpha_2 code of a country
country = pycountry.countries.get(alpha_2 = code)
return country.alpha_3
return 'XXX' # if no matching subdivision was found, return 'XXX'
def impress_check(impress):
'''This function checks impressions column format
for errors and invalid formats.
Example: Unknown to 0'''
if not impress: # detect empty impress field
sys.stderr.write('STDERR: Error: impressions field is empty.\n')
return '0'
elif isinstance(impress, str): # detect invalid parameter type
sys.stderr.write('STDERR: Error: impressions field format invalid.\n')
return '0'
else:
return impress
def calculate_clicks(impress, CTR):
'''This function calculate number of clicks from CTR (Click To
impression Rate) and number of impressions. Also detects errors.
Example: 916, 0.67% to 6'''
if not impress or not CTR: # detect empty 'CTR' parameter.
sys.stderr.write('STDERR: Error: CTR field is empty. Can\'t calculate clicks.\n')
return '0'
else: # calculate clicks
try:
a = float(CTR[:-1])
b = float(impress)
clicks = b * (a/100)
return round(clicks)
except ValueError as error:
sys.stderr.write('STDERR: Error calculating clicks, {}\n'.format(error))
return '0'
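# Illustrative usage sketch mirroring the docstring examples above:
# date_change('12/20/2018')          -> '2018-12-20'
# get_country_code('Berlin')         -> 'DEU'
# calculate_clicks('916', '0.67%')   -> 6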
|
StarcoderdataPython
|
5139802
|
from starlette.middleware.base import BaseHTTPMiddleware
from core.logger import logger
# Custom access-log middleware, built on a BaseHTTPMiddleware instance
class RequestLoggerMiddleware(BaseHTTPMiddleware):
# dispatch must be implemented
async def dispatch(self, request, call_next):
logger.info(f"{request.method} url:{request.url}\nheaders: {request.headers.get('user-agent')}"
f"\nIP:{request.client.host}")
response = await call_next(request)
return response
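# Hedged usage sketch (app object is illustrative): attach the middleware to a
# Starlette/FastAPI application via add_middleware.
# from fastapi import FastAPI
# app = FastAPI()
# app.add_middleware(RequestLoggerMiddleware)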
|
StarcoderdataPython
|
389596
|
<reponame>tarunvelagala/python-75-hackathon<filename>date-time-1.py
# HANDLING DURATION
from datetime import *
dt = datetime(2018, 12, 20, 13, 2, 10)
duration = timedelta(days=15, hours=10, minutes=30)
print(dt+duration)
print(dt-duration)
# to measure the time taken by the program
from time import *
t1 = perf_counter()
i, sum = 0, 0
while(i < 1000000):
sum = sum+i
i += 1
sleep(3)
t2 = perf_counter()
print('execution time= %f seconds' % (t2-t1))
|
StarcoderdataPython
|
1648989
|
'''
@Author: <NAME>
@Date: 2021-01-07 15:04:21
@Description: Used when training the mixed model
@LastEditTime: 2021-02-06 22:40:20
'''
import os
import numpy as np
import time
import torch
from torch import nn, optim
from TrafficFlowClassification.TrafficLog.setLog import logger
from TrafficFlowClassification.utils.setConfig import setup_config
# Some models that can be used
from TrafficFlowClassification.models.resnet1d_ae import resnet_AE
from TrafficFlowClassification.data.dataLoader import data_loader
from TrafficFlowClassification.data.tensordata import get_tensor_data
from TrafficFlowClassification.utils.helper import adjust_learning_rate, save_checkpoint
from TrafficFlowClassification.utils.evaluate_tools import display_model_performance_metrics
# train process adapted for this training setup
from TrafficFlowClassification.utils.helper import AverageMeter, accuracy
mean_val = np.array([2.86401660e-03, 0.00000000e+00, 3.08146750e-03, 1.17455448e-02,
5.75561597e-03, 6.91365004e-04, 6.64955585e-02, 2.41380099e-02,
9.75861990e-01, 0.00000000e+00, 2.89814456e+02, 6.42617944e+01,
6.89227965e+00, 2.56964887e+02, 1.36799462e+02, 9.32648320e+01,
7.83185943e+01, 1.32048335e+02, 2.09555592e+01, 1.70122810e-02,
6.28544986e+00, 3.27195426e-03, 3.60230735e+01, 9.15340653e+00,
2.17694894e-06, 7.32748605e+01])
std_val = np.array([3.44500263e-02, 0.00000000e+00, 3.09222563e-02, 8.43027570e-02,
4.87519125e-02, 1.48120354e-02, 2.49138903e-01, 1.53477827e-01,
1.53477827e-01, 0.00000000e+00, 8.48196659e+02, 1.94163550e+02,
1.30259798e+02, 7.62370125e+02, 4.16966374e+02, 1.25455838e+02,
2.30658312e+01, 8.78612984e+02, 1.84367543e+02, 1.13978421e-01,
1.19289813e+02, 1.45965914e-01, 8.76535415e+02, 1.78680040e+02,
4.91812227e-04, 4.40298923e+03]) + 0.001
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
mean_val = torch.from_numpy(mean_val).float().to(device)
std_val = torch.from_numpy(std_val).float().to(device)
def train_process(train_loader, model, alpha, criterion_c, criterion_r, optimizer, epoch, device, print_freq):
"""训练一个 epoch 的流程
Args:
train_loader (dataloader): [description]
model ([type]): [description]
criterion_c ([type]): 计算分类误差
criterion_l ([type]): 计算重构误差
optimizer ([type]): [description]
epoch (int): 当前所在的 epoch
device (torch.device): 是否使用 gpu
print_freq ([type]): [description]
"""
c_loss = AverageMeter()
r_loss = AverageMeter()
losses = AverageMeter() # 在一个 train loader 中的 loss 变化
top1 = AverageMeter() # 记录在一个 train loader 中的 accuracy 变化
model.train() # 切换为训练模型
for i, (pcap, statistic, target) in enumerate(train_loader):
pcap = (pcap/255).to(device) # normalize the packet bytes as well
statistic = statistic.to(device)
statistic = (statistic - mean_val)/std_val # normalize the statistic features first
target = target.to(device)
classific_result, fake_statistic = model(pcap, statistic) # classification output and reconstruction
loss_c = criterion_c(classific_result, target) # classification loss
loss_r = criterion_r(statistic, fake_statistic) # reconstruction loss
loss = alpha * loss_c + loss_r # combine the two losses
# compute accuracy, record loss and accuracy
prec1 = accuracy(classific_result.data, target)
c_loss.update(loss_c.item(), pcap.size(0))
r_loss.update(loss_r.item(), pcap.size(0))
losses.update(loss.item(), pcap.size(0))
top1.update(prec1[0].item(), pcap.size(0))
# backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i + 1) % print_freq == 0:
logger.info(
'Epoch: [{0}][{1}/{2}], Loss {loss.val:.4f} ({loss.avg:.4f}), Loss_c {loss_c.val:.4f} ({loss_c.avg:.4f}), Loss_r {loss_r.val:.4f} ({loss_r.avg:.4f}), Prec@1 {top1.val:.3f} ({top1.avg:.3f})'
.format(epoch,
i,
len(train_loader),
loss=losses,
loss_c=c_loss,
loss_r=r_loss,
top1=top1))
def validate_process(val_loader, model, device, print_freq):
top1 = AverageMeter()
model.eval() # switch to evaluate mode
for i, (pcap, statistic, target) in enumerate(val_loader):
pcap = (pcap/255).to(device) # normalize the packet bytes as well
statistic = statistic.to(device)
statistic = (statistic - mean_val)/std_val # normalize the statistic features first
target = target.to(device)
with torch.no_grad():
output, _ = model(pcap, statistic) # compute output
# measure accuracy and record loss
prec1 = accuracy(output.data, target)
top1.update(prec1[0].item(), pcap.size(0))
if (i + 1) % print_freq == 0:
logger.info('Test: [{0}/{1}], Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.
format(i, len(val_loader), top1=top1))
logger.info(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
return top1.avg
def CENTIME_train_pipeline(alpha):
cfg = setup_config() # load the config file
logger.info(cfg)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logger.info('Training device: {}'.format(device))
model_path = os.path.join(cfg.train.model_dir, cfg.train.model_name) # path of the model
model = resnet_AE(model_path, pretrained=False, num_classes=12).to(device) # initialize the model
criterion_c = nn.CrossEntropyLoss() # loss function for classification
criterion_r = nn.L1Loss() # loss function for the reconstruction error
optimizer = optim.Adam(model.parameters(), lr=cfg.train.lr) # define the optimizer
logger.info('Model initialized successfully.')
train_loader = data_loader(
pcap_file=cfg.train.train_pcap,
label_file=cfg.train.train_label,
statistic_file=cfg.train.train_statistic,
trimed_file_len=cfg.train.TRIMED_FILE_LEN) # get the train dataloader
test_loader = data_loader(
pcap_file=cfg.train.test_pcap,
label_file=cfg.train.test_label,
statistic_file=cfg.train.test_statistic,
trimed_file_len=cfg.train.TRIMED_FILE_LEN) # get the test dataloader
logger.info('Datasets loaded successfully.')
best_prec1 = 0
for epoch in range(cfg.train.epochs):
adjust_learning_rate(optimizer, epoch, cfg.train.lr) # dynamically adjust the learning rate
train_process(train_loader, model, alpha, criterion_c, criterion_r, optimizer, epoch, device, 80) # train for one epoch
prec1 = validate_process(test_loader, model, device, 20) # evaluate on validation set
# remember the best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
# save the best model
save_checkpoint(
{
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict()
}, is_best, model_path)
# Switch to test mode and compute detailed per-class accuracy
logger.info('Entering test mode.')
model = resnet_AE(model_path, pretrained=True, num_classes=12).to(device) # load the best model
index2label = {j: i for i, j in cfg.test.label2index.items()} # index -> label mapping
label_list = [index2label.get(i) for i in range(12)] # names of the 12 labels
pcap_data, statistic_data, label_data = get_tensor_data(
pcap_file=cfg.train.test_pcap,
statistic_file=cfg.train.test_statistic,
label_file=cfg.train.test_label,
trimed_file_len=cfg.train.TRIMED_FILE_LEN) # convert numpy arrays to tensors
pcap_data = (pcap_data/255).to(device) # traffic data
statistic_data = (statistic_data.to(device) - mean_val)/std_val # normalize the statistic features
y_pred, _ = model(pcap_data, statistic_data) # run the model to get predictions
_, pred = y_pred.topk(1, 1, largest=True, sorted=True)
Y_data_label = [index2label.get(i.tolist()) for i in label_data] # convert indices to label names
pred_label = [index2label.get(i.tolist()) for i in pred.view(-1).cpu().detach()]
logger.info('Alpha:{}'.format(alpha))
display_model_performance_metrics(true_labels=Y_data_label,
predicted_labels=pred_label,
classes=label_list)
logger.info('Finished! (* ̄︶ ̄)')
def alpha_experiment_CENTIME():
alpha_list = [0, 0.001, 0.01, 0.1, 0.5, 1, 5, 10, 100]
for alpha in alpha_list:
CENTIME_train_pipeline(alpha)
time.sleep(10)
if __name__ == "__main__":
CENTIME_train_pipeline(alpha=1) # quick test run; alpha=1 here is an assumed default weighting
|
StarcoderdataPython
|
11334415
|
<reponame>evantzhao/nlp-ner-analysis<gh_stars>0
class Constants:
START = "<START(*)>"
END = "</END(STOP)>"
BPER = 0
BLOC = 1
BORG = 2
BMISC = 3
IPER = 4
ILOC = 5
IORG = 6
IMISC = 7
OTHER = 8
ALL_TAGS = {BPER, BLOC, BORG, BMISC, IPER, ILOC, IORG, IMISC, OTHER}
TAG_TO_STRING = {
BPER: "B-PER",
BLOC: "B-LOC",
BORG: "B-ORG",
BMISC: "B-MISC",
IPER: "I-PER",
ILOC: "I-LOC",
IORG: "I-ORG",
IMISC: "I-MISC",
OTHER: "O",
}
|
StarcoderdataPython
|
23880
|
import sys
from collections.abc import Mapping
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
# Use a service account
cred = credentials.Certificate('./service-account-key.json')
firebase_admin.initialize_app(cred)
db = firestore.client()
for row in sys.stdin:
id = row.strip()
sub = db.document(u'db_pilot_test', id).get()
if sub.exists:
#print (f'it exists, {sub.id}')
#print(f'{sub.id}, {sub.to_dict().get("totalEarnings") or 0}')
try:
sub.to_dict().get("totalEarnings")
except AttributeError:
print (f'Attribute error, {sub.id}')
else:
print(f'{sub.id}, {sub.to_dict().get("totalEarnings")}')
#else:
#print (f'nope it does not, {sub.id}')
|
StarcoderdataPython
|
1670832
|
<reponame>mrcbarbier/diffuseclique
from wagutils import *
import itertools
from statsmodels.nonparametric.smoothers_lowess import lowess
import pickle
from json import dump,load
import scipy.linalg as la
def reldist_type2(x, y):
xm, ym = np.mean(x), np.mean(y)
slope = np.mean((x - xm) * (y - ym) ** 2) / np.mean((x - xm) ** 2 * (y - ym))
return [slope, 1. / slope][int(np.abs(slope) > np.abs(1. / slope))]
def reldist_odr(x, y):
import scipy.odr as odr
def f(B, xx):
return B[0] * xx + B[1]
linear = odr.Model(f)
data = odr.Data(x, y, wd=1, we=1)
res = odr.ODR(data, linear, beta0=[.4, .0])
res = res.run()
b = res.beta[0]
b = np.array([b, 1 / b])[np.argmin(np.abs([b, 1 / b]))]
# print reldist_type2(x,y),b
return b
def reldist(x, y, boot=0, typ='', split=0, strip=1,**kwargs):
#
x,y=np.array(x),np.array(y)
idx = list(np.where((~np.isnan(x))&(~np.isnan(y)))[0])
if boot:
idx = list(np.random.choice(idx, replace=True, size=x.size))
x, y = x[idx], y[idx]
#
if strip:
idx = np.argsort(x)
z = int(np.floor(len(idx) / 20))
idx = idx[z:-z]
x, y = x[idx], y[idx]
idx = np.argsort(y)
return reldist(x[idx[z:-z]], y[idx[z:-z]], boot=0, strip=0, typ=typ,**kwargs)
if split:
idx = np.argsort(x)
return np.mean([reldist(x[idx], y[idx], boot=0, split=0, typ=typ,**kwargs) for idx in np.array_split(idx, split)])
#
if 'odr' in typ:
return reldist_odr(x, y)
if 'type2' in typ or (typ is None and 'type2' in sys.argv):
return reldist_type2(x, y)
if 'loglike' in typ:
if not 'var' in kwargs:
code_debugger()
v=kwargs.get('var',1)
if v is 1:
print 'No var found'
if len(v.shape)==2:
try:
v=v[np.ix_(idx,idx)]
except:
pass
if v.shape[0]==x.shape[0]:
# print 'Using covariance matrix'
return -np.dot((x-y).ravel(),np.dot(la.inv(v ),(x-y).ravel() ))/x.size
return -np.mean((x - y) ** 2 / v[idx])
#
# Relative distance as correlation rescaled by max variance - like correlation but penalizes != scalings
cov = np.cov(x, y)
return cov[0, 1] / np.max([cov[0, 0], cov[1, 1]])
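# Worked illustration with hypothetical numbers: for x=[1,2,3,4] and y=2*x the
# Pearson correlation is 1.0, but np.cov gives cov(x,y)=10/3 and var(y)=20/3,
# so the measure above is 0.5. Identical shapes with mismatched scalings are
# penalized, which is the point of dividing by the larger variance.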
def slopedist(x, y, etai, etaj, boot=0, debug=0, per_species=None, linearize=0,**kwargs):
if boot:
idx = list(np.random.choice(range(x.size), replace=True, size=x.size))
x, y = x[idx], y[idx]
if not per_species is None:
i, j, species = per_species
idx = [np.where(i == z)[0] for z in species]
slopy = [linregress(etaj[i], y[i])[0] if len(i)>2 else np.nan for i in idx]
slopx = [linregress(etaj[i], x[i])[0] if len(i)>2 else np.nan for i in idx]
#
else:
idx = np.argsort(etai)
idx = np.array_split(idx, 4)
slopy = [linregress(etaj[i], y[i])[0] if len(i)>2 else np.nan for i in idx]
slopx = [linregress(etaj[i], x[i])[0] if len(i)>2 else np.nan for i in idx]
loc = np.array([np.median(etai[i]) for i in idx])
#
slopx,slopy,loc=[np.array(z) for z in ( slopx,slopy,loc)]
if linearize:
good=(~np.isnan(slopx))&(~np.isnan(slopy))
slopx,slopy,loc=slopx[good],slopy[good],loc[good]
a = np.argsort(loc)
slopx, slopy = slopx[a], slopy[a]
sx, ix = linregress(loc, slopx)[:2]
sy, iy = linregress(loc, slopy)[:2]
slopx, slopy = loc * sx + ix, loc * sy + iy
#
if 'debug' in sys.argv or debug:
plt.close('all')
plot(loc, slopy, hold=1)
scatter(loc, slopx)
code_debugger()
kwargs.setdefault('strip',0)
if kwargs.get('return_all'):
return slopx,slopy,reldist(slopy, slopx, **kwargs)
return reldist(slopy, slopx, **kwargs)
def rowdist(x,y,i,**kwargs):
kwargs.setdefault('strip',0)
species=kwargs.get('species',np.unique(i))
meanx=np.array([np.mean(x[i==s]) if (i==s).any() else 0 for s in species])
meany=np.array([np.mean(y[i==s]) if (i==s).any() else 0 for s in species])
return reldist(meanx,meany,**kwargs)
def get_species(df):
return sorted(set(np.concatenate(df['composition'].values)))
def get_path(exp,return_suffix=0):
suffix = ''
if 'bug' in sys.argv:
suffix += '_debug'
if 'detrendK' in sys.argv:
suffix += '_detK'
elif 'detrendblock' in sys.argv:
suffix += '_detb'
elif 'detrend' in sys.argv:
suffix += '_det'
if 'cheat' in sys.argv:
suffix += '_cheat'
if 'bminfer' in sys.argv:
suffix += '_bminfer'
path = Path('data/' + exp + suffix)
path.mkdir()
if return_suffix:
return path,suffix
return path
def hyperplane_light(df,species,**kwargs):
df=df.copy()
from numpy.linalg import lstsq, norm as lanorm, inv as lainv
from scipy.optimize import least_squares
S = len(species)
mat = np.zeros((S, S)) #Returned matrix
for sidx,s in enumerate(species):
res=None
notsidx,others=[list(x) for x in zip(*[(o,oth) for o,oth in enumerate(species) if oth!=s])]
xs = df[df[s] != 0][species].values
xnomono=df[(df[s]!=0) & (np.max(df[others],axis=1)!=0 ) ]
if not xnomono.size:
print 'hyperplane skipping',s,'only present in monoculture'
mat[sidx,sidx]=-1
continue
xsT = xs.T
def costfeta(y,weights=None):
yy = -np.ones(S)
yy[notsidx] = y
if weights is None:
return (np.dot(yy, xsT) +1)
else:
return (np.dot(yy, np.sum(weights*xsT,axis=1)/np.sum(weights) ) +1)
res=least_squares(costfeta,-np.zeros(S-1) )
row=list(res.x)
row.insert(sidx, -1)
mat[sidx]=row
mat[sidx,np.sum(np.abs(xsT),axis=1)==0]=np.nan
return mat
def hyperplane(df,species,etamode=0,distances=0,use_var=0,missing=0,**kwargs):
df=df.copy()
debug=kwargs.get('debug')
from numpy.linalg import lstsq, norm as lanorm, inv as lainv
from scipy.optimize import least_squares
S = len(species)
mat = np.zeros((S, S)) #Returned matrix
compmat=np.zeros((S,S)) #To compare between differnet methods
errmat=np.zeros((S,S)) #Matrix of stderr on coefficients
table=[]
sidx=-1
res=None
Kmono=kwargs.get("K",None)
if Kmono is None:
Kmono = np.array(
[df[np.logical_and(np.sum(df[species].values > 0, axis=1) == 1, df[s] > 0)][s].mean() for s in
species])
Kmono[np.isnan(Kmono)] = 10 ** -10
for s in species:
res,res2=None,None
sidx+=1
rsquared=0
notsidx = [z for z in range(S) if z != sidx]
xs = df[df[s] != 0][species].values
xnomono=df[(df[s]!=0) & (np.max(df[[oth for oth in species if oth!=s]],axis=1)!=0 ) ]
if not xnomono.size:
print 'hyperplane skipping',s,'only present in monoculture'
mat[sidx,sidx]=-1
dic={'species':s,'R2':0,'K':10**-10,'Kvar':0 }
table.append(dic)
continue
if etamode==1:
print 'basic eta mode'
xs = xs / np.atleast_1d(Kmono)
xs[:, np.where(np.isnan(Kmono))[0]] = 0
xsT = xs.T
weights=np.ones(xs.shape[0])
if 'weights' in kwargs:
weights*=[ kwargs['weights'].get(surv,0) for surv in np.sum(xs>0,axis=1)]
if distances or debug:
# print 'USING DISTANCES',distances,debug
dxs = np.concatenate([xs - x for x in xs]).T # [1:]-xs[0]
def costf(y,weights=None):
yy = -np.ones(S)
yy[notsidx] = y
return np.dot(yy, dxs)# -Kmono[sidx]
try:
res = least_squares(costf,- np.ones(S - 1))
if kwargs.get('weights',None) and not np.allclose(weights,1):
print 'Weights+distances not implemented yet'
res = least_squares(costf, res.x,kwargs={'weights':weights})
except Exception as e:
print 'Failed least_squares',e
code_debugger()
ai = list(res.x)
residuals=res.fun
ai.insert(sidx, -1)
mat[sidx] = ai
Ks=-np.dot(ai,xsT)
rsquared = 1 - np.sum(residuals ** 2) / np.sum((dxs-np.mean(dxs,axis=1).reshape((S,1)) )**2 )
if (not distances) or debug:
def costfeta(y,weights=None):
# return np.dot(y,xsT[notsidx])-xsT[sidx]+ifelse(etamode=='given',1,Kmono[sidx])
yy = -np.ones(S)
yy[notsidx] = y
if weights is None:
return (np.dot(yy, xsT) +1)
else:
return (np.dot(yy, np.sum(weights*xsT,axis=1)/np.sum(weights) ) +1)
def costfnoeta(y,weights=None):
# return np.dot(y,xsT[notsidx])-xsT[sidx]+ifelse(etamode=='given',1,Kmono[sidx])
yy = -np.ones(S)
yy[notsidx] = y[notsidx]
if weights is None:
return np.dot(yy, xsT) +y[sidx]
else:
raise Exception('NOT READY')
return (np.dot(yy, np.sum(weights*xsT,axis=1)/np.sum(weights) ) +1)
Ks = None
if etamode:
try:
res2=least_squares(costfeta,-np.ones(S-1) )
except:
code_debugger()
if kwargs.get('weights',None) and not np.allclose(weights,1):
# code_debugger()
res2 = least_squares(costfeta, res2.x,kwargs={'weights':weights})
comparison=list(res2.x)
residuals=costfeta(res2.x)
else:
x0=-np.ones(S)
x0[sidx]=1
res2=least_squares(costfnoeta,x0 )
if kwargs.get('weights',None) and not np.allclose(weights,1):
# code_debugger()
res2 = least_squares(costfnoeta, res2.x,kwargs={'weights':weights})
Ks=res2.x[sidx]
comparison=list(res2.x[notsidx])
residuals = costfnoeta(res2.x)
if use_var:
xvarT = df[df[s] != 0][[sp + '_var' for sp in species]].values.T
xvarT[np.isnan(xvarT)]=0
try:
def costd(yy):
dd,vv=xsT,np.clip(xvarT,10**-5,None)
tmpx=costfeta(yy)
tmpv=( np.dot(yy**2, vv[notsidx]) + vv[sidx] ) ** .5
# code_debugger()
# tmpv=( np.sum(vv,axis=0) ) ** .5
# tmpv=1
# print tmpv
return tmpx/tmpv
tmpres = list(
least_squares(costd, -np.ones(S - 1)).x)
tmpres2 = list(
least_squares(costd, comparison).x)
comparison=tmpres2
# print costd(np.array(tmpres)),costd(np.array(tmpres2))
except:
print 'Failure'
code_debugger()
# print 'Final',np.sum(costd(np.array(comparison))**2)
comparison.insert(sidx, -1)
if Ks is None:
Ks=-np.dot(comparison,xsT)
compmat[sidx]=comparison
rsquared = 1 - np.sum(residuals ** 2) / np.sum((xsT-np.mean(xsT,axis=1).reshape((S,1)) )**2 )
if np.isnan(rsquared).any():
code_debugger()
# rsquared = 1 - np.sum(residuals ** 2) / np.mean() np.var(xsT)#[sidx])
# if rsquared<0:
# code_debugger()
if debug:
code_debugger()
try:
def makerr(res):
from scipy.linalg import svd
tmp, s, VT = svd(res.jac, full_matrices=False)
threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
s = s[s > threshold]
VT = VT[:s.size]
pcov = np.dot(VT.T / s ** 2, VT)
return np.clip(np.diag(pcov)*2*res.cost/ np.clip(res.jac.shape[0] - res.jac.shape[1],10**-10,None),None,100)
fres=res
if fres is None:
fres=res2
if fres.jac.shape[1]==S:
errmat[sidx]=makerr(fres)**.5
else:
errmat[sidx,[si for si in range(S) if si != sidx]]=makerr(fres)**.5
except Exception as e:
print 'ERROR hyperplane:',e
dic={'species':s,'R2':rsquared,'K':np.mean(Ks),'Kvar':np.var(Ks) }
table.append(dic)
tab=pd.DataFrame(table)
Ks=np.array([tab.set_index('species')['K'].loc[s] for s in species ])
if not distances:
mat=compmat
np.fill_diagonal(errmat,0)
#
# DEAL WITH MISSING PAIRS
missingpairs=[(i,j) for i in species for j in species if not np.max(np.prod(df[[i,j]].values,axis=1))>0 ]
for i,j in missingpairs:
mat[species.index(i),species.index(j)]=np.nan
mat[species.index(j),species.index(i)]=np.nan
if missing=='mean':
mat[np.isnan(mat)]=np.mean(nonan(offdiag(mat)) )
else:
mat[np.isnan(mat)] = missing
if etamode:
# tab['Kdiff']=tab['K']
tab['K']=Kmono*tab['K']
# code_debugger()
beta=mat
alpha = mat / np.multiply.outer(Ks, 1. / Ks)
else:
alpha=mat
beta=mat*np.multiply.outer(Ks,1./Ks)
return alpha,beta,tab,errmat/(np.abs(mat)+10**-10)
def correlcalc(etafull,beta,gamma=0,return_all=1,pad=0,rank=0,**kwargs):
'''Compute plot of prediction versus theory for means and correlations'''
def bootstrap(x):
if not np.sum(x.shape)>0:
return x
return np.mean(np.random.choice(x, size=x.size))
beta=beta.copy()
S=etafull.shape[0]
etamean = np.array([bootstrap(etafull[i]) for i in range(S)])
bm, bv, betatheo = hebbian_getbstats(beta, etamean,**kwargs ) # betaerr=ana[i][j].get('beta_relerr', np.ones(beta.shape)))
if isinstance(gamma,basestring):
gamma=np.corrcoef(offdiag(beta),offdiag(beta.T))[0,1]
# print ' gamma',gamma
betatheo = bm + (betatheo - bm) + gamma * (betatheo.T - bm)
arange=np.arange(S)
mean_i=np.multiply.outer(np.arange(S),np.ones(S)).astype('int')
mean_j=mean_i.T
# code_debugger()
# bet=beta.copy()
# bet[bet == 0] = np.nan
# bet[np.abs(bet)>3.6]=np.nan
betadiff = beta - betatheo
diag = np.multiply.outer(np.ones(S), np.eye(S))
def removeself(mat):
S = mat.shape[0]
ss = range(S)
mat2 = [mat[i][np.ix_([s for s in ss if s != i], [s for s in ss if s != i])] for i in range(S)]
return np.array(mat2)
empirical = removeself(np.einsum('ij,ik->ijk', betadiff, betadiff))
var = np.array([np.nanmean(empirical[i][np.eye(S - 1) != 0]) for i in range(S)]).reshape((-1, 1, 1))
empirical /= var + 10 ** -15
empirical -= removeself(diag)
prediction = removeself(
- np.multiply.outer(1. / (np.sum(etamean ** 2) - etamean ** 2 + 0.0001), np.multiply.outer(etamean, etamean)))
def ms(x):
return np.concatenate([np.nanmean(x, axis=(1)), np.nanmean(x, axis=(2))])
corr_i=np.multiply.outer(arange,np.ones((S-1,S-1)) ).astype('int')
corr_j=removeself(np.multiply.outer(np.ones(S),np.multiply.outer(arange,np.ones(S)) ).astype('int'))
corr_k=removeself(np.multiply.outer(np.ones(S),np.multiply.outer(np.ones(S),arange) ).astype('int'))
# prediction,empirical=prediction[removeself(diag)==0],empirical[removeself(diag)==0]
# prediction,empirical=ms(prediction),ms(empirical) #Makes things significantly worse
if kwargs.get('remove_zeros',1):
beta[beta==0]=np.nan
results=dict( [('mean_theo', offdiag(betatheo)), ('mean_emp', offdiag(beta)), ('corr_theo', prediction.ravel()),
('corr_emp', empirical.ravel()),('mean_etai',etamean[list( offdiag(mean_i))] ),('mean_etaj',etamean[list( offdiag(mean_j))] ),
('corr_etai', etamean[list(corr_i.ravel())] ) ] )
# code_debugger()
for z in ('corr_i', 'corr_j', 'corr_k', 'mean_i', 'mean_j'):
val=locals()[z]
if 'mean' in z:
val=offdiag(val)
else:
val=val.ravel()
results[z]=val
if rank:
results={i:getranks(results[i]) for i in results}
results['bm']=bm
results['bv']=bv
from scipy.stats import sem, linregress, siegelslopes, theilslopes, ttest_1samp
try:
summary={v: linregress(results[v+'_theo'][~np.isnan( results[v+'_emp'])], results[v+'_emp'][~np.isnan( results[v+'_emp'])] )[0] for v in ('mean','corr')}
except Exception as e:
        print(e)
summary={}
if return_all:
results.update(summary)
if pad:
for k in results:
if 'mean_' in k:
results[k]=np.concatenate([results[k], np.ones(len(results['corr_theo'])-len(results[k]) ) *np.nan ])
# else:
return results
return summary
def infer_bm(eta,meanfield=1,nmat=1,S=None,maxtrials=100,resolution=ifelse('bug' in sys.argv,3,15),use_noise=0, **kwargs):
from numpy.linalg import lstsq, norm as lanorm, inv as lainv
from scipy.special import erf
import time
Salive=eta.shape[0]
if S is None:
S=Salive
tstart=time.time()
eta=eta[np.argsort(np.mean(eta,axis=1))]
mneta=np.mean(np.median(eta,axis=1))
sdeta=np.std(np.median(eta,axis=1))
phieta=np.mean( eta>0) *Salive*1./S
if eta.shape[1] == 1 or Salive<2:
covmat = np.eye(3)
else:
var_mneta = np.array(np.mean([np.random.choice(eta[i], size=maxtrials) for i in range(Salive)], axis=0))
var_sdeta = np.array(np.std([np.random.choice(eta[i], size=maxtrials) for i in range(Salive)], axis=0))
var_phieta = np.array(np.mean([np.random.choice((eta[i] > 0), size=maxtrials) for i in range(Salive)], axis=0) *
(Salive - np.random.randint(0, 2, maxtrials)) * 1. / S)
covmat = np.cov([var_mneta, var_sdeta, var_phieta])
etavec=np.mean(eta,axis=1)
vare=np.mean(np.var(eta,axis=1)/etavec**1.5)
# bm_mf = (1. / mneta - 1) / S
bm_surv=bm=hebbian_stablebm(etavec)
bs_surv=bs= np.sqrt( (1- np.mean(etavec)**2/np.mean(etavec**2)) /S)
tab,learntab =None,None
gamma=0
if not meanfield:
#
if 'table' in kwargs and not kwargs['table'] is None:
tab = kwargs['table'].copy()
else:
def make_mats(bm,bs,gamma):
etas=[]
trial=0
while len(etas)<nmat and trial<maxtrials:
trial+=1
mat= -genpool(S,mu=bm,sigma=bs,gamma=gamma)[-1]
np.fill_diagonal(mat,1)
e=np.dot(lainv(mat),np.ones(S))
a=np.argsort(e)
mat=mat[np.ix_(a,a)]
if (e<0).any():
e=np.clip(e[a],0,None)
e=dosimu(-mat,np.ones(S),np.ones(S),tmax=100,noise=0,x0=e+.001)[-1][-1]
e[e<10**-5]=0
# print e
if vare>0 and use_noise:
noise=np.array([np.random.gamma(1./(vare*ee**1.5),(vare*ee**1.5) ) if ee >0 else 0 for ee in e ])
else:
noise=1
etas.append(e*noise )
return etas
#
learntab=[]
            print('CREATING TABLE FOR BM INFERENCE')
if 'nogamma' in sys.argv:
gammas=[0]
gammares=1
else:
gammas=[0,0.3,0.6]
gammares=9
fineres=3*resolution
for ix,x in enumerate(np.linspace(bm_surv*.7,2*bm_surv,resolution)):
                print('{}/{}'.format(ix + 1, resolution))
for y in np.linspace(bs_surv*.7,2.4*bs_surv,resolution):
for g in gammas:
etas=make_mats(x,y,g)
mns=[np.mean(e) for e in etas]
sds=[np.std(e) for e in etas]
phi=[np.mean(e>0) for e in etas]
learntab.append({'bm':x,'bs':y,'gamma':g,'mn':np.mean(mns),'sd':np.mean(sds),'phi':np.mean(phi),},)
learntab=pd.DataFrame(learntab)
#
XY=learntab[['bm','bs','gamma']].values
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
# ax.plot_wireframe(X, Y, Z, rstride=2, cstride=2,color='k',alpha=.7)
clf = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
XYsmooth=np.array([x for x in itertools.product( np.linspace(XY[0,0],XY[-1,0],fineres),
np.linspace(XY[0,1],XY[-1,1],fineres) , np.linspace(gammas[0],gammas[-1],gammares) ) ])
tab = pd.DataFrame({'bm': XYsmooth[:, 0], 'bs': XYsmooth[:, 1], 'gamma': XYsmooth[:, 2]})
for label in ['mn','sd','phi']:
Z=learntab[label].values #bandwidth(XY) #kernel='gaussian', bandwidth=.2
clf.fit(XY,Z)
Zsmooth = clf.predict(XYsmooth)
tab[label]=Zsmooth
for tb in (tab,learntab):
if tb is None:
continue
try:
dist=[ np.sqrt( np.dot( (mneta-mn,sdeta-sd,phieta-phi),np.dot(lainv(covmat), (mneta-mn,sdeta-sd,phieta-phi) ) ) ) for mn,sd,phi in tb[['mn','sd','phi']].values]
except:
code_debugger()
# dist = np.abs(mneta*phieta-tb['mn'].values)
tb['dist']=dist
tb['p']=[(1-erf( dd/np.min(dist) ) )/2. for dd in dist]
# code_debugger()
if 'bug' in sys.argv:
for i in ('p','dist','phi','mn'):
plt.figure()
if not learntab is None:
gammas = learntab['gamma'].unique()
plt.subplot(121)
plt.imshow(learntab[learntab['gamma']==gammas[0]].pivot('bm', 'bs')[i]), plt.colorbar()
plt.subplot(122)
#
bms,bss=[['{:.2f}'.format(l) for l in ll] for ll in (np.sort(tab['bm'].unique()),np.sort(tab['bs'].unique()))]
gammas = tab['gamma'].unique()
plt.imshow( tab[tab['gamma']==gammas[0]].pivot('bm', 'bs')[i]), plt.colorbar()
ax = plt.gca()
dx,dy=ax.get_xticks(),ax.get_yticks()
def getidx(lst,z):
return [lst[int(i)] if i>=0 and i<len(lst) else '' for i in z]
ax.set_yticklabels(getidx(bms,dy)),ax.set_xticklabels(getidx(bss,dx)),plt.suptitle(i)
plt.show()
# code_debugger()
bm,bs,gamma=tab[tab['p']>.95 *tab['p'].max()][['bm','bs','gamma']].median()
tend=time.time()
# bloc = hebbian_convert(eta, bm, bs, forward=0)
# bm = bloc['bm_pool']
    print('Time', tend - tstart, 'bm,bs:', bm_surv, bs_surv, '->', bm, bs, 'gamma', gamma)  # ,'bs',bs,'<-',bs_surv
if np.isnan([bm,bs]).any():
code_debugger()
return bm,bs,gamma, tab
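# Hedged usage sketch (added for illustration, not part of the original script):
# assuming `eta` is an (S, n_replicates) array of observed abundances/yields,
# the call below would return the inferred interaction statistics.
# bm, bs, gamma, tab = infer_bm(eta, meanfield=1)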
def make_groundtruth(S=8,species=None,noise=.08,sizes=(1,2,4,8),replicas=1,nplots=None,plots=None,**kwargs):
"""Create a fake experimental setup to test inference methods."""
    from scipy.special import comb
table=[]
if species is None:
import string
ref=string.ascii_lowercase+string.ascii_uppercase+''.join([str(i) for i in range(10)])
species=[ref[i] for i in range(S)]
species=np.array(species)
rs,Ks,beta=genpool(S,**kwargs)
rs=kwargs.pop('rs',rs)
Ks=kwargs.pop('Ks',Ks)
beta=kwargs.pop('beta',beta)
Aij=beta*np.multiply.outer(rs,1./Ks)
alpha=beta*np.multiply.outer(Ks,1./Ks)
def get_true_eq(compo,N=None):
bet=beta - np.eye(S)
        if isinstance(compo[0], str):
sidx=[list(species).index(i) for i in compo]
else:
sidx=compo
bet=bet[np.ix_(sidx,sidx)]
eqs = find_eqs(bet, uninvadable=1, stable=1, largest=N is None)
if not eqs:
eqs = find_eqs(bet, uninvadable=0, stable=1, largest=N is None)
if not eqs:
eqs = find_eqs(bet, uninvadable=0, stable=0, largest=N is None)
eq= eqs[0]
if not N is None:
from scipy.linalg import norm
eq= eqs[np.argmin([ norm(N-eq) for eq in eqs ]) ]
val=np.zeros(S)
val[sidx]=eq
return val
if plots is None:
if sizes is None:
sizes = [2 ** x for x in range(int(np.floor(np.log(S * 1.001) / np.log(2)) + 1))]
if not S in sizes:
sizes += [S]
sizes = np.array(sizes)
if replicas is None:
replicas = [int(np.round(S / s)) for s in sizes]
else:
replicas = np.atleast_1d(replicas)
if replicas.shape[0] < sizes.shape[0]:
replicas = replicas * np.ones(sizes.shape)
if nplots is None:
nplots = np.array([min(10, comb(S, s)) if s > 1 else S for s in sizes]) * replicas
plots=[]
for size, nrep, npl in zip(sizes,replicas,nplots):
nsamp=max(1,npl/nrep)
if npl>comb(S,size):
samples=list(tuple(x) for x in itertools.combinations(range(int(S)),size))
elif comb(S,size)<5000:
allcombs=[tuple(x) for x in itertools.combinations(range(int(S)), size)]
samples = [allcombs[i] for i in np.random.choice(range(len(allcombs)),int(nsamp),replace=0 )]
else:
samples=[tuple(np.random.choice(range(int(S)),size,replace=0)) for i in range(int(nsamp))]
try:
nrep = max(1,int(min(nrep, npl / len(samples))))
except:
                print('ERROR', size, nrep, npl, nsamp, samples)
code_debugger()
# print size, nrep,npl,samples, len(samples)==len(set(samples))
plots+=[species[list(sidx)] for sidx in samples for rep in range(nrep) ]
plotn=0
x0=kwargs.pop('x0',np.ones(len(species)))
for plot in plots:
plotn += 1
sidx=[list(species).index(s) for s in plot]
        print('Plot {} Species {}'.format(plotn, species[sidx]))
years,results = dosimu(Aij[np.ix_(sidx, sidx)], Ks[sidx], rs[sidx],x0=x0[sidx], noise=noise, evol=1, print_msg=0, **kwargs)
#print results[-1]/Ks[sidx]
for year, res in zip(years,results):
total = np.sum(res)
dic = {'plot': plotn, 'total': total, 'total/m2': total, 'year': int(np.round(year)), 'richness': len(plot),
'composition': tuple(species[sidx]),#'equilibrium':get_true_eq(sidx,plotn)
}
basekeys=sorted(dic.keys())
abund = np.zeros(len(species))
# code_debugger()
abund[sidx] = res
dic.update({s: a for s, a in zip(species, abund)})
table.append(dic)
df=pd.DataFrame(table)
df=df[list(basekeys)+list(species)]
ground={}
ground.update(kwargs)
ground.update({'A':Aij-np.diag(rs/Ks),'beta':beta-np.eye(S),'alpha':alpha-np.eye(S),'Ks':Ks, 'rs':rs,'noise':noise,
'equilibrium':{compo:get_true_eq(compo) for compo in set(df['composition'].values) } }
)
return df,ground
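# Hedged usage sketch (added for illustration, not part of the original script):
# the keyword values below are arbitrary assumptions chosen only to show the
# call signature; the function relies on genpool/dosimu/find_eqs defined earlier.
# df, ground = make_groundtruth(S=8, noise=0.05)
# print(df.head())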
# === Source: jeikabu/lumberyard ===
from __future__ import print_function, absolute_import
import itertools
from .typeconv import TypeManager, TypeCastingRules
from numba import types
default_type_manager = TypeManager()
def dump_number_rules():
tm = default_type_manager
for a, b in itertools.product(types.number_domain, types.number_domain):
print(a, '->', b, tm.check_compatible(a, b))
def _init_casting_rules(tm):
tcr = TypeCastingRules(tm)
tcr.safe_unsafe(types.boolean, types.int8)
tcr.safe_unsafe(types.boolean, types.uint8)
tcr.promote_unsafe(types.int8, types.int16)
tcr.promote_unsafe(types.uint8, types.uint16)
tcr.promote_unsafe(types.int16, types.int32)
tcr.promote_unsafe(types.uint16, types.uint32)
tcr.promote_unsafe(types.int32, types.int64)
tcr.promote_unsafe(types.uint32, types.uint64)
tcr.safe_unsafe(types.uint8, types.int16)
tcr.safe_unsafe(types.uint16, types.int32)
tcr.safe_unsafe(types.uint32, types.int64)
tcr.safe_unsafe(types.int16, types.float32)
tcr.safe_unsafe(types.int32, types.float64)
tcr.unsafe_unsafe(types.int32, types.float32)
# XXX this is inconsistent with the above; but we want to prefer
# float64 over int64 when typing a heterogenous operation,
# e.g. `float64 + int64`. Perhaps we need more granularity in the
# conversion kinds.
tcr.safe_unsafe(types.int64, types.float64)
tcr.safe_unsafe(types.uint64, types.float64)
tcr.promote_unsafe(types.float32, types.float64)
tcr.safe(types.float32, types.complex64)
tcr.safe(types.float64, types.complex128)
tcr.promote_unsafe(types.complex64, types.complex128)
    # Allow integers to cast to void*
tcr.unsafe_unsafe(types.uintp, types.voidptr)
return tcr
default_casting_rules = _init_casting_rules(default_type_manager)
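# Hedged usage sketch (added for illustration; not part of the original module):
# running this file directly prints, for every pair of number types, the
# conversion kind reported by the default type manager defined above.
if __name__ == '__main__':
    dump_number_rules()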
# === File: src/13/13073.py ===
"""
13073. Sums
작성자: xCrypt0r
언어: Python 3
사용 메모리: 29,380 KB
소요 시간: 920 ms
해결 날짜: 2020년 9월 19일
"""
def main():
for _ in range(int(input())):
N = int(input())
print(f'{N * (N + 1) // 2} {N ** 2} {N * (N + 1)}')
if __name__ == '__main__':
main()
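# Worked example (added for illustration): for N = 3 the printed values are
# 1+2+3 = 6, 1+3+5 = 9 and 2+4+6 = 12, matching N*(N+1)//2, N**2 and N*(N+1).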
# === Source: dmklee/nuro-arm, file: nuro_arm/robot/pybullet_simulator.py ===
import pybullet_data
import pybullet as pb
import numpy as np
import os
import nuro_arm
from nuro_arm import transformation_utils, constants
class PybulletSimulator:
def __init__(self,
headless=True,
client=None,
):
'''Base class for pybullet simulator to handle initialization and attributes
about robot joints
Parameters
----------
        headless : bool
            If True, connect with pb.DIRECT (no GUI); otherwise use pb.GUI
        client : int, optional
            Existing pybullet client id to reuse; a new client is created when None
Attributes
----------
end_effector_link_index : int
index of hand link, this is used to specify which link
should be used for IK
arm_joint_ids: list of int
indices of joints that control the arm
gripper_joint_ids: list of int
indices of two gripper joints. for simulator, gripper operation is
controlled by two joints which should both move in concert to reflect
the real xArm
gripper_closed : ndarray
joint positions of gripper joints that result in closed gripper in
the simulator; shape = (2,); dtype=float
gripper_opened : ndarray
joint positions of gripper joints that result in opened gripper in
the simulator; shape = (2,); dtype=float
'''
self.arm_joint_ids = [1,2,3,4,5]
self.gripper_joint_ids = [6,7]
self.dummy_joint_ids = [8]
self.finger_joint_ids = [9,10]
self.end_effector_link_index = 11
self.arm_joint_limits = np.array(((-2, -1.58, -2, -1.8, -2),
( 2, 1.58, 2, 2.0, 2)))
self.gripper_joint_limits = np.array(((0.05,0.05),
(1.38, 1.38)))
self.dummy_joint_limits = np.array(((0.025,),(0.055,)))
self.finger_joint_limits = np.array(((0.0145, 0.029,),
(0.0445, 0.089,)))
connection_mode = pb.DIRECT if headless else pb.GUI
if client is None:
self._client = self._initialize_client(connection_mode)
else:
self._client = client
# suction cups are 12 mm tall when not pressed
robot_pos = (0, 0, 0.012)
robot_rot = (0, 0, 0, 1)
self.robot_id = self.initialize_robot(robot_pos, robot_rot)
self.n_joints = pb.getNumJoints(self.robot_id,
physicsClientId=self._client)
self.joint_names = []
self.link_names = []
self.joint_ll = []
self.joint_ul = []
for j_id in range(self.n_joints):
j_info = pb.getJointInfo(self.robot_id, j_id, physicsClientId=self._client)
self.joint_names.append(j_info[1].decode('ascii'))
self.joint_ll.append(j_info[8])
self.joint_ul.append(j_info[9])
self.link_names.append(j_info[12].decode('ascii'))
self.joint_ll = np.array(self.joint_ll)
self.joint_ul = np.array(self.joint_ul)
def _initialize_client(self, connection_mode):
'''Creates pybullet simulator and loads world plane.
Parameters
----------
connection_mode : {pb.GUI, pb.DIRECT}
Indicates whether pybullet simulator should generate GUI or not
Returns
-------
client : int
Identifier used to specify simulator client. This is needed when
making calls because there might be multiple clients running
'''
client = pb.connect(connection_mode)
pb.setPhysicsEngineParameter(numSubSteps=0,
numSolverIterations=100,
solverResidualThreshold=1e-7,
constraintSolverType=pb.CONSTRAINT_SOLVER_LCP_SI,
physicsClientId=client)
# this path is where we find platform
pb.setAdditionalSearchPath(pybullet_data.getDataPath())
self.plane_id = pb.loadURDF('plane.urdf', [0,-0.5,0],
physicsClientId=client)
pb.changeDynamics(self.plane_id, -1,
linearDamping=0.04,
angularDamping=0.04,
restitution=0,
contactStiffness=3000,
contactDamping=100,
physicsClientId=client)
return client
def initialize_robot(self, pos, rot=[0,0,0,1]):
'''Adds robot to simulator, setting up gripper constraints and initial
motor commands
Parameters
----------
pos: array_like
xyz position, length 3
rot: array_like
quaternion, length 4
Returns
-------
int
id for robot body
'''
robot_urdf_path = os.path.join(constants.URDF_DIR, 'xarm.urdf')
robot_id = pb.loadURDF(robot_urdf_path,
pos,
rot,
flags=pb.URDF_USE_SELF_COLLISION,
physicsClientId=self._client)
self.base_pos = pos
self.base_rot = rot
# # set up constraints for linkage in gripper fingers
for i in [0,1]:
constraint = pb.createConstraint(robot_id,
self.gripper_joint_ids[i],
robot_id,
self.finger_joint_ids[i],
pb.JOINT_POINT2POINT,
(0,0,0),
(0,0,0.03),
(0,0,0),
physicsClientId= self._client
)
pb.changeConstraint(constraint, maxForce=1000000)
# reset joints in hand so that constraints are satisfied
hand_joint_ids = self.gripper_joint_ids + self.dummy_joint_ids + self.finger_joint_ids
hand_rest_states = [0.05, 0.05, 0.055, 0.0155, 0.031]
[pb.resetJointState(robot_id, j_id, jpos, physicsClientId=self._client)
for j_id,jpos in zip(hand_joint_ids, hand_rest_states)]
# allow finger and linkages to move freely
pb.setJointMotorControlArray(robot_id,
self.dummy_joint_ids+self.finger_joint_ids,
pb.POSITION_CONTROL,
forces=[0,0,0],
physicsClientId= self._client)
# # make arm joints rigid
pb.setJointMotorControlArray(robot_id,
self.arm_joint_ids,
pb.POSITION_CONTROL,
5*[0],
positionGains=5*[0.2],
physicsClientId= self._client)
return robot_id
def get_hand_pose(self):
'''Get position and orientation of hand (i.e. where grippers would close)
This is not the same as the hand link, instead we are interested in the
space where the grippers would engage with an object
Returns
-------
ndarray
position vector; shape=(3,); dtype=float
ndarray
            quaternion; shape=(4,); dtype=float
'''
link_state = pb.getLinkState(self.robot_id,
self.end_effector_link_index,
physicsClientId=self._client)
pos = link_state[4]
rot = link_state[5]
return pos, rot
def _get_link_pose(self, link_name):
'''Returns position and orientation of robot's link
Parameters
----------
link_name: str
name of link in urdf. it should not include "_link" at end of name
Returns
-------
pos: ndarray
3D position; shape=(3,); dtype=float
rot: ndarray
euler angle; shape=(3,); dtype=float
'''
assert link_name in self.link_names
link_index = self.link_names.index(link_name)
link_state = pb.getLinkState(self.robot_id, link_index,
physicsClientId=self._client)
pos = link_state[4]
rot = pb.getEulerFromQuaternion(link_state[5])
return pos, rot
def reset_base_pose(self, pos, rot=(0,0,0,1)):
'''Resets position and orientation of robot base.
Parameters
----------
pos: array_like
xyz position, length 3
rot: array_like
quaternion, length 4
'''
pb.resetBasePositionAndOrientation(self.robot_id, pos, rot,
physicsClientId=self._client)
self.base_pos = pos
self.base_rot = rot
def close(self):
pb.disconnect(self._client)
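# Hedged usage sketch (added for illustration, not part of the original module;
# it assumes pybullet and the nuro_arm URDF assets are installed where
# constants.URDF_DIR points): create a headless simulator, print the hand pose,
# then disconnect.
if __name__ == '__main__':
    sim = PybulletSimulator(headless=True)
    print(sim.get_hand_pose())
    sim.close()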
# ===========================================================================
import math
import torch
from torch.utils.data import Dataset, DataLoader
class MyDataset(Dataset):
def __init__(self, data, window, target_cols):
self.data = torch.Tensor(data)
self.window = window
self.target_cols = target_cols
self.shape = self.__getshape__()
self.size = self.__getsize__()
def __getitem__(self, index):
x = self.data[index: index + self.window]
y = self.data[index + self.window, self.target_cols]
return x, y
def __len__(self):
return len(self.data) - self.window
def __getshape__(self):
return (self.__len__(), *self.__getitem__(0)[0].shape)
def __getsize__(self):
return (self.__len__())
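# Worked example (added for illustration): with window=18 and data of shape
# (10000, 4) as built below, dataset[0] returns x of shape (18, 4) and
# y = data[18, target_cols], and len(dataset) == 10000 - 18 == 9982.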
batch_size = 32
input_feature_size = 4
seq_length = 18
target_cols = 1
pin_memory = True
num_workers = 1
# data shape: (40000,)
data = torch.tensor([math.sin(x) for x in range(0, 10000 * input_feature_size)])
# data shape: (10000, 4)
data = data.view(10000, input_feature_size)
dataset = MyDataset(data, seq_length, target_cols)
total_data_size = len(dataset)
train_data_size = int(len(dataset) * 0.7)
valid_data_size = int(len(dataset) * 0.2)
test_data_size = total_data_size - train_data_size - valid_data_size
train_dataset, valid_dataset, test_dataset = torch.utils.data.random_split(
dataset, [train_data_size, valid_data_size, test_data_size]
)
train_iter = DataLoader(
train_dataset, batch_size=batch_size,
drop_last=True,
# num_workers=num_workers,
pin_memory=pin_memory
)
valid_iter = DataLoader(
valid_dataset, batch_size=batch_size,
drop_last=True,
# num_workers=num_workers,
pin_memory=pin_memory
)
test_iter = DataLoader(
test_dataset, batch_size=batch_size,
drop_last=True,
# num_workers=num_workers,
pin_memory=pin_memory
)
# for idx_batch, batch in enumerate(train_iter):
# X, Y = batch
# print(idx_batch, X.shape, Y.shape)
# print(X, Y)
# print("\n\n\n")
# === Source: ShuaibinLi/RL_baselines ===
import torch
import numpy as np
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DDPGAgent(object):
def __init__(self, algorithm, act_dim, expl_noise=0.1):
self.alg = algorithm
self.act_dim = act_dim
self.expl_noise = expl_noise
def sample(self, obs):
action_numpy = self.predict(obs)
action_noise = np.random.normal(0, self.expl_noise, size=self.act_dim)
action = (action_numpy + action_noise).clip(-1, 1)
return action
def predict(self, obs):
obs = torch.FloatTensor(obs.reshape(1, -1)).to(device)
action = self.alg.predict(obs)
action_numpy = action.cpu().detach().numpy().flatten()
return action_numpy
def learn(self, obs, action, reward, next_obs, terminal):
terminal = np.expand_dims(terminal, -1)
reward = np.expand_dims(reward, -1)
obs = torch.FloatTensor(obs).to(device)
action = torch.FloatTensor(action).to(device)
reward = torch.FloatTensor(reward).to(device)
next_obs = torch.FloatTensor(next_obs).to(device)
terminal = torch.FloatTensor(terminal).to(device)
critic_loss, actor_loss = self.alg.learn(obs, action, reward, next_obs,
terminal)
return critic_loss, actor_loss
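# Hedged usage sketch (added for illustration, not part of the original module):
# `SomeDDPGAlgorithm` is a hypothetical stand-in for whatever algorithm object
# exposes .predict(obs) and .learn(...), as assumed by DDPGAgent above.
# agent = DDPGAgent(SomeDDPGAlgorithm(), act_dim=2, expl_noise=0.1)
# action = agent.sample(np.zeros(8))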
# ===========================================================================
import random
import re
from datetime import datetime
from info.libs.yuntongxun.sms import CCP
from info.models import User
from info.utils.response_code import RET
from . import passport_blue
from info import redis_store, constants, db
from flask import request, abort, make_response, current_app, jsonify, session
from info.utils.captcha.captcha import captcha
@passport_blue.route("/logout")
def logout():
session.pop("mobile", None)
session.pop("nick_name", None)
session.pop("user_id", None)
print("123")
return jsonify(errno=RET.PARAMERR, errmsg="退出成功")
@passport_blue.route("/login", methods=["POST"])
def login():
"""登录的ajax接口"""
    # Step 1: parse the POSTed JSON as usual
mobile = request.json.get("mobile")
password = request.json.get("password")
    # If any value is missing, return immediately
if not all([mobile, password]):
return jsonify(errno=RET.PARAMERR, errmsg="参数发生错误")
    # Validate the phone number with a regex; return if it does not match
if not re.match(r"^1[345678]\d{9}$", mobile):
return jsonify(errno=RET.PARAMERR, errmsg="请输入正确的电话号码")
    # Query the database for the user
try:
user = User.query.filter(User.mobile == mobile).first()
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR, errmsg="数据库链接出错")
if not user:
        # Username not found
return jsonify(errno=RET.LOGINERR, errmsg="请输入正确的用户名")
if not user.check_passoword(password):
return jsonify(errno=RET.LOGINERR, errmsg="密码错误")
    # Username and password verified; update the last login time
user.last_login = datetime.now()
try:
db.session.commit()
except Exception as e:
db.session.rollback()
current_app.logger.error(e)
return jsonify(errno=RET.DBERR, errmsg="不知道为啥登录时间出错了")
    # Login succeeded; store the user in the session
    session["user_id"] = user.id
    session["mobile"] = user.mobile
    session["nick_name"] = user.nick_name
    # session.permanent = True  # use the configured session lifetime
return jsonify(errno=RET.OK, errmsg="OK")
@passport_blue.route("/register", methods=["POST"])
def register():
"""实现用户在注册按钮点击之后实现的接口"""
# print(request.json)
    # Read all values sent from the form
mobile = request.json.get("mobile")
smscode = request.json.get("smscode")
password = request.json.get("password")
    # If any value is missing, return immediately
if not all([mobile, smscode, password]):
return jsonify(errno=RET.PARAMERR, errmsg="参数发生错误")
    # Validate the phone number with a regex; return if it does not match
if not re.match(r"^1[345678]\d{9}$", mobile):
return jsonify(errno=RET.PARAMERR, errmsg="请输入正确的电话号码")
    # Fetch the SMS code stored in redis for this phone number
try:
redis_sms_code = redis_store.get("sms_" + mobile)
# print("手机验证码为:", redis_sms_code)
except Exception as e:
current_app.logger.error(e)
return jsonify(error=RET.DBERR, errmsg="这个手机号码没有发送验证码")
    # Important step: verify the SMS code
if not redis_sms_code:
return jsonify(error=RET.DATAERR, errmsg="验证码已经过期了")
if redis_sms_code != smscode:
return jsonify(error=RET.DATAERR, errmsg="手机验证码是咋了,匹配不正确")
    # Check the password length; require at least 6 characters for now
if len(password) < 6:
return jsonify(errno=RET.PARAMERR, errmsg="密码至少要输入6位以上")
    # All checks passed; create the user record in MySQL
user = User()
    user.mobile = mobile  # user's phone number
    user.nick_name = mobile  # nickname; default to the phone number for now
    user.password = password  # user's password
    user.last_login = datetime.now()  # initialize the last login time
try:
db.session.add(user)
db.session.commit()
except Exception as e:
db.session.rollback()
current_app.logger.error(e)
return jsonify(errno=RET.DBERR, errmsg="用户名已经被注册")
    # Registration succeeded; store the new user in the session
    session["user_id"] = user.id
    session["mobile"] = mobile
    session["nick_name"] = user.nick_name
    # session.permanent = True  # use the configured session lifetime
return jsonify(errno=RET.OK, errmsg="OK")
@passport_blue.route("/sms_code", methods=["POST"])
def send_sms_code():
"""
接口需要获取,手机号,图片验证码的内容,URL中的编号
取出redis的内容与用户的验证码内容进行校验
如果对比不一致,那么返回验证码输入错误
一致的话,生成验证码的内容,发送过去
:return:
"""
    # Used for testing only
# return jsonify(errno=RET.OK, errmsg="发送成功")
    # 1. Extract the parameters
# params_dict = json.loads(request.data)
params_dict = request.json
mobile = params_dict.get("mobile")
image_code = params_dict.get("imageCode")
image_code_id = params_dict.get("image_code_id")
# print(mobile)
# print(image_code)
# print(image_code_id)
    # 2. Validate the data (check presence and format)
if not all([mobile, image_code_id, image_code]):
return jsonify(errno=RET.PARAMERR, errmsg="参数发生错误")
    # Validate the phone number with a regex; return if it does not match
if not re.match(r"^1[345678]\d{9}$", mobile):
return jsonify(errno=RET.PARAMERR, errmsg="请输入正确的电话号码")
    # Use image_code_id to fetch the stored captcha and compare it with the submitted one
try:
# print("imageCode_" + image_code_id)
redis_image_code = redis_store.get("imageCodeId_" + image_code_id)
# print("验证码:", redis_image_code)
except Exception as e:
current_app.logger.error(e)
# return jsonify(errno=RET.PARAMERR, errmsg="请输入正确的电话号码")
return jsonify(errno=RET.DBERR, errmsg="数据查询失败")
    # Nothing found in redis
if not redis_image_code:
return jsonify(errno=RET.NODATA, errmsg="验证码已经过期")
if redis_image_code.upper() != image_code.upper():
return jsonify(errno=RET.DATAERR, errmsg="验证码输入错误")
    # Generate a 6-digit random code
authcode = "%06d" % random.randint(0, 999999)
# print("手机验证码是:", authcode)
current_app.logger.debug("手机的验证码是:" + str(authcode))
    # # SMS sending below has already been tested; skip the real send for now and just log the code
# result = CCP().send_template_sms(mobile, [authcode, int(constants.SMS_CODE_REDIS_EXPIRES / 60)], 1)
# if result:
# current_app.logger.error("手机验证发送出错")
# return jsonify(errno=RET.THIRDERR, errmsg="手机验证码发送失败")
    # Store the code in redis with key "sms_" + mobile and the code as the value
try:
redis_store.set("sms_" + mobile, authcode, constants.SMS_CODE_REDIS_EXPIRES)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR, errmsg="手机验证码数据保存失败")
    # Everything is OK; report success
return jsonify(errno=RET.OK, errmsg="发送成功")
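# Request-shape note (added for illustration): the /sms_code endpoint above
# expects a JSON body like
#   {"mobile": "13800000000", "imageCode": "abcd", "image_code_id": "<uuid>"};
# the example values are hypothetical and only show the expected keys.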
@passport_blue.route("/image_code")
def get_image_code():
"""
生成图片验证并返回
1.从request.agrs中取出值
2.要判断这个参数是否请求正确
3.生成图片验证码
:return:
"""
    # Read the value from the request; if it is missing the request is invalid, abort with 403
image_code_id = request.args.get('imageCodeId', None)
if not image_code_id:
return abort(403)
    # Use the captcha helper to get the name, text, and image file
name, text, image = captcha.generate_captcha()
current_app.logger.debug("验证码是:" + text)
    # Write the captcha text into redis with a 300-second expiry
# print(type(image_code_id))
# print(text)
try:
redis_store.set("imageCodeId_" + image_code_id, text, constants.IMAGE_CODE_REDIS_EXPIRES)
except Exception as e:
current_app.logger.error(e)
response = make_response(image)
response.headers["Content-Type"] = "image/jpg"
return response
# ===========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import random
import signal
import sys
import threading
import time
import unittest
import ray.plasma as plasma
from ray.plasma.utils import (random_object_id, generate_metadata,
create_object_with_id, create_object)
from ray import services
USE_VALGRIND = False
PLASMA_STORE_MEMORY = 1000000000
def assert_get_object_equal(unit_test, client1, client2, object_id,
memory_buffer=None, metadata=None):
client1_buff = client1.get([object_id])[0]
client2_buff = client2.get([object_id])[0]
client1_metadata = client1.get_metadata([object_id])[0]
client2_metadata = client2.get_metadata([object_id])[0]
unit_test.assertEqual(len(client1_buff), len(client2_buff))
unit_test.assertEqual(len(client1_metadata), len(client2_metadata))
# Check that the buffers from the two clients are the same.
unit_test.assertTrue(plasma.buffers_equal(client1_buff, client2_buff))
# Check that the metadata buffers from the two clients are the same.
unit_test.assertTrue(plasma.buffers_equal(client1_metadata,
client2_metadata))
# If a reference buffer was provided, check that it is the same as well.
if memory_buffer is not None:
unit_test.assertTrue(plasma.buffers_equal(memory_buffer, client1_buff))
# If reference metadata was provided, check that it is the same as well.
if metadata is not None:
unit_test.assertTrue(plasma.buffers_equal(metadata, client1_metadata))
class TestPlasmaClient(unittest.TestCase):
def setUp(self):
# Start Plasma store.
plasma_store_name, self.p = plasma.start_plasma_store(
use_valgrind=USE_VALGRIND)
# Connect to Plasma.
self.plasma_client = plasma.PlasmaClient(plasma_store_name, None, 64)
# For the eviction test
self.plasma_client2 = plasma.PlasmaClient(plasma_store_name, None, 0)
def tearDown(self):
# Check that the Plasma store is still alive.
self.assertEqual(self.p.poll(), None)
# Kill the plasma store process.
if USE_VALGRIND:
self.p.send_signal(signal.SIGTERM)
self.p.wait()
if self.p.returncode != 0:
os._exit(-1)
else:
self.p.kill()
def test_create(self):
# Create an object id string.
object_id = random_object_id()
# Create a new buffer and write to it.
length = 50
memory_buffer = self.plasma_client.create(object_id, length)
for i in range(length):
memory_buffer[i] = chr(i % 256)
# Seal the object.
self.plasma_client.seal(object_id)
# Get the object.
memory_buffer = self.plasma_client.get([object_id])[0]
for i in range(length):
self.assertEqual(memory_buffer[i], chr(i % 256))
def test_create_with_metadata(self):
for length in range(1000):
# Create an object id string.
object_id = random_object_id()
# Create a random metadata string.
metadata = generate_metadata(length)
# Create a new buffer and write to it.
memory_buffer = self.plasma_client.create(object_id, length, metadata)
for i in range(length):
memory_buffer[i] = chr(i % 256)
# Seal the object.
self.plasma_client.seal(object_id)
# Get the object.
memory_buffer = self.plasma_client.get([object_id])[0]
for i in range(length):
self.assertEqual(memory_buffer[i], chr(i % 256))
# Get the metadata.
metadata_buffer = self.plasma_client.get_metadata([object_id])[0]
self.assertEqual(len(metadata), len(metadata_buffer))
for i in range(len(metadata)):
self.assertEqual(chr(metadata[i]), metadata_buffer[i])
def test_create_existing(self):
# This test is partially used to test the code path in which we create an
# object with an ID that already exists
length = 100
for _ in range(1000):
object_id = random_object_id()
self.plasma_client.create(object_id, length, generate_metadata(length))
try:
self.plasma_client.create(object_id, length, generate_metadata(length))
except plasma.plasma_object_exists_error as e:
pass
else:
self.assertTrue(False)
def test_get(self):
num_object_ids = 100
# Test timing out of get with various timeouts.
for timeout in [0, 10, 100, 1000]:
object_ids = [random_object_id() for _ in range(num_object_ids)]
results = self.plasma_client.get(object_ids, timeout_ms=timeout)
self.assertEqual(results, num_object_ids * [None])
data_buffers = []
metadata_buffers = []
for i in range(num_object_ids):
if i % 2 == 0:
data_buffer, metadata_buffer = create_object_with_id(
self.plasma_client, object_ids[i], 2000, 2000)
data_buffers.append(data_buffer)
metadata_buffers.append(metadata_buffer)
# Test timing out from some but not all get calls with various timeouts.
for timeout in [0, 10, 100, 1000]:
data_results = self.plasma_client.get(object_ids, timeout_ms=timeout)
# metadata_results = self.plasma_client.get_metadata(object_ids,
# timeout_ms=timeout)
for i in range(num_object_ids):
if i % 2 == 0:
self.assertTrue(plasma.buffers_equal(data_buffers[i // 2],
data_results[i]))
# TODO(rkn): We should compare the metadata as well. But currently
# the types are different (e.g., memoryview versus bytearray).
# self.assertTrue(plasma.buffers_equal(metadata_buffers[i // 2],
# metadata_results[i]))
else:
          self.assertIsNone(data_results[i])
def test_store_full(self):
# The store is started with 1GB, so make sure that create throws an
# exception when it is full.
def assert_create_raises_plasma_full(unit_test, size):
partial_size = np.random.randint(size)
try:
_, memory_buffer, _ = create_object(unit_test.plasma_client,
partial_size,
size - partial_size)
except plasma.plasma_out_of_memory_error as e:
pass
else:
# For some reason the above didn't throw an exception, so fail.
unit_test.assertTrue(False)
# Create a list to keep some of the buffers in scope.
memory_buffers = []
_, memory_buffer, _ = create_object(self.plasma_client, 5 * 10 ** 8, 0)
memory_buffers.append(memory_buffer)
# Remaining space is 5 * 10 ** 8. Make sure that we can't create an object
# of size 5 * 10 ** 8 + 1, but we can create one of size 2 * 10 ** 8.
assert_create_raises_plasma_full(self, 5 * 10 ** 8 + 1)
_, memory_buffer, _ = create_object(self.plasma_client, 2 * 10 ** 8, 0)
del memory_buffer
_, memory_buffer, _ = create_object(self.plasma_client, 2 * 10 ** 8, 0)
del memory_buffer
assert_create_raises_plasma_full(self, 5 * 10 ** 8 + 1)
_, memory_buffer, _ = create_object(self.plasma_client, 2 * 10 ** 8, 0)
memory_buffers.append(memory_buffer)
# Remaining space is 3 * 10 ** 8.
assert_create_raises_plasma_full(self, 3 * 10 ** 8 + 1)
_, memory_buffer, _ = create_object(self.plasma_client, 10 ** 8, 0)
memory_buffers.append(memory_buffer)
# Remaining space is 2 * 10 ** 8.
assert_create_raises_plasma_full(self, 2 * 10 ** 8 + 1)
def test_contains(self):
fake_object_ids = [random_object_id() for _ in range(100)]
real_object_ids = [random_object_id() for _ in range(100)]
for object_id in real_object_ids:
self.assertFalse(self.plasma_client.contains(object_id))
self.plasma_client.create(object_id, 100)
self.plasma_client.seal(object_id)
self.assertTrue(self.plasma_client.contains(object_id))
for object_id in fake_object_ids:
self.assertFalse(self.plasma_client.contains(object_id))
for object_id in real_object_ids:
self.assertTrue(self.plasma_client.contains(object_id))
def test_hash(self):
# Check the hash of an object that doesn't exist.
object_id1 = random_object_id()
self.plasma_client.hash(object_id1)
length = 1000
# Create a random object, and check that the hash function always returns
# the same value.
metadata = generate_metadata(length)
memory_buffer = self.plasma_client.create(object_id1, length, metadata)
for i in range(length):
memory_buffer[i] = chr(i % 256)
self.plasma_client.seal(object_id1)
self.assertEqual(self.plasma_client.hash(object_id1),
self.plasma_client.hash(object_id1))
# Create a second object with the same value as the first, and check that
# their hashes are equal.
object_id2 = random_object_id()
memory_buffer = self.plasma_client.create(object_id2, length, metadata)
for i in range(length):
memory_buffer[i] = chr(i % 256)
self.plasma_client.seal(object_id2)
self.assertEqual(self.plasma_client.hash(object_id1),
self.plasma_client.hash(object_id2))
# Create a third object with a different value from the first two, and
# check that its hash is different.
object_id3 = random_object_id()
metadata = generate_metadata(length)
memory_buffer = self.plasma_client.create(object_id3, length, metadata)
for i in range(length):
memory_buffer[i] = chr((i + 1) % 256)
self.plasma_client.seal(object_id3)
self.assertNotEqual(self.plasma_client.hash(object_id1),
self.plasma_client.hash(object_id3))
# Create a fourth object with the same value as the third, but different
# metadata. Check that its hash is different from any of the previous
# three.
object_id4 = random_object_id()
metadata4 = generate_metadata(length)
memory_buffer = self.plasma_client.create(object_id4, length, metadata4)
for i in range(length):
memory_buffer[i] = chr((i + 1) % 256)
self.plasma_client.seal(object_id4)
self.assertNotEqual(self.plasma_client.hash(object_id1),
self.plasma_client.hash(object_id4))
self.assertNotEqual(self.plasma_client.hash(object_id3),
self.plasma_client.hash(object_id4))
def test_many_hashes(self):
hashes = []
length = 2 ** 10
for i in range(256):
object_id = random_object_id()
memory_buffer = self.plasma_client.create(object_id, length)
for j in range(length):
memory_buffer[j] = chr(i)
self.plasma_client.seal(object_id)
hashes.append(self.plasma_client.hash(object_id))
# Create objects of varying length. Each pair has two bits different.
for i in range(length):
object_id = random_object_id()
memory_buffer = self.plasma_client.create(object_id, length)
for j in range(length):
memory_buffer[j] = chr(0)
memory_buffer[i] = chr(1)
self.plasma_client.seal(object_id)
hashes.append(self.plasma_client.hash(object_id))
# Create objects of varying length, all with value 0.
for i in range(length):
object_id = random_object_id()
memory_buffer = self.plasma_client.create(object_id, i)
for j in range(i):
memory_buffer[j] = chr(0)
self.plasma_client.seal(object_id)
hashes.append(self.plasma_client.hash(object_id))
# Check that all hashes were unique.
self.assertEqual(len(set(hashes)), 256 + length + length)
# def test_individual_delete(self):
# length = 100
# # Create an object id string.
# object_id = random_object_id()
# # Create a random metadata string.
# metadata = generate_metadata(100)
# # Create a new buffer and write to it.
# memory_buffer = self.plasma_client.create(object_id, length, metadata)
# for i in range(length):
# memory_buffer[i] = chr(i % 256)
# # Seal the object.
# self.plasma_client.seal(object_id)
# # Check that the object is present.
# self.assertTrue(self.plasma_client.contains(object_id))
# # Delete the object.
# self.plasma_client.delete(object_id)
# # Make sure the object is no longer present.
# self.assertFalse(self.plasma_client.contains(object_id))
#
# def test_delete(self):
# # Create some objects.
# object_ids = [random_object_id() for _ in range(100)]
# for object_id in object_ids:
# length = 100
# # Create a random metadata string.
# metadata = generate_metadata(100)
# # Create a new buffer and write to it.
# memory_buffer = self.plasma_client.create(object_id, length, metadata)
# for i in range(length):
# memory_buffer[i] = chr(i % 256)
# # Seal the object.
# self.plasma_client.seal(object_id)
# # Check that the object is present.
# self.assertTrue(self.plasma_client.contains(object_id))
#
# # Delete the objects and make sure they are no longer present.
# for object_id in object_ids:
# # Delete the object.
# self.plasma_client.delete(object_id)
# # Make sure the object is no longer present.
# self.assertFalse(self.plasma_client.contains(object_id))
def test_illegal_functionality(self):
# Create an object id string.
object_id = random_object_id()
# Create a new buffer and write to it.
length = 1000
memory_buffer = self.plasma_client.create(object_id, length)
# Make sure we cannot access memory out of bounds.
self.assertRaises(Exception, lambda: memory_buffer[length])
# Seal the object.
self.plasma_client.seal(object_id)
# This test is commented out because it currently fails.
# # Make sure the object is ready only now.
# def illegal_assignment():
# memory_buffer[0] = chr(0)
# self.assertRaises(Exception, illegal_assignment)
# Get the object.
memory_buffer = self.plasma_client.get([object_id])[0]
# Make sure the object is read only.
def illegal_assignment():
memory_buffer[0] = chr(0)
self.assertRaises(Exception, illegal_assignment)
def test_evict(self):
client = self.plasma_client2
object_id1 = random_object_id()
b1 = client.create(object_id1, 1000)
client.seal(object_id1)
del b1
self.assertEqual(client.evict(1), 1000)
object_id2 = random_object_id()
object_id3 = random_object_id()
b2 = client.create(object_id2, 999)
b3 = client.create(object_id3, 998)
client.seal(object_id3)
del b3
self.assertEqual(client.evict(1000), 998)
object_id4 = random_object_id()
b4 = client.create(object_id4, 997)
client.seal(object_id4)
del b4
client.seal(object_id2)
del b2
self.assertEqual(client.evict(1), 997)
self.assertEqual(client.evict(1), 999)
object_id5 = random_object_id()
object_id6 = random_object_id()
object_id7 = random_object_id()
b5 = client.create(object_id5, 996)
b6 = client.create(object_id6, 995)
b7 = client.create(object_id7, 994)
client.seal(object_id5)
client.seal(object_id6)
client.seal(object_id7)
del b5
del b6
del b7
self.assertEqual(client.evict(2000), 996 + 995 + 994)
def test_subscribe(self):
# Subscribe to notifications from the Plasma Store.
self.plasma_client.subscribe()
for i in [1, 10, 100, 1000, 10000, 100000]:
object_ids = [random_object_id() for _ in range(i)]
metadata_sizes = [np.random.randint(1000) for _ in range(i)]
data_sizes = [np.random.randint(1000) for _ in range(i)]
for j in range(i):
self.plasma_client.create(
object_ids[j], size=data_sizes[j],
metadata=bytearray(np.random.bytes(metadata_sizes[j])))
self.plasma_client.seal(object_ids[j])
# Check that we received notifications for all of the objects.
for j in range(i):
notification_info = self.plasma_client.get_next_notification()
recv_objid, recv_dsize, recv_msize = notification_info
self.assertEqual(object_ids[j], recv_objid)
self.assertEqual(data_sizes[j], recv_dsize)
self.assertEqual(metadata_sizes[j], recv_msize)
def test_subscribe_deletions(self):
# Subscribe to notifications from the Plasma Store. We use plasma_client2
# to make sure that all used objects will get evicted properly.
self.plasma_client2.subscribe()
for i in [1, 10, 100, 1000, 10000, 100000]:
object_ids = [random_object_id() for _ in range(i)]
# Add 1 to the sizes to make sure we have nonzero object sizes.
metadata_sizes = [np.random.randint(1000) + 1 for _ in range(i)]
data_sizes = [np.random.randint(1000) + 1 for _ in range(i)]
for j in range(i):
x = self.plasma_client2.create(
object_ids[j], size=data_sizes[j],
metadata=bytearray(np.random.bytes(metadata_sizes[j])))
self.plasma_client2.seal(object_ids[j])
del x
# Check that we received notifications for creating all of the objects.
for j in range(i):
notification_info = self.plasma_client2.get_next_notification()
recv_objid, recv_dsize, recv_msize = notification_info
self.assertEqual(object_ids[j], recv_objid)
self.assertEqual(data_sizes[j], recv_dsize)
self.assertEqual(metadata_sizes[j], recv_msize)
# Check that we receive notifications for deleting all objects, as we
# evict them.
for j in range(i):
self.assertEqual(self.plasma_client2.evict(1),
data_sizes[j] + metadata_sizes[j])
notification_info = self.plasma_client2.get_next_notification()
recv_objid, recv_dsize, recv_msize = notification_info
self.assertEqual(object_ids[j], recv_objid)
self.assertEqual(-1, recv_dsize)
self.assertEqual(-1, recv_msize)
# Test multiple deletion notifications. The first 9 object IDs have size 0,
# and the last has a nonzero size. When Plasma evicts 1 byte, it will evict
# all objects, so we should receive deletion notifications for each.
num_object_ids = 10
object_ids = [random_object_id() for _ in range(num_object_ids)]
metadata_sizes = [0] * (num_object_ids - 1)
data_sizes = [0] * (num_object_ids - 1)
metadata_sizes.append(np.random.randint(1000))
data_sizes.append(np.random.randint(1000))
for i in range(num_object_ids):
x = self.plasma_client2.create(
object_ids[i], size=data_sizes[i],
metadata=bytearray(np.random.bytes(metadata_sizes[i])))
self.plasma_client2.seal(object_ids[i])
del x
for i in range(num_object_ids):
notification_info = self.plasma_client2.get_next_notification()
recv_objid, recv_dsize, recv_msize = notification_info
self.assertEqual(object_ids[i], recv_objid)
self.assertEqual(data_sizes[i], recv_dsize)
self.assertEqual(metadata_sizes[i], recv_msize)
self.assertEqual(self.plasma_client2.evict(1),
data_sizes[-1] + metadata_sizes[-1])
for i in range(num_object_ids):
notification_info = self.plasma_client2.get_next_notification()
recv_objid, recv_dsize, recv_msize = notification_info
self.assertEqual(object_ids[i], recv_objid)
self.assertEqual(-1, recv_dsize)
self.assertEqual(-1, recv_msize)
class TestPlasmaManager(unittest.TestCase):
def setUp(self):
# Start two PlasmaStores.
store_name1, self.p2 = plasma.start_plasma_store(use_valgrind=USE_VALGRIND)
store_name2, self.p3 = plasma.start_plasma_store(use_valgrind=USE_VALGRIND)
# Start a Redis server.
redis_address = services.address("127.0.0.1", services.start_redis()[0])
# Start two PlasmaManagers.
manager_name1, self.p4, self.port1 = plasma.start_plasma_manager(
store_name1, redis_address, use_valgrind=USE_VALGRIND)
manager_name2, self.p5, self.port2 = plasma.start_plasma_manager(
store_name2, redis_address, use_valgrind=USE_VALGRIND)
# Connect two PlasmaClients.
self.client1 = plasma.PlasmaClient(store_name1, manager_name1)
self.client2 = plasma.PlasmaClient(store_name2, manager_name2)
# Store the processes that will be explicitly killed during tearDown so
# that a test case can remove ones that will be killed during the test.
# NOTE: If this specific order is changed, valgrind will fail.
self.processes_to_kill = [self.p4, self.p5, self.p2, self.p3]
def tearDown(self):
# Check that the processes are still alive.
for process in self.processes_to_kill:
self.assertEqual(process.poll(), None)
# Kill the Plasma store and Plasma manager processes.
if USE_VALGRIND:
# Give processes opportunity to finish work.
time.sleep(1)
for process in self.processes_to_kill:
process.send_signal(signal.SIGTERM)
process.wait()
if process.returncode != 0:
print("aborting due to valgrind error")
os._exit(-1)
else:
for process in self.processes_to_kill:
process.kill()
# Clean up the Redis server.
services.cleanup()
def test_fetch(self):
for _ in range(10):
# Create an object.
object_id1, memory_buffer1, metadata1 = create_object(self.client1, 2000,
2000)
self.client1.fetch([object_id1])
self.assertEqual(self.client1.contains(object_id1), True)
self.assertEqual(self.client2.contains(object_id1), False)
# Fetch the object from the other plasma manager.
# TODO(rkn): Right now we must wait for the object table to be updated.
while not self.client2.contains(object_id1):
self.client2.fetch([object_id1])
# Compare the two buffers.
assert_get_object_equal(self, self.client1, self.client2, object_id1,
memory_buffer=memory_buffer1, metadata=metadata1)
# Test that we can call fetch on object IDs that don't exist yet.
object_id2 = random_object_id()
self.client1.fetch([object_id2])
self.assertEqual(self.client1.contains(object_id2), False)
memory_buffer2, metadata2 = create_object_with_id(self.client2, object_id2,
2000, 2000)
# # Check that the object has been fetched.
# self.assertEqual(self.client1.contains(object_id2), True)
# Compare the two buffers.
# assert_get_object_equal(self, self.client1, self.client2, object_id2,
# memory_buffer=memory_buffer2, metadata=metadata2)
# Test calling the same fetch request a bunch of times.
object_id3 = random_object_id()
self.assertEqual(self.client1.contains(object_id3), False)
self.assertEqual(self.client2.contains(object_id3), False)
for _ in range(10):
self.client1.fetch([object_id3])
self.client2.fetch([object_id3])
memory_buffer3, metadata3 = create_object_with_id(self.client1, object_id3,
2000, 2000)
for _ in range(10):
self.client1.fetch([object_id3])
self.client2.fetch([object_id3])
# TODO(rkn): Right now we must wait for the object table to be updated.
while not self.client2.contains(object_id3):
self.client2.fetch([object_id3])
assert_get_object_equal(self, self.client1, self.client2, object_id3,
memory_buffer=memory_buffer3, metadata=metadata3)
def test_fetch_multiple(self):
for _ in range(20):
# Create two objects and a third fake one that doesn't exist.
object_id1, memory_buffer1, metadata1 = create_object(self.client1, 2000,
2000)
missing_object_id = random_object_id()
object_id2, memory_buffer2, metadata2 = create_object(self.client1, 2000,
2000)
object_ids = [object_id1, missing_object_id, object_id2]
# Fetch the objects from the other plasma store. The second object ID
# should timeout since it does not exist.
# TODO(rkn): Right now we must wait for the object table to be updated.
while ((not self.client2.contains(object_id1)) or
(not self.client2.contains(object_id2))):
self.client2.fetch(object_ids)
# Compare the buffers of the objects that do exist.
assert_get_object_equal(self, self.client1, self.client2, object_id1,
memory_buffer=memory_buffer1, metadata=metadata1)
assert_get_object_equal(self, self.client1, self.client2, object_id2,
memory_buffer=memory_buffer2, metadata=metadata2)
# Fetch in the other direction. The fake object still does not exist.
self.client1.fetch(object_ids)
assert_get_object_equal(self, self.client2, self.client1, object_id1,
memory_buffer=memory_buffer1, metadata=metadata1)
assert_get_object_equal(self, self.client2, self.client1, object_id2,
memory_buffer=memory_buffer2, metadata=metadata2)
# Check that we can call fetch with duplicated object IDs.
object_id3 = random_object_id()
self.client1.fetch([object_id3, object_id3])
object_id4, memory_buffer4, metadata4 = create_object(self.client1, 2000,
2000)
time.sleep(0.1)
# TODO(rkn): Right now we must wait for the object table to be updated.
while not self.client2.contains(object_id4):
self.client2.fetch([object_id3, object_id3, object_id4, object_id4])
assert_get_object_equal(self, self.client2, self.client1, object_id4,
memory_buffer=memory_buffer4, metadata=metadata4)
def test_wait(self):
# Test timeout.
obj_id0 = random_object_id()
self.client1.wait([obj_id0], timeout=100, num_returns=1)
# If we get here, the test worked.
# Test wait if local objects available.
obj_id1 = random_object_id()
self.client1.create(obj_id1, 1000)
self.client1.seal(obj_id1)
ready, waiting = self.client1.wait([obj_id1], timeout=100, num_returns=1)
self.assertEqual(set(ready), set([obj_id1]))
self.assertEqual(waiting, [])
# Test wait if only one object available and only one object waited for.
obj_id2 = random_object_id()
self.client1.create(obj_id2, 1000)
# Don't seal.
ready, waiting = self.client1.wait([obj_id2, obj_id1], timeout=100,
num_returns=1)
self.assertEqual(set(ready), set([obj_id1]))
self.assertEqual(set(waiting), set([obj_id2]))
# Test wait if object is sealed later.
obj_id3 = random_object_id()
def finish():
self.client2.create(obj_id3, 1000)
self.client2.seal(obj_id3)
t = threading.Timer(0.1, finish)
t.start()
ready, waiting = self.client1.wait([obj_id3, obj_id2, obj_id1],
timeout=1000, num_returns=2)
self.assertEqual(set(ready), set([obj_id1, obj_id3]))
self.assertEqual(set(waiting), set([obj_id2]))
# Test if the appropriate number of objects is shown if some objects are
# not ready.
ready, waiting = self.client1.wait([obj_id3, obj_id2, obj_id1], 100, 3)
self.assertEqual(set(ready), set([obj_id1, obj_id3]))
self.assertEqual(set(waiting), set([obj_id2]))
# Don't forget to seal obj_id2.
self.client1.seal(obj_id2)
# Test calling wait a bunch of times.
object_ids = []
# TODO(rkn): Increasing n to 100 (or larger) will cause failures. The
# problem appears to be that the number of timers added to the manager
# event loop slow down the manager so much that some of the asynchronous
# Redis commands timeout triggering fatal failure callbacks.
n = 40
for i in range(n * (n + 1) // 2):
if i % 2 == 0:
object_id, _, _ = create_object(self.client1, 200, 200)
else:
object_id, _, _ = create_object(self.client2, 200, 200)
object_ids.append(object_id)
# Try waiting for all of the object IDs on the first client.
waiting = object_ids
retrieved = []
for i in range(1, n + 1):
ready, waiting = self.client1.wait(waiting, timeout=1000, num_returns=i)
self.assertEqual(len(ready), i)
retrieved += ready
self.assertEqual(set(retrieved), set(object_ids))
ready, waiting = self.client1.wait(object_ids, timeout=1000,
num_returns=len(object_ids))
self.assertEqual(set(ready), set(object_ids))
self.assertEqual(waiting, [])
# Try waiting for all of the object IDs on the second client.
waiting = object_ids
retrieved = []
for i in range(1, n + 1):
ready, waiting = self.client2.wait(waiting, timeout=1000, num_returns=i)
self.assertEqual(len(ready), i)
retrieved += ready
self.assertEqual(set(retrieved), set(object_ids))
ready, waiting = self.client2.wait(object_ids, timeout=1000,
num_returns=len(object_ids))
self.assertEqual(set(ready), set(object_ids))
self.assertEqual(waiting, [])
# Make sure that wait returns when the requested number of object IDs are
# available and does not wait for all object IDs to be available.
object_ids = [random_object_id() for _ in range(9)] + [20 * b'\x00']
object_ids_perm = object_ids[:]
random.shuffle(object_ids_perm)
for i in range(10):
if i % 2 == 0:
create_object_with_id(self.client1, object_ids_perm[i], 2000, 2000)
else:
create_object_with_id(self.client2, object_ids_perm[i], 2000, 2000)
ready, waiting = self.client1.wait(object_ids, num_returns=(i + 1))
self.assertEqual(set(ready), set(object_ids_perm[:(i + 1)]))
self.assertEqual(set(waiting), set(object_ids_perm[(i + 1):]))
def test_transfer(self):
num_attempts = 100
for _ in range(100):
# Create an object.
object_id1, memory_buffer1, metadata1 = create_object(self.client1, 2000,
2000)
      # Transfer the buffer to the other Plasma store. There is a race
# condition on the create and transfer of the object, so keep trying
# until the object appears on the second Plasma store.
for i in range(num_attempts):
self.client1.transfer("1192.168.3.11", self.port2, object_id1)
buff = self.client2.get([object_id1], timeout_ms=100)[0]
if buff is not None:
break
self.assertNotEqual(buff, None)
del buff
# Compare the two buffers.
assert_get_object_equal(self, self.client1, self.client2, object_id1,
memory_buffer=memory_buffer1, metadata=metadata1)
# # Transfer the buffer again.
# self.client1.transfer("127.0.0.1", self.port2, object_id1)
# # Compare the two buffers.
# assert_get_object_equal(self, self.client1, self.client2, object_id1,
# memory_buffer=memory_buffer1,
# metadata=metadata1)
# Create an object.
object_id2, memory_buffer2, metadata2 = create_object(self.client2,
20000, 20000)
      # Transfer the buffer to the other Plasma store. There is a race
# condition on the create and transfer of the object, so keep trying
# until the object appears on the second Plasma store.
for i in range(num_attempts):
self.client2.transfer("127.0.0.1", self.port1, object_id2)
buff = self.client1.get([object_id2], timeout_ms=100)[0]
if buff is not None:
break
self.assertNotEqual(buff, None)
del buff
# Compare the two buffers.
assert_get_object_equal(self, self.client1, self.client2, object_id2,
memory_buffer=memory_buffer2, metadata=metadata2)
def test_illegal_functionality(self):
# Create an object id string.
# object_id = random_object_id()
# Create a new buffer.
# memory_buffer = self.client1.create(object_id, 20000)
# This test is commented out because it currently fails.
# # Transferring the buffer before sealing it should fail.
# self.assertRaises(Exception,
# lambda : self.manager1.transfer(1, object_id))
pass
def test_stresstest(self):
a = time.time()
object_ids = []
for i in range(10000): # TODO(pcm): increase this to 100000.
object_id = random_object_id()
object_ids.append(object_id)
self.client1.create(object_id, 1)
self.client1.seal(object_id)
for object_id in object_ids:
self.client1.transfer("127.0.0.1", self.port2, object_id)
b = time.time() - a
print("it took", b, "seconds to put and transfer the objects")
class TestPlasmaManagerRecovery(unittest.TestCase):
def setUp(self):
# Start a Plasma store.
self.store_name, self.p2 = plasma.start_plasma_store(
use_valgrind=USE_VALGRIND)
# Start a Redis server.
self.redis_address = services.address("127.0.0.1",
services.start_redis()[0])
# Start a PlasmaManagers.
manager_name, self.p3, self.port1 = plasma.start_plasma_manager(
self.store_name,
self.redis_address,
use_valgrind=USE_VALGRIND)
# Connect a PlasmaClient.
self.client = plasma.PlasmaClient(self.store_name, manager_name)
# Store the processes that will be explicitly killed during tearDown so
# that a test case can remove ones that will be killed during the test.
# NOTE: The plasma managers must be killed before the plasma store since
# plasma store death will bring down the managers.
self.processes_to_kill = [self.p3, self.p2]
def tearDown(self):
# Check that the processes are still alive.
for process in self.processes_to_kill:
self.assertEqual(process.poll(), None)
# Kill the Plasma store and Plasma manager processes.
if USE_VALGRIND:
# Give processes opportunity to finish work.
time.sleep(1)
for process in self.processes_to_kill:
process.send_signal(signal.SIGTERM)
process.wait()
if process.returncode != 0:
print("aborting due to valgrind error")
os._exit(-1)
else:
for process in self.processes_to_kill:
process.kill()
# Clean up the Redis server.
services.cleanup()
def test_delayed_start(self):
num_objects = 10
# Create some objects using one client.
object_ids = [random_object_id() for _ in range(num_objects)]
for i in range(10):
create_object_with_id(self.client, object_ids[i], 2000, 2000)
# Wait until the objects have been sealed in the store.
ready, waiting = self.client.wait(object_ids, num_returns=num_objects)
self.assertEqual(set(ready), set(object_ids))
self.assertEqual(waiting, [])
# Start a second plasma manager attached to the same store.
manager_name, self.p5, self.port2 = plasma.start_plasma_manager(
self.store_name, self.redis_address, use_valgrind=USE_VALGRIND)
self.processes_to_kill = [self.p5] + self.processes_to_kill
# Check that the second manager knows about existing objects.
client2 = plasma.PlasmaClient(self.store_name, manager_name)
ready, waiting = [], object_ids
while True:
ready, waiting = client2.wait(object_ids, num_returns=num_objects,
timeout=0)
if len(ready) == len(object_ids):
break
self.assertEqual(set(ready), set(object_ids))
self.assertEqual(waiting, [])
if __name__ == "__main__":
if len(sys.argv) > 1:
# Pop the argument so we don't mess with unittest's own argument parser.
if sys.argv[-1] == "valgrind":
arg = sys.argv.pop()
USE_VALGRIND = True
print("Using valgrind for tests")
unittest.main(verbosity=2)
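# Invocation sketch (script filename assumed): run the suite directly, optionally
# appending "valgrind" as the last CLI argument to enable the valgrind-aware
# teardown checks above, e.g.
#   python test_plasma_manager.py valgrind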
|
StarcoderdataPython
|
3362661
|
import unittest
from solve import Sudoku
test_sudoku = [[5, 3, 0, 0, 7, 0, 0, 0, 0],
[6, 0, 0, 1, 9, 5, 0, 0, 0],
[0, 9, 8, 0, 0, 0, 0, 6, 0],
[8, 0, 0, 0, 6, 0, 0, 0, 3],
[4, 0, 0, 8, 0, 3, 0, 0, 1],
[7, 0, 0, 0, 2, 0, 0, 0, 6],
[0, 6, 0, 0, 0, 0, 2, 8, 0],
[0, 0, 0, 4, 1, 9, 0, 0, 5],
[0, 0, 0, 0, 8, 0, 0, 7, 9]]
test_solution = [[5, 3, 4, 6, 7, 8, 9, 1, 2],
[6, 7, 2, 1, 9, 5, 3, 4, 8],
[1, 9, 8, 3, 4, 2, 5, 6, 7],
[8, 5, 9, 7, 6, 1, 4, 2, 3],
[4, 2, 6, 8, 5, 3, 7, 9, 1],
[7, 1, 3, 9, 2, 4, 8, 5, 6],
[9, 6, 1, 5, 3, 7, 2, 8, 4],
[2, 8, 7, 4, 1, 9, 6, 3, 5],
[3, 4, 5, 2, 8, 6, 1, 7, 9]]
class myTests(unittest.TestCase):
def test_solution(self):
self.assertEqual(Sudoku.solve(test_sudoku), test_solution)
def test_poss(self):
self.assertEqual(Sudoku.poss(test_sudoku, 2, 0, 4), True)
def test_convert_row(self):
test_row = '530070000'
self.assertEqual(Sudoku.convert_row(test_row), [5,3,0,0,7,0,0,0,0])
if __name__ == "__main__":
    unittest.main()
|
StarcoderdataPython
|
3499494
|
import os
from flask_socketio import SocketIO
from celery.utils.log import get_task_logger
from celery.signals import after_setup_task_logger, after_setup_logger
from AXIOME3_app.extensions import celery
from AXIOME3_app.tasks.utils import (
configure_celery_task_logger,
log_status,
emit_message,
run_command,
cleanup_error_message
)
# Import from AXIOME3 pipeline
# Note PYTHONPATH is added in docker-compose.yml to enable searching in pipeline directory
from exceptions.exception import AXIOME3Error as AXIOME3PipelineError
from scripts.qiime2_helper.triplot import (
prep_triplot_input,
make_triplot,
save_plot
)
logger = get_task_logger(__name__)
@after_setup_task_logger.connect
def after_setup_celery_task_logger(logger, **kwargs):
""" This function sets the 'celery.task' logger handler and formatter """
configure_celery_task_logger(logger)
@celery.task(name="extension.triplot")
def triplot_task(_id, URL, task_progress_file, feature_table_artifact_path,
taxonomy_artifact_path, metadata_path, environmental_metadata_path,
sampling_depth, ordination_collapse_level, wascores_collapse_level,
dissmilarity_index, R2_threshold, pval_threshold, wa_threshold, fill_variable,
fill_variable_dtype, colour_set, brewer_type, point_size, alpha, stroke,
PC_axis_one, PC_axis_two, width, height, x_axis_text_size, y_axis_text_size,
legend_title_size, legend_text_size, taxa_text_size, vector_arrow_text_size):
logger.info("Triplot task started for 'session, {_id},'".format(_id=_id))
local_socketio = SocketIO(message_queue=URL)
channel = 'test'
namespace = '/AXIOME3'
room = _id
output_dir = os.path.join('/output', _id)
filename = 'plot'
message = 'Generating Triplot...'
emit_message(
socketio=local_socketio,
channel=channel,
message=message,
namespace=namespace,
room=room
)
log_status(task_progress_file, message)
try:
merged_df, vector_arrow_df, wascores_df, proportion_explained, projection_df, sample_summary = prep_triplot_input(
sample_metadata_path=metadata_path,
env_metadata_path=environmental_metadata_path,
feature_table_artifact_path=feature_table_artifact_path,
taxonomy_artifact_path=taxonomy_artifact_path,
sampling_depth=sampling_depth,
ordination_collapse_level=ordination_collapse_level,
wascores_collapse_level=wascores_collapse_level,
dissmilarity_index=dissmilarity_index,
R2_threshold=R2_threshold,
pval_threshold=pval_threshold,
wa_threshold=wa_threshold,
PC_axis_one=PC_axis_one,
PC_axis_two=PC_axis_two,
output_dir=output_dir
)
# Save vector arrow df
projection_df_fname = os.path.join(output_dir, "vector_arrow_summary.csv")
projection_df.to_csv(projection_df_fname)
# Save sample summary
sample_summary_fname = os.path.join(output_dir, "sample_summary.csv")
with open(sample_summary_fname, 'w') as fh:
fh.write(sample_summary)
# Replace with AXIOME3_Error in the future?
except AXIOME3PipelineError as err:
message = "Error: " + str(err)
emit_message(
socketio=local_socketio,
channel=channel,
message=message,
namespace=namespace,
room=room
)
log_status(task_progress_file, message)
return
except Exception as err:
logger.error(err, exc_info=True)
message = "Error: Internal Server Error..."
emit_message(
socketio=local_socketio,
channel=channel,
message=message,
namespace=namespace,
room=room
)
log_status(task_progress_file, message)
return
if(fill_variable_dtype == 'numeric'):
fill_variable_dtype = 'float64'
try:
triplot = make_triplot(
merged_df=merged_df,
vector_arrow_df=vector_arrow_df,
wascores_df=wascores_df,
proportion_explained=proportion_explained,
fill_variable=fill_variable,
fill_variable_dtype=fill_variable_dtype,
palette=colour_set,
brewer_type=brewer_type,
PC_axis_one=PC_axis_one,
PC_axis_two=PC_axis_two,
alpha=alpha,
stroke=stroke,
point_size=point_size,
x_axis_text_size=x_axis_text_size,
y_axis_text_size=y_axis_text_size,
legend_title_size=legend_title_size,
legend_text_size=legend_text_size,
taxa_text_size=taxa_text_size,
vector_arrow_text_size=vector_arrow_text_size
)
except AXIOME3PipelineError as err:
message = "Error: " + str(err)
emit_message(
socketio=local_socketio,
channel=channel,
message=message,
namespace=namespace,
room=room
)
log_status(task_progress_file, message)
return
except Exception as err:
logger.error(err, exc_info=True)
message = "Error: Internal Server Error..."
emit_message(
socketio=local_socketio,
channel=channel,
message=message,
namespace=namespace,
room=room
)
log_status(task_progress_file, message)
return
try:
# Save as pdf and png
save_plot(plot=triplot, filename=filename, output_dir=output_dir, file_format='pdf', width=float(width), height=float(height))
save_plot(plot=triplot, filename=filename, output_dir=output_dir, file_format='png', width=float(width), height=float(height))
except AXIOME3PipelineError as err:
message = "Error: " + str(err)
emit_message(
socketio=local_socketio,
channel=channel,
message=message,
namespace=namespace,
room=room
)
log_status(task_progress_file, message)
return
except Exception as err:
logger.error(err, exc_info=True)
message = "Error: Internal Server Error..."
emit_message(
socketio=local_socketio,
channel=channel,
message=message,
namespace=namespace,
room=room
)
log_status(task_progress_file, message)
return
message = "Done!"
emit_message(
socketio=local_socketio,
channel=channel,
message=message,
namespace=namespace,
room=room
)
log_status(task_progress_file, message)
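# Dispatch sketch (illustrative, not part of the original module): because the task
# is registered by name, callers can queue it without importing this file, e.g.
#   celery.send_task("extension.triplot", args=[...])  # args follow the signature above
# send_task() is standard Celery API; the long argument list itself is elided here.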
|
StarcoderdataPython
|
12855931
|
<reponame>exhuma/metafilter
from ConfigParser import SafeConfigParser
from cStringIO import StringIO
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy import MetaData
from sqlalchemy.orm import sessionmaker
from os.path import sep
from hashlib import md5
from datetime import datetime, timedelta
import re
import logging
import functools
NON_LTREE = re.compile(r'[^a-zA-Z0-9/]')
LOG = logging.getLogger(__name__)
CONFIG = None
metadata = MetaData()
Session = sessionmaker()
def loadconfig(filename):
defaults=StringIO("""\
[cli_logging]
error_log=
""")
config = SafeConfigParser()
config.readfp(defaults)
config.read(filename)
# ConfigParser.get() takes no default value here (its third positional argument
# is 'raw'), so check for the option explicitly to get the intended None fallback.
dsn = config.get('database', 'dsn') if config.has_option('database', 'dsn') else None
if not dsn:
raise ValueError('No DSN found in the config file! This is required!')
set_dsn(dsn)
return config
class memoized(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
obsoletion = datetime.now() - timedelta(seconds=60*5)
if args in self.cache and self.cache[args][1] < obsoletion:
# value too old. Remove it from the cache
LOG.debug("Removing obsolete value for args %r from cache." % (args,))
del(self.cache[args])
try:
output = self.cache[args][0]
LOG.debug("Cache hit for args %r." % (args,))
return output
except KeyError:
LOG.debug("Initialising cache for args %r." % (args,))
value = self.func(*args)
if isinstance(value, sqlalchemy.orm.query.Query):
result = value.all()
self.cache[args] = (result, datetime.now())
return result
else:
self.cache[args] = (value, datetime.now())
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
LOG.warning("Uncachable function call for args %r" % (args,))
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def __get__(self, obj, objtype):
"""Support instance methods."""
return functools.partial(self.__call__, obj)
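# Minimal usage sketch for the decorator above (illustrative only; this helper is
# not referenced elsewhere in the module). Repeated calls with the same argument
# inside the five-minute window return the cached value instead of re-running the body.
@memoized
def _memoized_example(n):
    """Toy pure function demonstrating per-argument caching."""
    return n * n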
def uri_depth(uri):
"determines the depth of a uri"
if not uri:
return 0
if uri.endswith(sep):
uri = uri[0:-1]
return len(uri.split(sep))
def file_md5(path):
"""
Return the MD5 hash of the file
"""
hash = md5()
fptr = open(path, "rb")
chunk = fptr.read(1024)
while chunk:
hash.update(chunk)
chunk = fptr.read(1024)
fptr.close()
return hash.hexdigest()
def uri_to_ltree(uri):
if not uri or uri == "/":
return "ROOT"
if uri.endswith(sep):
uri = uri[0:-1]
if uri.startswith(sep):
ltree = "ROOT%s%s" % (sep, uri[1:])
else:
ltree = uri
# the ltree module uses "." as path separator. Replace dots by
# underscores and path separators by dots
ltree = NON_LTREE.sub("_", ltree)
ltree = ltree.replace(sep, ".")
return ltree
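# Worked examples for the helpers above (derived from the code; shown as comments
# to avoid import-time side effects):
#   uri_to_ltree("/photos/2010/img_01.jpg") -> "ROOT.photos.2010.img_01_jpg"
#   uri_depth("photos/2010")                -> 2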
def set_dsn(dsn):
engine = create_engine(dsn)
metadata.bind = engine
Session.bind = engine
from metafilter.model.nodes import Node
from metafilter.model.queries import Query
from metafilter.model.tags import Tag
#
# Parse the config file
#
from os.path import join, exists, expanduser
from os import getcwd
paths = [
join(getcwd(), 'config.ini'),
join(expanduser("~"), '.metafilter', 'config.ini'),
join('/', 'etc', 'metafilter', 'config.ini'),
]
for path in paths:
if not exists(path):
continue
LOG.debug('Reading config from %s' % path)
CONFIG = loadconfig(path)
if not CONFIG:
LOG.error('Unable to open config file (search order: %s)' % (', '.join(paths)))
|
StarcoderdataPython
|
1604329
|
<reponame>csnardi/openstates-core
# Generated by Django 3.2.2 on 2021-10-13 19:07
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("data", "0041_personoffice"),
]
operations = [
migrations.DeleteModel(
name="PersonContactDetail",
),
]
|
StarcoderdataPython
|
8043597
|
#
# OtterTune - upload_batch.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
import logging
import os
import urllib2
import glob
import numpy as np
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
register_openers()
# Logging
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.StreamHandler())
LOG.setLevel(logging.INFO)
class ResultUploader(object):
SUMMARY_EXT = '.summary'
PARAMS_EXT = '.params'
METRICS_EXT = '.metrics'
SAMPLES_EXT = '.samples'
EXPCFG_EXT = '.expconfig'
RAW_EXT = '.csv'
REQ_EXTS = [SUMMARY_EXT, PARAMS_EXT, METRICS_EXT, SAMPLES_EXT, EXPCFG_EXT]
def __init__(self, upload_code, upload_url):
self.upload_code_ = upload_code
self.upload_url_ = upload_url
def upload_batch(self, directories, max_files=5):
for d in directories:
cluster_name = os.path.basename(d)
fnames = glob.glob(os.path.join(d, '*.summary'))
if max_files < len(fnames):
idxs = np.random.choice(len(fnames), max_files)
fnames = [fnames[i] for i in idxs]
bases = [fn.split('.summary')[0] for fn in fnames]
# Verify required extensions exist
for base in bases:
complete = True
for ext in self.REQ_EXTS:
next_file = base + ext
if not os.path.exists(next_file):
LOG.warn("WARNING: missing file %s, skipping...", next_file)
complete = False
break
if not complete:
continue
self.upload(base, cluster_name)
def upload(self, basepath, cluster_name):
exts = list(self.REQ_EXTS)
if os.path.exists(basepath + self.RAW_EXT):
exts.append(self.RAW_EXT)
fhandlers = {ext: open(basepath + ext, 'r') for ext in exts}
params = {
'upload_code': self.upload_code_,
'cluster_name': cluster_name,
'summary_data': fhandlers[self.SUMMARY_EXT],
'db_metrics_data': fhandlers[self.METRICS_EXT],
'db_parameters_data': fhandlers[self.PARAMS_EXT],
'sample_data': fhandlers[self.SAMPLES_EXT],
'benchmark_conf_data': fhandlers[self.EXPCFG_EXT],
}
if self.RAW_EXT in fhandlers:
params['raw_data'] = fhandlers[self.RAW_EXT]
datagen, headers = multipart_encode(params)
request = urllib2.Request(self.upload_url_, datagen, headers)
LOG.info(urllib2.urlopen(request).read())
for fh in fhandlers.values():
fh.close()
def main():
url = 'http://0.0.0.0:8000/new_result/'
upload_code = 'O50GE1HC8S1BHU8L6F8D'
uploader = ResultUploader(upload_code, url)
dirnames = glob.glob(os.path.join(os.path.expanduser(
'~'), 'Dropbox/Apps/ottertune/data/sample_data/exps_*'))[:2]
uploader.upload_batch(dirnames, max_files=3)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
46955
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from company.models import Company
from company.tasks import deploy_new_company
@receiver(post_save, sender=Company)
def company_created(sender, instance, created, **kwargs):
if created:
deploy_new_company.delay(instance.id)
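# For context (assumed shape; the task is defined in company/tasks.py, not here):
# deploy_new_company is a Celery task that receives the new company's primary key,
# roughly:
#   @shared_task
#   def deploy_new_company(company_id):
#       company = Company.objects.get(pk=company_id)
#       ...  # provision resources for the new company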
|
StarcoderdataPython
|
1999326
|
<filename>examples/simulations/gyroscope1.py
"""
Simulation of a gyroscope hanging from a spring.
"""
# (adapted by <NAME> from <NAME>, 2009)
from __future__ import division, print_function
from vtkplotter import *
# ############################################################ parameters
dt = 0.005 # time step
ks = 15 # spring stiffness
Lrest = 1 # unstretched length of spring
Ls = 1 # length of gyroscope shaft
M = 1 # mass of gyroscope (massless shaft)
R = 0.4 # radius of gyroscope rotor
omega = 50 # angular velocity of rotor (rad/s, not shown)
gpos = vector(0, 0, 0) # initial position of spring free end
# ############################################################ inits
top = vector(0, 2, 0) # where top of spring is held
precess = vector(0, 0, 0) # initial momentum of center of mass
Fgrav = vector(0, -M * 9.81, 0)
gaxis = vector(0, 0, 1) # initial orientation of gyroscope
gaxis = versor(gaxis)
I = 1 / 2 * M * R ** 2 # moment of inertia of gyroscope
Lrot = I * omega * gaxis # angular momentum
cm = gpos + 0.5 * Ls * gaxis # center of mass of shaft
# ############################################################ the scene
vp = Plotter(axes=0, interactive=0, bg="w")
vp += Text(__doc__)
shaft = Cylinder([[0, 0, 0], Ls * gaxis], r=0.03, c="dg")
rotor = Cylinder([(Ls - 0.55) * gaxis, (Ls - 0.45) * gaxis], r=R, c="t")
bar = Cylinder([Ls*gaxis/2-R*vector(0,1,0), Ls*gaxis/2+R*vector(0,1,0)], r=R/6, c="r")
gyro = shaft + rotor + bar # group actors into a single one
spring = Spring(top, gpos, r=0.06, thickness=0.01, c="gray")
vp += [gyro, spring] # add it to Plotter.
vp += Box(top, length=0.2, width=0.02, height=0.2, c="gray")
vp += Box(pos=(0, 0.5, 0), length=2.6, width=3, height=2.6, c="gray", alpha=0.2).wireframe(1)
# ############################################################ the physics
pb = ProgressBar(0, 5, dt, c="b")
for t in pb.range():
Fspring = -ks * versor(gpos - top) * (mag(gpos - top) - Lrest)
torque = cross(-1 / 2 * Ls * versor(Lrot), Fspring) # torque about center of mass
Lrot += torque * dt
precess += (Fgrav + Fspring) * dt # momentum of center of mass
cm += (precess / M) * dt
gpos = cm - 1 / 2 * Ls * versor(Lrot)
# set orientation along gaxis and rotate it around its axis by omega*t degrees
gyro.orientation(Lrot, rotation=omega * t, rad=True).pos(gpos)
spring.stretch(top, gpos)
vp.show()
pb.print()
vp.show(interactive=1)
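# Integration scheme used in the loop above (explicit Euler with step dt), summarised:
#   L_rot <- L_rot + torque*dt           where torque = -(Ls/2)*unit(L_rot) x F_spring
#   p     <- p + (F_grav + F_spring)*dt  (momentum of the centre of mass)
#   cm    <- cm + (p/M)*dt
#   gpos   = cm - (Ls/2)*unit(L_rot)     (spring end recovered from cm and shaft axis)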
|
StarcoderdataPython
|
3395042
|
# -*- coding: utf-8 -*-
"""
======================================================
Test_mtom_attachment :mod:`tests.test_mtom_attachment`
======================================================
"""
import os
from os.path import join, basename
import tempfile
import shutil
import email
import hashlib
from lxml import etree
import pytest
import requests_mock
from zeep import client
from zeep import ns
from zeep import transport_with_attach as twa
TMP_DIR = tempfile.gettempdir()
TMP_OUT = join(TMP_DIR, 'mtom_test', 'out')
TMP_IN = join(TMP_DIR, 'mtom_test', 'in')
MB = 2**20
RESPONSE = b"""
<?xml version="1.0"?>
<soapenv:Envelope
xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:exam="http://example.com/">
<soapenv:Header/>
<soapenv:Body>
<exam:uploadResponse>
<message>ok</message>
</exam:uploadResponse>
</soapenv:Body>
</soapenv:Envelope>""".strip()
CLIENT = client.Client(
'tests/wsdl_files/mtom_attachment.wsdl', transport=twa.TransportWithAttach())
@pytest.mark.requests
def create_service():
"""Create service"""
return CLIENT.create_service(
'{http://test.ellethee.org/}MtomAttachmentBinding',
'http://test.python-zeep.org/x')
def create_random_file(filename, size=1024):
"""Create random file"""
with open(filename, 'wb') as fout:
fout.write(os.urandom(size))
def get_parts(string):
"""get parts"""
msg = twa.get_multipart()
msg.set_payload(string)
msg = email.message_from_string(msg.as_string())
parts = []
for part in msg.walk():
if part.get_content_maintype() == 'multipart':
continue
cid = part.get("Content-ID").strip("<>")
if cid:
item = {
'cid': cid,
'cte': part.get("Content-Transfer-Encoding"),
}
if item['cte'] == 'binary':
item['payload'] = join(TMP_IN, cid[twa.ID_LEN + 1:])
with open(item['payload'], 'wb') as fobj:
fobj.write(part.get_payload())
else:
item['payload'] = part.get_payload()
parts.append(item)
return parts
def create_tmp_dirs():
"""create_tmp_dirs"""
try:
os.makedirs(TMP_OUT)
except OSError:
pass
try:
os.makedirs(TMP_IN)
except OSError:
pass
def remove_tmp_dirs():
"""removes tmp dirs"""
shutil.rmtree(TMP_OUT)
shutil.rmtree(TMP_IN)
def get_file_md5(filename, blocksize=2**20):
"""get file md5"""
md5 = hashlib.md5()
with open(filename, "rb") as fin:
while True:
buf = fin.read(blocksize)
if not buf:
break
md5.update(buf)
return md5.hexdigest()
def test_multi_upload():
"""Test multiUpload"""
service = create_service()
create_tmp_dirs()
files = [1, 2, 3, 10]
for idx, size in enumerate(files):
filename = join(TMP_OUT, 'test_file_{}'.format(idx))
create_random_file(filename, size * MB)
files[idx] = {'fileName': basename(filename),
"fileBytes": CLIENT.attach(filename)}
with requests_mock.mock() as rmock:
rmock.post('http://test.python-zeep.org/x', text=RESPONSE)
result = service.multiUpload(files)
assert result == "ok"
parts = get_parts(rmock.request_history[0].body)
xml = etree.fromstring(parts[0]['payload'])
items = xml.findall(".//{http://test.ellethee.org/}arrayOfUpload/item")
for item in items:
filename = item.find("fileName").text
assert get_file_md5(
join(TMP_IN, filename)) == get_file_md5(join(TMP_OUT, filename))
remove_tmp_dirs()
def test_upload():
"""Test Upload"""
service = create_service()
create_tmp_dirs()
filename = join(TMP_OUT, 'test_file')
create_random_file(filename, 15 * MB)
with requests_mock.mock() as rmock:
rmock.post('http://test.python-zeep.org/x', text=RESPONSE)
result = service.upload(basename(filename), CLIENT.attach(filename))
assert result == "ok"
parts = get_parts(rmock.request_history[0].body)
xml = etree.fromstring(parts[0]['payload'])
items = xml.findall(".//{http://test.ellethee.org/}arrayOfUpload/item")
for item in items:
filename = item.find("fileName").text
assert get_file_md5(
join(TMP_IN, filename)) == get_file_md5(join(TMP_OUT, filename))
remove_tmp_dirs()
|
StarcoderdataPython
|
11309662
|
from pyfann import libfann
connection_rate = 1
learning_rate = 0.7
num_input = 2
num_hidden = 4
num_output = 1
desired_error = 0.0001
max_iterations = 100000
iterations_between_reports = 1000
ann = libfann.neural_net()
ann.create_sparse_array(connection_rate, (num_input, num_hidden, num_output))
ann.set_learning_rate(learning_rate)
ann.set_activation_function_output(libfann.SIGMOID_SYMMETRIC_STEPWISE)
ann.train_on_file("sample.data", max_iterations, iterations_between_reports, desired_error)
ann.save("sample.net")
|
StarcoderdataPython
|
343692
|
from django import forms
from django.shortcuts import get_object_or_404
from .models import Event
from ..users.models import User
class ConfirmAttendanceForm(forms.Form):
def confirm_attendance(self):
print(self.data)
event_id = int(self.data['event_id'])
username = self.data['username']
event = get_object_or_404(Event, pk=event_id)
user = get_object_or_404(User, username=username)
event.attendees.add(user)
pass
|
StarcoderdataPython
|
6458303
|
#%%
'''
http://epistasislab.github.io/tpot/api/#classification
https://machinelearningmastery.com/tpot-for-automated-machine-learning-in-python/
'''
import os, sys
import pandas as pd
import numpy as np
import joblib
from sklearn.model_selection import train_test_split
import pathlib
from tpot import TPOTClassifier
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.metrics import classification_report, confusion_matrix
from notify_run import Notify
#%%abrir csv
#path = "/home/nacho/Documents/coronavirus/COVID-19_Paper/"
path = "/lustre/home/idvperez/COVID-19_Paper/"
os.chdir(os.path.join(path))
#path = pathlib.Path(__file__).parent.absolute()
#os.chdir(path)
print(os.getcwd())
#data_percentage = 0.01
data_percentage = 1
#%%Valida si existen las carpetas
os.makedirs("tpot/models", exist_ok = True)
#%%
notify = Notify()
channel = notify.register()
endpoint = channel.endpoint
print(endpoint) # https://notify.run/<channel_code>
channel_page = channel.channel_page
print(channel_page) # https://notify.run/c/<channel_page_code>
#%%iter pred files
def pred_label(filename):
if filename.find('df_caso0') != -1:
label = 'hosp_critica'
if filename.find('caso1') != -1:
label = 'TIPO_PACIENTE'
if filename.find('caso2') != -1 or filename.find('caso3') != -1 or filename.find('df_caso_3_1') != -1 or filename.find('df_caso_3_2') != -1 or filename.find('df_caso_3_3') != -1:
label = 'BOOL_DEF'
if filename.find('caso5') != -1 or filename.find('df_caso5_1') != -1:
label = 'UCI'
if filename.find('caso6') != -1 or filename.find('caso7') != -1 or filename.find('df_caso_7_1') != -1 or filename.find('df_caso_7_2') != -1 or filename.find('df_caso_7_3') != -1:
label = 'INTUBADO'
return label
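# Examples of the mapping above (note the chain is not elif, so a later matching
# substring would win):
#   pred_label("df_caso1_train") -> 'TIPO_PACIENTE'
#   pred_label("df_caso5_1")     -> 'UCI'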
str_path = str(path)
print(str_path)
i = 1
for subdir, dirs, files in os.walk(str_path+'prediction_data'):
notify.send('Empezo el proceso TPOT')
for file in files:
if file.endswith(".zip"):
file_path = subdir + "/" + file
file_name = file.split('.', 1)[0]
print(file_name)
df_data = pd.read_csv(file_path)
df_data = df_data.sample(frac=data_percentage)
#separar datos
label = pred_label(file_name)
print(label)
X = df_data.loc[:, df_data.columns != label]
y = df_data.loc[:, label]
print(y.value_counts())
X, y = X.values, y.values
X, y = X.astype('float32'), y.astype('float32')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33,stratify=y, shuffle=True)
#---->train
#cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3)
tpot = TPOTClassifier(generations=5, population_size=50, scoring='balanced_accuracy', verbosity = 3, n_jobs = -1, cv= 5)
tpot.fit(X_train, y_train)
predictions = tpot.predict(X_test)
report = classification_report(y_test, predictions, output_dict=True)
report = pd.DataFrame(report).transpose()
#guarda el modelo y su reporte
#joblib.dump(tpot, 'tpot/'+file_name+'_tpot_model.pkl', compress = 1)
report.to_csv('tpot/models/'+file_name+'_tpot_report.csv', index=True)
tpot.export('tpot/models/'+file_name+'_tpot_pipeline.py')
notify.send("Termino dataset # " + str(i))
i = 1 + i
notify.send('Finalizo el proceso TPOT')
|
StarcoderdataPython
|
9765448
|
<gh_stars>0
#!/usr/bin/python3
import os.path
from pathlib import Path
import sys
import getpass
from evdev import InputDevice, list_devices
devices = [InputDevice(fn) for fn in list_devices()]
if len(devices) == 0:
print(
f"Could not find a RFID device, make sure it is plugged in.\nIf it is plugged in you may need to be added to the 'input' group (sudo usermod -a -G input {getpass.getuser()})"
)
sys.exit(0)
while True:
try:
i = 0
print("Choose the reader from list?")
for dev in devices:
print(f"{i} : {dev.name}")
i += 1
dev_id = int(input("Device Number: "))
devicePath = Path(os.path.dirname(os.path.realpath(__file__)))
devicePath = Path.joinpath(devicePath.parent.absolute(), "deviceName.txt")
with open(devicePath, "w") as f:
f.write(devices[dev_id].name)
print(f"'{devicePath}' created")
break
except Exception as ex:
print(f"{ex}\n")
print("done")
sys.exit(0)
|
StarcoderdataPython
|
5037755
|
<filename>src/environment/wrappers/noop_reset_env.py<gh_stars>10-100
"""An environment wrapper to preform null operations on reset."""
import gym
class NoopResetEnv(gym.Wrapper):
"""An environment wrapper to preform null operations on reset."""
def __init__(self, env, noop_max=30):
"""
Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def step(self, ac):
return self.env.step(ac)
def reset(self):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset()
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset()
return obs
# explicitly specify the external API of this module
__all__ = [NoopResetEnv.__name__]
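# Usage sketch (assumes an Atari-style env whose action 0 is NOOP; the env id is
# illustrative):
#   env = NoopResetEnv(gym.make("BreakoutNoFrameskip-v4"), noop_max=30)
#   obs = env.reset()  # performs 1..noop_max random no-op steps before returning obs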
|
StarcoderdataPython
|
1775925
|
#!/usr/bin/env python
#
# sim-lbeg.py
# for simulating LBEG beam from point A to B in the LANSCE linac
#
import sys
import os
# define directory to packages and append to $PATH
par_dir = os.path.abspath(os.path.pardir)
print par_dir
lib_dir = os.path.join(par_dir,"bin")
print lib_dir
sys.path.append(lib_dir)
pkg_dir = os.path.join(par_dir,"pylib")
print pkg_dir
sys.path.append(pkg_dir)
#import additional python packages
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import math
# import additional simulation packages
import hpsim as hps
import HPSim as HPSim
# use next line to select either GPU 0 or 2 on aothpsim
GPU = 0
hps.set_gpu(GPU)
import lcsutil as lcs
import nputil as npu
import sqldb as pydb
################################################################################
# install db's and connect to beamline
db_dir = par_dir + '/db'
lib_dir = par_dir + '/db/lib'
dbs = ['tbtd.db','dtl.db','trst.db','ccl.db']
dbconn1 = hps.DBConnection(db_dir, dbs, lib_dir, 'libsqliteext.so')
dbconn1.print_dbs()
dbconn1.clear_model_index()
print "*** dB connection established ***"
################################################################################
# create beamline
bl = hps.BeamLine()
beamline = hps.get_element_list()
print "*** Beamline created ***"
################################################################################
# create table of beamline elements at lengths
pybl = pydb.Db_bl(db_dir, dbs)
py_beamline = pybl.get_bl_elem_len()
print "*** PySQLite Beamline created ***"
################################################################################
# create H- beam
SIM_START = "TBDB02" #defined by input beam location
#beam = hps.Beam(mass=939.294, charge=-1.0, current=0.015, num=1024*256) #H- beam
beam = hps.Beam(mass=939.294, charge=-1.0, current=0.015, num=1024*256/4) #H- beam
beam.set_dc(0.095, 47.0, 0.00327, -0.102, 60.0, 0.002514, 180.0, 0.0, 0.7518) #TBDB02 20140901
beam.set_frequency(201.25)
betalambda = hps.betalambda(mass = beam.get_mass(), freq=beam.get_frequency(), w=0.750)
phi_offset = -hps.get_beamline_length(SIM_START,'BLZ')/betalambda *360
beam.set_ref_w(0.750)
beam.set_ref_phi(phi_offset)
beam.translate('phi', phi_offset)
beam.save_initial_beam()
print "*** H- Beam created ***"
################################################################################
# create spacecharge
spch = hps.SpaceCharge(nr = 32, nz = 128, interval = 0.025, adj_bunch = 3)
print "spch interval=", spch.get_interval()
print "adj_bunch=", spch.get_adj_bunch()
# define at what energy simulation stops using adjacent bunches in SC calc
spch.set_adj_bunch_cutoff_w(0.8)
# remeshing factor determines how ofter the mesh gets recalc vs scaled for SC kick
spch.set_remesh_threshold(0.02)
#spch.set_remesh_threshold(0.2)
print "cutoff w=", spch.get_adj_bunch_cutoff_w()
print "*** Space Charge Initialized ***"
################################################################################
# create simulator
sim = hps.Simulator(beam)
sim.set_space_charge('on')
print "*** Simulator Initialized ***"
################################################################################
# STANDARD AND REQUIRED STUFF ABOVE THIS LINE
################################################################################
SIM_STOP = '48DT'
ENERGY_CUTOFF = 0.0
mask = gmask = beam.get_good_mask()
print "*** Input Beam ***"
print SIM_START
print "w/user units"
beam.print_results()
print "*** Starting Simulation ***\n"
sim.simulate(SIM_START, SIM_STOP)
# determine mask of particles used in analysis and plotting
wmask = beam.get_mask_with_limits('w', lolim = ENERGY_CUTOFF)
gmask = beam.get_good_mask(wmask)
mask = gmask
print "*** Output Beam ***"
print SIM_STOP
print "w/user units"
beam.print_results(mask)
# create output plot
plot = hps.BeamPlot(nrow=4, ncol=3, hsize=16, vsize=12)
plot.title(SIM_STOP)
plot.iso_phase_space('xxp', beam, mask, 1)
plot.iso_phase_space('yyp', beam, mask, 2)
plot.iso_phase_space('phiw', beam, mask, 3 )
plot.hist2d_phase_space('xxp', beam, mask, 4)
plot.hist2d_phase_space('yyp', beam, mask, 5)
plot.hist2d_phase_space('phiw', beam, mask, 6)
plot.profile('x', beam, mask, 7, 'g-')
plot.profile('y', beam, mask, 8, 'g-')
plot.profile('phi', beam, mask, 9, 'g-')
plot.profile('xp', beam, mask, 10, 'g-')
plot.profile('yp', beam, mask, 11, 'g-')
plot.profile('w', beam, mask, 12, 'g-')
plot.show()
exit()
|
StarcoderdataPython
|
9647169
|
<filename>CSIKit/visualization/plot_szenario.py
"""
Classes to plot a szenario with different measurements
"""
from cmath import phase
from typing import Dict, List, Tuple
import os
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from CSIKit.csi import IWLCSIFrame as CsiEntry
from CSIKit.visualization.graph import Graph, TupleGraph, PlotColorMap
from CSIKit.visualization.metric import Metric, TupleMetric, MatrixMetric
from CSIKit.reader.readers.read_bfee import IWLBeamformReader
class PlotableCSI():
"""
to plot csiEntrys
"""
def __init__(self, metric, graph):
# If metric and graph does not match : raise
if issubclass(metric, TupleMetric) ^ issubclass(graph,TupleGraph):
raise Exception(
f"""
both should have the same output, but only one of them is a Tuple type
Metric:{metric.__name__}
Graph:{graph.__name__}
isTuple: {issubclass(metric, TupleMetric)} ^ {issubclass(graph,TupleGraph)}
""")
if issubclass(metric, MatrixMetric) ^ issubclass(graph,PlotColorMap):
raise Exception(
f"""
both should have the same output, but only one of them is a colour-map type
Metric:{metric.__name__}
Graph:{graph.__name__}
isMatrix: {issubclass(metric, MatrixMetric)} ^ {issubclass(graph,PlotColorMap)}
""")
self._values_per_measurement: Dict[str, List] = {}
self._curr_measurement = None
self._figure = None
self.metric = metric()
self.graph = graph(self.metric)
def add_measurement(self, measurement_name: str):
""" Mark the moment, if the next noticed data are of a different measurement"""
self._curr_measurement = []
self._values_per_measurement[measurement_name] = self._curr_measurement
def notice(self, entry: CsiEntry):
if self._curr_measurement is None:
print(self._curr_measurement)
raise Exception(
"No measurement started yet. call self.add_measurement")
self._curr_measurement.append(self.metric.notice(entry))
def _plot(self):
self._figure = plt.figure()
axes_list = self.graph.plot( self._values_per_measurement)
#{self._figure.add_subplot(ax) for ax in axes_list}
def show(self):
self._plot()
self._figure.show()
def save(self, folder, prefix=""):
self._plot()
prefix = f"{prefix}-"
if not os.path.exists(folder):
os.makedirs(folder)
file_name = f"{self.metric.__class__.__name__}_{self.graph.__class__.__name__}"
path = f"./{folder}/{prefix}{file_name}.pdf".replace(" ","")
with PdfPages(path) as pdf:
pdf.savefig(self._figure, bbox_inches='tight')
class SzenarioPlotter():
"""
Plots different metrics of one szenario with multiple measurements
"""
def __init__(self, szenario_name: str,
plot_impls: List):
self.szenario_name = szenario_name
self.__measurements: Dict = {}
self.__plot_implementations: List[PlotableCSI] = [
PlotableCSI(metric, graph) for metric, graph in plot_impls]
plt.rcParams.update(
{'font.size': 22, 'font.family': "Liberation Serif"})
def add_plot(self, metric: Metric, graph: Graph):
"""
Adds PlotableCSI and give him all measurements of this szenario
"""
plotable = PlotableCSI(metric, graph)
for measurement_name in self.__measurements:
plotable.add_measurement(measurement_name)
entries = self.__measurements[measurement_name]
for entry in entries:
plotable.notice(entry)
self.__plot_implementations.append(plotable)
def add_measurement(self, name, data):
"""
add new measurement and notice all plotables about the new data
"""
if not isinstance(name, (str, int, float)):
raise Exception(f"invalid input for name")
if data and not isinstance(data[0], CsiEntry):
raise Exception(f"invalid input for data. It is {type(data)}")
self.__measurements[name] = data
for plot_impl in self.__plot_implementations:
plot_impl.add_measurement(name)
for entry in data:
if not isinstance(entry, CsiEntry):
raise Exception(
f"unclean CSI Entrys. Should be type CsiEntry, but it is {type(isinstance(entry, CsiEntry))}")
for plot_impl in self.__plot_implementations:
plot_impl.notice(entry)
def add_measurements(self, measurements: dict):
"""
add measurements by passing a dict of name to entries:list
"""
for measurement_name in measurements:
data = measurements[measurement_name]
self.add_measurement(measurement_name, data)
@classmethod
def _read_file(cls, path, filter_n_rx=True):
"""
Read a .dat file and return its CSI entries.
@filter_n_rx : default=True; if True, keep only frames with n_rx == 3
returns csiEntries:list
"""
my_reader = IWLBeamformReader()
csi_data = my_reader.read_file(path)
# maybe you have to filter your entries if not all rx are used
csi_entries = []
if filter_n_rx:
csi_entries = [
frame for frame in csi_data.frames if frame.n_rx == 3]
else:
csi_entries = csi_data.frames
return csi_entries
def add_measurement_file(self, name, path: str):
"""
adds measurement by file
"""
if not isinstance(name, (str, int, float)):
raise Exception(f"invalid input for name")
if not path and not os.path.exists(path):
raise Exception(f"path {path} not exists")
entries = self._read_file(path)
self.add_measurement(name, entries)
def add_measurements_files(self, name_path: dict):
"""
adds measurements by passing a dict of name to path
"""
if not name_path or not len(name_path):
raise Exception("Nothing to add in dict")
for name in name_path:
path = name_path[name]
self.add_measurement_file(name, path)
def show(self, title=""):
"""
shows the results of the plt of the different metrics
"""
self._is_szenario_vaild()
{plotable.show() for plotable in self.__plot_implementations}
def save(self,folder="./images"):
"""
saves pdf of the plot at this szenarios
Note: when called from within a Jupyter notebook (ipynb) the figures may also be displayed inline.
"""
self._is_szenario_vaild()
{plotable.save(folder, prefix=self.szenario_name) for plotable in self.__plot_implementations}
def _is_szenario_vaild(self):
"""
check that this szenario has plots and measurements defined
"""
# if no plots spezified
if not len(self.__plot_implementations) > 0:
raise Exception("define PlotableCSI before show szenario")
# if __measurements empty
if not len(self.__measurements) > 0:
raise Exception("define meassurments before show szenario")
if not isinstance(self.__measurements, dict):
raise Exception(
f"__measurements should be type dict but it is {type(self.__measurements)}")
return True
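# End-to-end sketch (paths and the Metric/Graph pair are illustrative; concrete
# implementations live in CSIKit.visualization.metric / .graph):
#   plotter = SzenarioPlotter("office walk", plot_impls=[])
#   plotter.add_measurement_file("baseline", "./data/baseline.dat")
#   plotter.add_measurement_file("walking", "./data/walking.dat")
#   plotter.add_plot(SomeMetric, SomeGraph)  # must be a compatible Metric/Graph pair
#   plotter.save(folder="./images")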
|
StarcoderdataPython
|
388869
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import importlib
import time
import random
import numpy as np
from pyhocon import ConfigFactory
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from data.modelnet import AdaptiveModelNetDataset
from utils import pointcloud_utils as put
from utils.misc import Netpara, debugPrint, setup_seed, worker_init_fn
device_id = 0
os.environ['CUDA_VISIBLE_DEVICES'] = str(device_id)
torch.cuda.set_device(device_id)
#######---------ModelNet 40------------######
trainset = AdaptiveModelNetDataset(
'modelnet/modelNet40_train_16nn_GM_adaptive_knn_sparse.npy', train=True)
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=32,
shuffle=True,
num_workers=4,
worker_init_fn=worker_init_fn)
testset = AdaptiveModelNetDataset(
'modelnet/modelNet40_test_16nn_GM_adaptive_knn_sparse.npy', train=False)
testloader = torch.utils.data.DataLoader(testset,
batch_size=32,
shuffle=True,
num_workers=4,
worker_init_fn=worker_init_fn)
setup_seed(1024)
net_name = 'pointnet_cls'
if net_name == 'pointnet_cls':
from other_models.pointnet.POINTNET import PointNetCls
net = PointNetCls(k=40, feature_transform=False, device_id=device_id)
elif net_name == '':
from other_models.pointnet2.PointNet2_MsgCls import Pointnet2MSG
net = Pointnet2MSG(input_channels=0,
num_classes=40,
use_xyz=True,
device_id=device_id)
elif net_name == 'sonet':
from other_models.sonet.classifier import Model
from other_models.sonet.options import Options
from data.modelnet import ModelNet_Shrec_Loader
opt = Options().parse()
# opt.surface_normal = False
trainset = ModelNet_Shrec_Loader(opt.dataroot, 'train', opt)
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.nThreads,
worker_init_fn=worker_init_fn)
testset = ModelNet_Shrec_Loader(opt.dataroot, 'test', opt)
testloader = torch.utils.data.DataLoader(testset,
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.nThreads,
worker_init_fn=worker_init_fn)
net = Model(opt)
Netpara(net.encoder)
Netpara(net.classifier)
elif net_name == 'sph3d':
from other_models.sph3d.SPH3D_modelnet import SPH3D
net_config = importlib.import_module('models.sph3d.modelnet_config')
net = SPH3D(input_channels=3,
class_nums=40,
config=net_config,
device_id=device_id)
elif net_name == 'ECC':
import functools
from data.modelnet import ECC_ModelNetDataset
import other_models.ecc_model.ecc as ecc
from other_models.ecc_model.ECC import ECC
import logging
'''
CUDA_VISIBLE_DEVICES=0 python main.py \
--dataset modelnet40 --test_nth_epoch 25 --lr 0.1 --lr_steps '[30,60,90]' --epochs 100 --batch_size 64 --batch_parts 4 \
--model_config 'i_1_2, c_24,b,r, c_48,b,r, m_2.5_7.5, c_48,b,r, c_48,b,r, m_7.5_22.5, c_96,b,r, m_1e10_1e10, f_64,b,r,d_0.2,f_40' \
--fnet_llbias 0 --fnet_widths '[16,32]' --pc_augm_scale 1.2 --pc_augm_mirror_prob 0.2 --pc_augm_input_dropout 0.1 \
--nworkers 3 --edgecompaction 1 --edge_mem_limit 1000 --odir results/modelnet40
'''
model_config = 'i_1_2, c_24,b,r, c_48,b,r, m_2.5_7.5, c_48,b,r, c_48,b,r, m_7.5_22.5, c_96,b,r, m_1e10_1e10, f_64,b,r,d_0.2,f_40'
net = ECC(model_config, 1, [(3) + (3)] + [16, 32], 1, 1, 1000, device_id=0)
def cloud_edge_feats(edgeattrs):
""" Defines edge features for `GraphConvInfo` in the case of point clouds. Assembles edge feature tensor given point offsets as edge attributes.
"""
columns = []
offsets = np.asarray(edgeattrs['offset'])
# todo: possible discretization, round to multiples of min(offsets[offsets>0]) ? Or k-means (slow?)?
columns.append(offsets)
p1 = np.linalg.norm(offsets, axis=1)
p2 = np.arctan2(offsets[:, 1], offsets[:, 0])
p3 = np.arccos(offsets[:, 2] / (p1 + 1e-6))
columns.extend(
[p1[:, np.newaxis], p2[:, np.newaxis], p3[:, np.newaxis]])
edgefeats = np.concatenate(columns, axis=1).astype(np.float32)
edgefeats_clust, indices = ecc.unique_rows(edgefeats)
logging.debug('Edge features: %d -> %d unique edges, %d dims',
edgefeats.shape[0], edgefeats_clust.shape[0],
edgefeats_clust.shape[1])
return torch.from_numpy(edgefeats_clust), torch.from_numpy(indices)
edge_feat_func = cloud_edge_feats
collate_func = functools.partial(
ecc.graph_info_collate_classification,
edge_func=functools.partial(edge_feat_func))
trainset = ECC_ModelNetDataset(
'modelnet/modelNet40_train_16nn_GM_adaptive_knn_sparse.npy',
pyramid_conf=net.pyramid_conf,
train=True)
trainloader = torch.utils.data.DataLoader(trainset,
collate_fn=collate_func,
batch_size=4,
shuffle=True,
num_workers=4,
worker_init_fn=worker_init_fn)
testset = ECC_ModelNetDataset(
'modelnet/modelNet40_test_16nn_GM_adaptive_knn_sparse.npy',
pyramid_conf=net.pyramid_conf,
train=False)
testloader = torch.utils.data.DataLoader(testset,
collate_fn=collate_func,
batch_size=4,
shuffle=True,
num_workers=4,
worker_init_fn=worker_init_fn)
elif net_name == 'pointConv':
from other_models.pointconv.POINTCONV import PointConvDensityClsSsg as PointConvClsSsg
net = PointConvClsSsg(input_channels=3,
num_classes=40,
device_id=device_id)
elif net_name == 'pointCNN':
from other_models.pointCNN.pointCNN import Classifier
net = Classifier(device_id=device_id)
elif net_name == 'pcnn':
from other_models.pcnn.pcnn import PCNN
from apex import amp
conf = ConfigFactory.parse_file(
'other_models/pcnn/confs/var_lesspoints.conf')
net = PCNN(conf=conf.get_config('network'),
input_channels=3,
class_nums=40,
device_id=device_id) #trainset.class_nums
net, net.optimizer = amp.initialize(net, net.optimizer,
opt_level="O1") # O is not number zero
# with amp.scale_loss(loss, optimizer) as scaled_loss:
# scaled_loss.backward()
elif net_name == 'rscnn':
from other_models.rscnn.RSCNN import RSCNN_SSN
net = RSCNN_SSN(num_classes=40,
input_channels=0,
relation_prior=1,
use_xyz=True)
elif net_name == 'dgcnn':
from other_models.dgcnn.DGCNN import DGCNN
net = DGCNN(output_channels = 40, device_id=device_id)
elif net_name == 'kdnet':
from other_models.kdnet.kdnet import KDNet
from data.modelnet import KDNet_ModelNetDataset
depth=10
trainset = KDNet_ModelNetDataset('../modelnet/data/modelNet40_train_16nn_GM_adaptive_knn_sparse.npy', depth=depth, train=True)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True, num_workers=4,worker_init_fn=worker_init_fn)
testset = KDNet_ModelNetDataset('../modelnet/data/modelNet40_test_16nn_GM_adaptive_knn_sparse.npy', depth=depth, train=False)
testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=True, num_workers=4,worker_init_fn=worker_init_fn)
setup_seed(1024)
net = KDNet(input_channels=3, num_classes=40, depth=depth, device_id=device_id)
else:
raise ValueError("not a implemented point net!")
Netpara(net)
# writer=SummaryWriter()
writer = None
load_weight = False
train_sperate = False
tic = time.time()
for epoch in range(1, 101):
net.fit(trainloader, epoch, writer)
if (epoch % 10 == 0):
net.score(testloader)
# net.score(testloader)
if writer is not None:
writer.close()
toc = time.time()
print("%.3f ms has passed" % ((toc - tic) * 1000))
print("Done!!!")
|
StarcoderdataPython
|
9648721
|
"""
Provides helper functions used throughout the InvenTree project
"""
import io
import re
import json
import os.path
from PIL import Image
from decimal import Decimal, InvalidOperation
from wsgiref.util import FileWrapper
from django.http import StreamingHttpResponse
from django.core.exceptions import ValidationError, FieldError
from django.utils.translation import gettext_lazy as _
from django.contrib.auth.models import Permission
import InvenTree.version
from common.models import InvenTreeSetting
from .settings import MEDIA_URL, STATIC_URL
from common.settings import currency_code_default
from djmoney.money import Money
def getSetting(key, backup_value=None):
"""
Shortcut for reading a setting value from the database
"""
return InvenTreeSetting.get_setting(key, backup_value=backup_value)
def generateTestKey(test_name):
"""
Generate a test 'key' for a given test name.
This must not have illegal chars as it will be used for dict lookup in a template.
Tests must be named such that they will have unique keys.
"""
key = test_name.strip().lower()
key = key.replace(" ", "")
# Remove any characters that cannot be used to represent a variable
key = re.sub(r'[^a-zA-Z0-9]', '', key)
return key
def getMediaUrl(filename):
"""
Return the qualified access path for the given file,
under the media directory.
"""
return os.path.join(MEDIA_URL, str(filename))
def getStaticUrl(filename):
"""
Return the qualified access path for the given file,
under the static media directory.
"""
return os.path.join(STATIC_URL, str(filename))
def construct_absolute_url(*arg):
"""
Construct (or attempt to construct) an absolute URL from a relative URL.
This is useful when (for example) sending an email to a user with a link
to something in the InvenTree web framework.
This requires the BASE_URL configuration option to be set!
"""
base = str(InvenTreeSetting.get_setting('INVENTREE_BASE_URL'))
url = '/'.join(arg)
if not base:
return url
# Strip trailing slash from base url
if base.endswith('/'):
base = base[:-1]
if url.startswith('/'):
url = url[1:]
url = f"{base}/{url}"
return url
def getBlankImage():
"""
Return the qualified path for the 'blank image' placeholder.
"""
return getStaticUrl("img/blank_image.png")
def getBlankThumbnail():
"""
Return the qualified path for the 'blank image' thumbnail placeholder.
"""
return getStaticUrl("img/blank_image.thumbnail.png")
def TestIfImage(img):
""" Test if an image file is indeed an image """
try:
Image.open(img).verify()
return True
except:
return False
def TestIfImageURL(url):
""" Test if an image URL (or filename) looks like a valid image format.
Simply tests the extension against a set of allowed values
"""
return os.path.splitext(os.path.basename(url))[-1].lower() in [
'.jpg', '.jpeg',
'.png', '.bmp',
'.tif', '.tiff',
'.webp', '.gif',
]
def str2bool(text, test=True):
""" Test if a string 'looks' like a boolean value.
Args:
text: Input text
test (default = True): Set which boolean value to look for
Returns:
True if the text looks like the selected boolean value
"""
if test:
return str(text).lower() in ['1', 'y', 'yes', 't', 'true', 'ok', 'on', ]
else:
return str(text).lower() in ['0', 'n', 'no', 'none', 'f', 'false', 'off', ]
def is_bool(text):
"""
Determine if a string value 'looks' like a boolean.
"""
if str2bool(text, True):
return True
elif str2bool(text, False):
return True
else:
return False
def isNull(text):
"""
Test if a string 'looks' like a null value.
This is useful for querying the API against a null key.
Args:
text: Input text
Returns:
True if the text looks like a null value
"""
return str(text).strip().lower() in ['top', 'null', 'none', 'empty', 'false', '-1', '']
def normalize(d):
"""
Normalize a decimal number, and remove exponential formatting.
"""
if type(d) is not Decimal:
d = Decimal(d)
d = d.normalize()
# Ref: https://docs.python.org/3/library/decimal.html
return d.quantize(Decimal(1)) if d == d.to_integral() else d.normalize()
def increment(n):
"""
Attempt to increment an integer (or a string that looks like an integer!)
e.g.
001 -> 002
2 -> 3
AB01 -> AB02
QQQ -> QQQ
"""
value = str(n).strip()
# Ignore empty strings
if not value:
return value
pattern = r"(.*?)(\d+)?$"
result = re.search(pattern, value)
# No match!
if result is None:
return value
groups = result.groups()
# If we cannot match the regex, then simply return the provided value
if not len(groups) == 2:
return value
prefix, number = groups
# No number extracted? Simply return the prefix (without incrementing!)
if not number:
return prefix
# Record the width of the number
width = len(number)
try:
number = int(number) + 1
number = str(number)
except ValueError:
pass
number = number.zfill(width)
return prefix + number
def decimal2string(d):
"""
Format a Decimal number as a string,
stripping out any trailing zeroes or decimal points.
Essentially make it look like a whole number if it is one.
Args:
d: A python Decimal object
Returns:
A string representation of the input number
"""
if type(d) is Decimal:
d = normalize(d)
try:
# Ensure that the provided string can actually be converted to a float
float(d)
except ValueError:
# Not a number
return str(d)
s = str(d)
# Return entire number if there is no decimal place
if '.' not in s:
return s
return s.rstrip("0").rstrip(".")
def decimal2money(d, currency=None):
"""
Format a Decimal number as Money
Args:
d: A python Decimal object
currency: Currency of the input amount, defaults to default currency in settings
Returns:
A Money object from the input(s)
"""
if not currency:
currency = currency_code_default()
return Money(d, currency)
def WrapWithQuotes(text, quote='"'):
""" Wrap the supplied text with quotes
Args:
text: Input text to wrap
quote: Quote character to use for wrapping (default = "")
Returns:
Supplied text wrapped in quote char
"""
if not text.startswith(quote):
text = quote + text
if not text.endswith(quote):
text = text + quote
return text
def MakeBarcode(object_name, object_pk, object_data=None, **kwargs):
""" Generate a string for a barcode. Adds some global InvenTree parameters.
Args:
object_type: string describing the object type e.g. 'StockItem'
object_id: ID (Primary Key) of the object in the database
object_url: url for JSON API detail view of the object
data: Python dict object containing extra data which will be rendered to string (must only contain stringable values)
Returns:
json string of the supplied data plus some other data
"""
if object_data is None:
object_data = {}
url = kwargs.get('url', False)
brief = kwargs.get('brief', True)
data = {}
if url:
request = object_data.get('request', None)
item_url = object_data.get('item_url', None)
absolute_url = None
if request and item_url:
absolute_url = request.build_absolute_uri(item_url)
# Return URL (No JSON)
return absolute_url
if item_url:
# Return URL (No JSON)
return item_url
elif brief:
data[object_name] = object_pk
else:
data['tool'] = 'InvenTree'
data['version'] = InvenTree.version.inventreeVersion()
data['instance'] = InvenTree.version.inventreeInstanceName()
# Ensure PK is included
object_data['id'] = object_pk
data[object_name] = object_data
return json.dumps(data, sort_keys=True)
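# Worked example (brief mode, the default): MakeBarcode("stockitem", 7)
# returns the JSON string '{"stockitem": 7}'.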
def GetExportFormats():
""" Return a list of allowable file formats for exporting data """
return [
'csv',
'tsv',
'xls',
'xlsx',
'json',
'yaml',
]
def DownloadFile(data, filename, content_type='application/text', inline=False):
"""
Create a dynamic file for the user to download.
Args:
data: Raw file data (string or bytes)
filename: Filename for the file download
content_type: Content type for the download
inline: Download "inline" or as attachment? (Default = attachment)
Return:
A StreamingHttpResponse object wrapping the supplied data
"""
filename = WrapWithQuotes(filename)
if type(data) == str:
wrapper = FileWrapper(io.StringIO(data))
else:
wrapper = FileWrapper(io.BytesIO(data))
response = StreamingHttpResponse(wrapper, content_type=content_type)
response['Content-Length'] = len(data)
disposition = "inline" if inline else "attachment"
response['Content-Disposition'] = f'{disposition}; filename={filename}'
return response
def extract_serial_numbers(serials, expected_quantity, next_number: int):
"""
Attempt to extract serial numbers from an input string:
Requirements:
- Serial numbers can be either strings, or integers
- Serial numbers can be split by whitespace / newline / commma chars
- Serial numbers can be supplied as an inclusive range using hyphen char e.g. 10-20
- Serial numbers can be defined as ~ for getting the next available serial number
- Serial numbers can be supplied as <start>+ for getting all expecteded numbers starting from <start>
- Serial numbers can be supplied as <start>+<length> for getting <length> numbers starting from <start>
Args:
serials: input string with patterns
expected_quantity: The number of (unique) serial numbers we expect
next_number(int): the next possible serial number
"""
serials = serials.strip()
# fill in the next serial number into the serial
while '~' in serials:
serials = serials.replace('~', str(next_number), 1)
next_number += 1
# Split input string by whitespace or comma (,) characters
groups = re.split(r"[\s,]+", serials)
numbers = []
errors = []
# Helper function to check for duplicated numbers
def add_sn(sn):
# Attempt integer conversion first, so numerical strings are never stored
try:
sn = int(sn)
except ValueError:
pass
if sn in numbers:
errors.append(_('Duplicate serial: {sn}').format(sn=sn))
else:
numbers.append(sn)
try:
expected_quantity = int(expected_quantity)
except ValueError:
raise ValidationError([_("Invalid quantity provided")])
if len(serials) == 0:
raise ValidationError([_("Empty serial number string")])
# If the user has supplied the correct number of serials, don't process them for groups
# just add them so any duplicates (or future validations) are checked
if len(groups) == expected_quantity:
for group in groups:
add_sn(group)
if len(errors) > 0:
raise ValidationError(errors)
return numbers
for group in groups:
group = group.strip()
# Hyphen indicates a range of numbers
if '-' in group:
items = group.split('-')
if len(items) == 2 and all([i.isnumeric() for i in items]):
a = items[0].strip()
b = items[1].strip()
try:
a = int(a)
b = int(b)
if a < b:
for n in range(a, b + 1):
add_sn(n)
else:
errors.append(_("Invalid group range: {g}").format(g=group))
except ValueError:
errors.append(_("Invalid group: {g}").format(g=group))
continue
else:
# More than 2 hyphens or non-numeric group so add without interpolating
add_sn(group)
# plus signals either
# 1: 'start+': expected number of serials, starting at start
# 2: 'start+number': number of serials, starting at start
elif '+' in group:
items = group.split('+')
# case 1, 2
if len(items) == 2:
start = int(items[0])
# case 2
if bool(items[1]):
end = start + int(items[1]) + 1
# case 1
else:
end = start + (expected_quantity - len(numbers))
for n in range(start, end):
add_sn(n)
# no case
else:
errors.append(_("Invalid group sequence: {g}").format(g=group))
# At this point, we assume that the "group" is just a single serial value
elif group:
add_sn(group)
# No valid input group detected
else:
raise ValidationError(_(f"Invalid/no group {group}"))
if len(errors) > 0:
raise ValidationError(errors)
if len(numbers) == 0:
raise ValidationError([_("No serial numbers found")])
# The number of extracted serial numbers must match the expected quantity
if not expected_quantity == len(numbers):
raise ValidationError([_("Number of unique serial number ({s}) must match quantity ({q})").format(s=len(numbers), q=expected_quantity)])
return numbers
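# Worked examples for the rules above:
#   extract_serial_numbers("10-14", 5, 1)     -> [10, 11, 12, 13, 14]
#   extract_serial_numbers("1, 3-5, ~", 5, 5) -> raises ValidationError
#     (the "~" expands to 5, duplicating the value already produced by the 3-5 range)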
def validateFilterString(value, model=None):
"""
Validate that a provided filter string looks like a list of comma-separated key=value pairs
These should nominally match to a valid database filter based on the model being filtered.
e.g. "category=6, IPN=12"
e.g. "part__name=widget"
The ReportTemplate class uses the filter string to work out which items a given report applies to.
For example, an acceptance test report template might only apply to stock items with a given IPN,
so the string could be set to:
filters = "IPN = ACME0001"
Returns a map of key:value pairs
"""
# Empty results map
results = {}
value = str(value).strip()
if not value or len(value) == 0:
return results
groups = value.split(',')
for group in groups:
group = group.strip()
pair = group.split('=')
if not len(pair) == 2:
raise ValidationError(
"Invalid group: {g}".format(g=group)
)
k, v = pair
k = k.strip()
v = v.strip()
if not k or not v:
raise ValidationError(
"Invalid group: {g}".format(g=group)
)
results[k] = v
# If a model is provided, verify that the provided filters can be used against it
if model is not None:
try:
model.objects.filter(**results)
except FieldError as e:
raise ValidationError(
str(e),
)
return results
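# e.g. validateFilterString("category=6, IPN=12") -> {'category': '6', 'IPN': '12'}
# (values remain strings; passing a model additionally checks the keys are valid filters)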
def addUserPermission(user, permission):
"""
Shortcut function for adding a certain permission to a user.
"""
perm = Permission.objects.get(codename=permission)
user.user_permissions.add(perm)
def addUserPermissions(user, permissions):
"""
Shortcut function for adding multiple permissions to a user.
"""
for permission in permissions:
addUserPermission(user, permission)
def getMigrationFileNames(app):
"""
Return a list of all migration filenames for provided app
"""
local_dir = os.path.dirname(os.path.abspath(__file__))
migration_dir = os.path.join(local_dir, '..', app, 'migrations')
files = os.listdir(migration_dir)
# Regex pattern for migration files
pattern = r"^[\d]+_.*\.py$"
migration_files = []
for f in files:
if re.match(pattern, f):
migration_files.append(f)
return migration_files
def getOldestMigrationFile(app, exclude_extension=True, ignore_initial=True):
"""
Return the filename associated with the oldest migration
"""
oldest_num = -1
oldest_file = None
for f in getMigrationFileNames(app):
if ignore_initial and f.startswith('0001_initial'):
continue
num = int(f.split('_')[0])
if oldest_file is None or num < oldest_num:
oldest_num = num
oldest_file = f
if exclude_extension:
oldest_file = oldest_file.replace('.py', '')
return oldest_file
def getNewestMigrationFile(app, exclude_extension=True):
"""
Return the filename associated with the newest migration
"""
newest_file = None
newest_num = -1
for f in getMigrationFileNames(app):
num = int(f.split('_')[0])
if newest_file is None or num > newest_num:
newest_num = num
newest_file = f
if exclude_extension:
newest_file = newest_file.replace('.py', '')
return newest_file
def clean_decimal(number):
""" Clean-up decimal value """
# Check if empty
if number is None or number == '' or number == 0:
return Decimal(0)
# Convert to string and remove spaces
number = str(number).replace(' ', '')
# Guess what type of decimal and thousands separators are used
count_comma = number.count(',')
count_point = number.count('.')
if count_comma == 1:
# Comma is used as decimal separator
if count_point > 0:
# Points are used as thousands separators: remove them
number = number.replace('.', '')
# Replace decimal separator with point
number = number.replace(',', '.')
elif count_point == 1:
# Point is used as decimal separator
if count_comma > 0:
# Commas are used as thousands separators: remove them
number = number.replace(',', '')
# Convert to Decimal type
try:
clean_number = Decimal(number)
except InvalidOperation:
# Number cannot be converted to Decimal (eg. a string containing letters)
return Decimal(0)
return clean_number.quantize(Decimal(1)) if clean_number == clean_number.to_integral() else clean_number.normalize()
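# Illustrative behaviour (assumed examples, not part of the original module):
#   clean_decimal("1.234,56")  # -> Decimal("1234.56") (comma treated as decimal separator)
#   clean_decimal("3 500,25")  # -> Decimal("3500.25")
#   clean_decimal("abc")       # -> Decimal("0")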
def get_objectreference(obj, type_ref: str = 'content_type', object_ref: str = 'object_id'):
"""lookup method for the GenericForeignKey fields
Attributes:
- obj: object that will be resolved
- type_ref: field name for the contenttype field in the model
- object_ref: field name for the object id in the model
Example implementation in the serializer:
```
target = serializers.SerializerMethodField()
def get_target(self, obj):
return get_objectreference(obj, 'target_content_type', 'target_object_id')
```
The method name must always be the name of the field prefixed by 'get_'
"""
model_cls = getattr(obj, type_ref)
obj_id = getattr(obj, object_ref)
# check if references are set -> return nothing if not
if model_cls is None or obj_id is None:
return None
# resolve referenced data into objects
model_cls = model_cls.model_class()
item = model_cls.objects.get(id=obj_id)
url_fnc = getattr(item, 'get_absolute_url', None)
# create output
ret = {}
if url_fnc:
ret['link'] = url_fnc()
return {
'name': str(item),
'model': str(model_cls._meta.verbose_name),
**ret
}
def inheritors(cls):
"""
Return all classes that are subclasses from the supplied cls
"""
subcls = set()
work = [cls]
while work:
parent = work.pop()
for child in parent.__subclasses__():
if child not in subcls:
subcls.add(child)
work.append(child)
return subcls
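# Illustrative usage (assumed example): for class A -> B(A) -> C(B), inheritors(A)
# returns {B, C}, i.e. all direct and indirect subclasses, excluding A itself.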
|
StarcoderdataPython
|
5021460
|
<reponame>JJ/swarm-ga-worker<filename>worker/main.py
from ga_worker import *
from pso_worker import *
import redis
import json
import os
import time
import base64
import uuid
TOPIC_CONSUME = "population-objects"
TOPIC_PRODUCE = "evolved-population-objects"
WORKER_ID = str(uuid.uuid4())
r = redis.StrictRedis(host='redis', port=6379, db=0)
# {'type': 'subscribe', 'pattern': None, 'channel': b'population-objects', 'data': 1}
while True:
data = None
print("worker LOOP")
message = r.blpop(TOPIC_CONSUME)
# message is a tuple (queue_name, data)
data = message[1]
print("message:from::", TOPIC_CONSUME)
#print("message:type:", type(data))
if data:
#print(data)
#data_args = base64.b64decode(data)
args = json.loads(data)
result = None
print(args["algorithm"])
args["worker_id"] = WORKER_ID
if args["algorithm"] == "GA":
worker = GA_Worker(args)
worker.setup()
result = worker.run()
else:
worker = PSO_Worker(args)
result = worker.run()
#print("result:",result)
# Return with a format for writing to MessageHub
data = json.dumps(result).encode('utf-8')
print("New POPULATION Message")
#r.publish(TOPIC_PRODUCE, data)
r.lpush(TOPIC_PRODUCE, data)
else:
#print("no message")
time.sleep(1)
|
StarcoderdataPython
|
3333036
|
<reponame>impastasyndrome/DS-ALGO-OFFICIAL<gh_stars>10-100
# time complexity: O(m+n)
class Solution:
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if not matrix or len(matrix) == 0 or not matrix[0]:
return False
m, n = len(matrix), len(matrix[0])
row, col = 0, n - 1
while row < m and col >= 0:
if matrix[row][col] == target:
return True
elif matrix[row][col] < target:
row += 1
else:
col -= 1
return False
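# Illustrative usage (assumed example, not part of the original snippet):
#   m = [[1, 4, 7],
#        [2, 5, 8],
#        [3, 6, 9]]
#   Solution().searchMatrix(m, 5)   # -> True
#   Solution().searchMatrix(m, 10)  # -> False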
|
StarcoderdataPython
|
4995915
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from work.items import ShopItem
import re
class SierratradingpostSpider(scrapy.Spider):
name = 'sierratradingpost'
allowed_domains = ['www.sierratradingpost.com']
start_urls = ['https://www.sierratradingpost.com/']
custom_settings = {
'MYSQL_TABLE': 'data_content_1864',
# 'ITEM_PIPELINES': {
# 'work.pipelines.MysqlPipeline': None
# }
}
def parse(self, response):
nav_level_1_list = response.xpath('//div[contains(@class,"nav-item dropdown navigation-dropdown")]')[1:5]
for nav_level_1 in nav_level_1_list:
cat1 = nav_level_1.xpath('./a/text()').extract_first().strip()
nav_level_2_list = nav_level_1.xpath('./div/div[2]/div')
for nav_level_2 in nav_level_2_list:
cat2 = nav_level_2.xpath('./a/text()').get().strip()
nav_level_3_list = nav_level_2.xpath('./div/a')[2:]
for nav_level_3 in nav_level_3_list:
cat3 = nav_level_3.xpath('./text()').get().strip()
nav_level_3_url = nav_level_3.xpath('./@href').get()
print(f'{cat1}---{cat2}---{cat3}')
meta = {'cat1': cat1, 'cat2': cat2, 'cat3': cat3}
yield Request(response.urljoin(nav_level_3_url), callback=self.parse_product_url, meta=meta)
def parse_product_url(self, response):
product_list = response.xpath('//div[contains(@class,"productThumbnailContainer")]')
for product in product_list:
url = product.xpath('./div/a/@href').get()
yield Request(response.urljoin(url), callback=self.parse_product_info, meta=response.meta)
next_page = response.xpath('//link[@rel="next"]/@href').get()
yield Request(response.urljoin(next_page), callback=self.parse_product_url, meta=response.meta)
def parse_product_info(self, response):
item = ShopItem()
item['PageUrl'] = response.url
item['cat1'] = response.meta['cat1']
item['cat2'] = response.meta['cat2']
item['cat3'] = response.meta['cat3']
item['category'] = '|||'.join((item['cat1'], item['cat2'], item['cat3']))
item['brand'] = response.xpath('//h1[@itemprop="name"]/a/text()').get()
item['gender'] = item['cat1']
item['producttype'] = item['cat2']
item['title'] = ''.join(response.xpath('//h1[@itemprop="name"]/text()').getall())
item['price'] = response.xpath('//meta[@name="product:price:amount"]/@content').get()
item['short_content'] = ''
content = response.xpath('//ul[@class="list m-t-sm links-underline"]').get()
item['content'] = re.sub(r'<a.*?</a>', '', content)
picture = response.xpath('//input[@id="largeImageSrcTemplate"]/@value').get()
pictures = '|||'.join(response.xpath('//div[@data-ajaxaltimage-next-index]/a/@href').getall())
item['pictures'] = pictures or picture
item['color'] = response.xpath('//select[@id="selectedProperty1"]/option[2]/text()').get()
item['size'] = '|||'.join(response.xpath('//select[@id="selectedProperty2"]/option/text()')[1:].getall())
yield item
|
StarcoderdataPython
|
3386394
|
import pandas as pd
import plotly.graph_objects as go
def vis_pentagon(dataset, dirplace, d1, d2, d3, d4, d5, target, nclass, ntest):
c_list = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
da1_list = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
da2_list = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
da3_list = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
da4_list = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
da5_list = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
c1 = [[], [], [], [], []]
c2_da1, c2_da2, c2_da3, c2_da4, c2_da5 = [], [], [], [], []
c3_da1, c3_da2, c3_da3, c3_da4, c3_da5 = [], [], [], [], []
c4_da1, c4_da2, c4_da3, c4_da4, c4_da5 = [], [], [], [], []
c5_da1, c5_da2, c5_da3, c5_da4, c5_da5 = [], [], [], [], []
c6_da1, c6_da2, c6_da3, c6_da4, c6_da5 = [], [], [], [], []
c7_da1, c7_da2, c7_da3, c7_da4, c7_da5 = [], [], [], [], []
for i in range(ntest):
class_idx = int(target[i])
da1_list[class_idx]+=d1[i]
da2_list[class_idx]+=d2[i]
da3_list[class_idx]+=d3[i]
da4_list[class_idx]+=d4[i]
da5_list[class_idx]+=d5[i]
c_list[class_idx]+=1
for class_idx in range(nclass):
da1_list[class_idx]='{:.5f}'.format(da1_list[class_idx]/c_list[class_idx])
da2_list[class_idx]='{:.5f}'.format(da2_list[class_idx]/c_list[class_idx])
da3_list[class_idx]='{:.5f}'.format(da3_list[class_idx]/c_list[class_idx])
da4_list[class_idx]='{:.5f}'.format(da4_list[class_idx]/c_list[class_idx])
da5_list[class_idx]='{:.5f}'.format(da5_list[class_idx]/c_list[class_idx])
#categories = ['Identity','Jitter','Window Warp', 'Magnitude Warp', 'Time Warp']
categories = ['Identity','Jitter', 'Magnitude Warp', 'Window Warp', 'Time Warp']
fig = go.Figure()
for class_idx in range(nclass):
fig.add_trace(go.Scatterpolar(
r=[float(da1_list[class_idx]), float(da2_list[class_idx]), float(da3_list[class_idx]), float(da4_list[class_idx]), float(da5_list[class_idx])],
theta=categories,
fill='toself',
opacity=0.9,
line=dict(width=4),
name='Class {}'.format(class_idx+1)))
fig.update_layout(
template=None,
polar=dict(
radialaxis=dict(
tickfont=dict(size=18),
showline=False,
nticks= 10,
showgrid=False,
visible=True,
range=[0.0, 1.0])),
legend_title="Class",
font=dict(
family="Courier New, monospace",
size=20,
color="RebeccaPurple"),
showlegend=True)
fig.write_image(dirplace+"/pentagon_{}.pdf".format(dataset))
if __name__=="__main__":
dataset_id = 9
if dataset_id == 1:
dataset = 'Crop'
ntest = 16800
nclass = 24
if dataset_id == 2:
dataset = 'ElectricDevices'
ntest = 7711
nclass = 7
if dataset_id == 3:
dataset = 'FordA'
ntest = 1320
nclass = 2
if dataset_id == 4:
dataset = 'FordB'
ntest = 810
nclass = 2
if dataset_id == 5:
dataset = 'HandOutlines'
ntest = 370
nclass = 2
if dataset_id == 6:
dataset = 'MelbournePedestrian'
ntest = 2439
nclass = 10
if dataset_id == 7:
dataset = 'NonInvasiveFetalECGThorax1'
ntest = 1965
nclass = 42
if dataset_id == 8:
dataset = 'NonInvasiveFetalECGThorax1'
ntest = 1965
nclass = 42
if dataset_id == 9:
dataset = 'PhalangesOutlinesCorrect'
ntest = 858
nclass = 2
if dataset_id == 10:
dataset = 'StarLightCurves'
ntest = 8236
nclass = 3
if dataset_id == 11:
dataset = 'TwoPatterns'
ntest = 4000
nclass = 4
if dataset_id == 12:
dataset = 'Wafer'
ntest = 6164
nclass = 2
dirplace = '../{}/'.format(dataset)
df = pd.read_csv(dirplace+'{}.csv'.format(dataset), index_col=0)
print(df.mean())
print(df.std())
vis_pentagon(dataset, dirplace, list(df['0']),list(df['1']),list(df['2']),list(df['3']),list(df['4']), list(df['5']), nclass, ntest)
|
StarcoderdataPython
|
181547
|
<reponame>MarcoYLyu/scytale
#!/usr/bin/env python3
from . import lattice
from . import crypto
from . import factor
from . import ecurve
from .crypto import *
from .ecurve import *
from .factor import *
from .lattice import *
from .algorithm import *
__all__ = ['crypto', 'ecurve', 'factorization', 'lattice']
|
StarcoderdataPython
|
6501064
|
<reponame>stoimenoff/ultimate-tic-tac-tie<filename>tests/ai_test.py
import unittest
from unittest.mock import patch, PropertyMock
from ultimatetictactoe.game.players.ai import *
from ultimatetictactoe.game.boards import Macroboard, GameEndedError, Square
class TestBots(unittest.TestCase):
MOVES = [(2, 2), (7, 6), (3, 2), (1, 7), (3, 4), (2, 5),
(7, 7), (4, 4), (4, 3), (4, 2), (3, 7), (0, 4), (2, 3),
(8, 2), (6, 8), (1, 8), (5, 7), (6, 5), (2, 8), (8, 6),
(6, 1), (0, 3), (0, 0), (2, 1), (8, 4), (6, 4), (1, 5),
(4, 7), (3, 3), (2, 0), (8, 1), (6, 3), (0, 2), (2, 7),
(8, 7), (6, 0), (1, 0), (5, 0), (6, 2), (0, 7), (2, 4),
(8, 8), (6, 7), (1, 3), (5, 2), (8, 0), (7, 0), (3, 0),
(1, 1), (5, 3), (7, 2), (3, 6), (5, 5), (7, 1), (3, 5)]
def test_selector(self):
self.assertIsInstance(select_bot(1), GentlemanBot)
self.assertIsInstance(select_bot(2), RandomBot)
self.assertIsInstance(select_bot(3), HeuristicsBot)
self.assertIsInstance(select_bot(4), AlphaBetaBot)
for i in range(5, 10):
with self.assertRaises(ValueError):
select_bot(i)
for i in range(0, -6, -1):
with self.assertRaises(ValueError):
select_bot(i)
def test_choose_move(self):
board = Macroboard()
for difficulty in range(1, 5):
bot = select_bot(difficulty)
move = bot.choose_move(board)
self.assertIn(move, board.available_moves)
with patch('ultimatetictactoe.game.boards.Macroboard.available_moves',
new_callable=PropertyMock) as mock_moves:
mock_moves.return_value = []
for difficulty in range(1, 5):
bot = select_bot(difficulty)
with self.assertRaises(GameEndedError):
bot.choose_move(board)
def test_score_macroboard(self):
board = Macroboard()
scores_x = [2, 0, 0, 0, 0, 3, 3, 5, 5, 8, 8, 8, 8, 8, 8, 10, 10, 12,
12, 12, 10, 10, 10, 12, 12, 12, 12, 12, 10, 17, 17, 19, 19,
23, 25, 25, 25, 25, 25, 25, 25, 25, 25, 29, 29, 29, 29, 29,
29, 35, 33, 35, 37, 38, 34]
scores_o = [0, 0, 0, 0, 2, 2, 2, 2, 7, 7, 7, 7, 7, 7, 7, 7, 9, 9, 9, 9,
11, 11, 15, 15, 15, 15, 17, 17, 19, 19, 19, 19, 22, 22, 24,
26, 26, 26, 26, 26, 28, 30, 28, 26, 26, 28, 28, 26, 28, 28,
33, 33, 35, 37, 45]
for i in range(len(self.MOVES)):
score_x = heuristics.score_macroboard(board, Square.X)
score_o = heuristics.score_macroboard(board, Square.O)
self.assertEqual(score_x, scores_x[i])
self.assertEqual(score_o, scores_o[i])
board.make_move(*self.MOVES[i])
score_x = heuristics.score_macroboard(board, Square.X)
score_o = heuristics.score_macroboard(board, Square.O)
self.assertEqual(score_x, 10000)
self.assertEqual(score_o, 33)
board = Macroboard()
for i in range(len(self.MOVES)):
score = heuristics.score(board)
expected = (scores_x[i] - scores_o[i]) * (-1)**i
self.assertEqual(score, expected)
board.make_move(*self.MOVES[i])
|
StarcoderdataPython
|
9702075
|
<reponame>KaloyankerR/python-fundamentals-repository
import math
n = int(input())
p = int(input())
courses = math.ceil(n / p)
print(courses)
|
StarcoderdataPython
|
3265314
|
<reponame>TNRIS/api.tnris.org<filename>src/data_hub/tnris_org/bulk_actions.py
#
# BULK ACTIONS
# used in admin console list display
#
def close_registration(modeladmin, request, queryset):
queryset.update(registration_open=False)
close_registration.short_description = "Close Registration"
def open_registration(modeladmin, request, queryset):
queryset.update(registration_open=True)
open_registration.short_description = "Open Registration"
def close_to_public(modeladmin, request, queryset):
queryset.update(public=False)
close_to_public.short_description = "Close to Public on website"
def open_to_public(modeladmin, request, queryset):
queryset.update(public=True)
open_to_public.short_description = "Open to Public on website"
|
StarcoderdataPython
|
6556837
|
<gh_stars>0
#! /usr/bin/env python
# A script to calculate the time integration parameters for generalized alpha time integration
# for both, structure and fluid.
#
# Generalized-alpha time integration for structural dynamics follows "<NAME>. & <NAME>. A Time Integration Algorithm for Structural Dynamics With Improved Numerical Dissipation: The Generalized-alpha Method Journal of Applied Mechanics, 1993, 60, 371-375"
#
# Generalized-alpha time integration for fluid dynamics follows "<NAME>.; <NAME>. & <NAME>. A generalized-alpha method for integrating the filtered Navier--Stokes equations with a stabilized finite element method Computer Methods in Applied Mechanics and Engineering, 2000, 190, 305-319"
#
# Call: ParamsGenAlpha.py rho_structure rho_fluid
#
# Input parameters:
# rho_structure spectral radius for structural time integration
# rho_fluid spectral radius for fluid time integration
#
# Author: <NAME> (02/2012)
#
# import input arguments
import sys
rho_struct = float(sys.argv[1])
rho_fluid = float(sys.argv[2])
# compute time integration parameters for structural gen-alpha
alpha_F_struct = rho_struct / (rho_struct + 1.0)
alpha_M_struct = (2.0*rho_struct - 1.0)/(rho_struct + 1.0)
gamma_struct = 0.5 - alpha_M_struct + alpha_F_struct
beta_struct = 0.25 * pow(1 - alpha_M_struct + alpha_F_struct,2)
# compute time integration parameters for fluid gen-alpha
alpha_F_fluid = 1.0 / (1.0 + rho_fluid)
alpha_M_fluid = 0.5 * (3.0 - rho_fluid)/(1.0 + rho_fluid)
gamma_fluid = 0.5 + alpha_M_fluid - alpha_F_fluid
# print time integration parameters to console
print("\nStructure:")
print("ALPHA_F\t\t\t\t" + str(alpha_F_struct))
print("ALPHA_M\t\t\t\t" + str(alpha_M_struct))
print("BETA\t\t\t\t" + str(beta_struct))
print("GAMMA\t\t\t\t" + str(gamma_struct))
print("\n")
print("Fluid:")
print("ALPHA_M\t\t\t\t" + str(alpha_M_fluid))
print("ALPHA_F\t\t\t\t" + str(alpha_F_fluid))
print("GAMMA\t\t\t\t" + str(gamma_fluid))
print("\n")
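# Illustrative invocation (assumed spectral radii, values rounded):
#   $ python ParamsGenAlpha.py 0.8 0.5
#   Structure: ALPHA_F ~ 0.4444, ALPHA_M ~ 0.3333, BETA ~ 0.3086, GAMMA ~ 0.6111
#   Fluid:     ALPHA_M ~ 0.8333, ALPHA_F ~ 0.6667, GAMMA ~ 0.6667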
|
StarcoderdataPython
|
4932836
|
<filename>fibertree/codec/matrix-vector-knkn.py
from swoop import *
## Test program: Tiled K-Stationary vector-matrix multiplication
#
# Z_n = A_k * B_kn
# Tiled:
# Z_n1n0 = A_k1k0 * B_k1n1k0n0
#
#for k1, (a_k0, b_n1) in a_k1 & b_k1:
# for n1, (z_n0, b_n0) in z_n1 << b_n1:
# for k0, (a, b_n0) in a_k0 & b_k0:
# for n0, (z, b) in z_n0 << b_n0:
# z += a * b
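# A dense NumPy reference for the loop nest above (illustrative sketch, not part
# of the swoop program below; array shapes are assumed):
#   import numpy as np
#   def ref_k_stationary(a, b, K1, K0, N1, N0):
#       z = np.zeros((N1, N0))
#       for k1 in range(K1):
#           for n1 in range(N1):
#               for k0 in range(K0):
#                   for n0 in range(N0):
#                       z[n1, n0] += a[k1, k0] * b[k1, n1, k0, n0]
#       return z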
a = SwoopTensor(name="A", rank_ids=["K1", "K0"])
b = SwoopTensor(name="B", rank_ids=["K1", "N1", "K0", "N0"])
z = SwoopTensor(name="Z", rank_ids=["N1", "N0"])
a_k1 = a.getStartHandle()
b_k1 = b.getStartHandle()
z_n1 = z.getStartHandle()
z_root = z.getRootHandle()
# a_k1 & b_k1
a_k1_handles = Scan(a_k1)
b_k1_handles = Scan(b_k1)
a_k1_coords = HandlesToCoords(a_k1, a_k1_handles)
b_k1_coords = HandlesToCoords(b_k1, b_k1_handles)
(ab_k1_coords, ab_a_k1_handles, ab_b_k1_handles) = Intersect(a_k1_coords, a_k1_handles, b_k1_coords, b_k1_handles, instance_name="K1")
ab_a_k1_payloads = HandlesToPayloads(a_k1, ab_a_k1_handles)
ab_b_k1_payloads = HandlesToPayloads(b_k1, ab_b_k1_handles)
a_k0s = PayloadsToFiberHandles(a_k1, ab_a_k1_payloads)
b_n1s = PayloadsToFiberHandles(b_k1, ab_b_k1_payloads)
# z_n1 << b_n1
b_n1_handless = Scan(b_n1s)
b_n1_coordss = HandlesToCoords(b_n1s, b_n1_handless)
b_n1_payloadss = HandlesToPayloads(b_n1s, b_n1_handless)
# Repeat z_n1 iteration for each b_n1
z_n1s = Amplify(z_n1, b_n1s)
(z_n1_handless, z_n1_new_fiber_handles) = InsertionScan(z_n1s, b_n1_coordss)
z_n1_payloadss = HandlesToPayloads(z_n1s, z_n1_handless)
b_k0ss = PayloadsToFiberHandles(b_n1s, b_n1_payloadss)
z_n0ss = PayloadsToFiberHandles(z_n1s, z_n1_payloadss)
# a_k0 & b_k0
b_k0_handlesss = Scan(b_k0ss)
# Repeat a_k0 iteration for each b_k0
a_k0ss = Amplify(a_k0s, b_k0ss, instance_name="K0")
a_k0_handlesss = Scan(a_k0ss)
a_k0_coordsss = HandlesToCoords(a_k0ss, a_k0_handlesss)
b_k0_coordsss = HandlesToCoords(b_k0ss, b_k0_handlesss)
(ab_k0_coordsss, ab_a_k0_handlesss, ab_b_k0_handlesss) = Intersect(a_k0_coordsss, a_k0_handlesss, b_k0_coordsss, b_k0_handlesss, instance_name="K0")
ab_a_k0_payloadsss = HandlesToPayloads(a_k0ss, ab_a_k0_handlesss)
ab_b_k0_payloadsss = HandlesToPayloads(b_k0ss, ab_b_k0_handlesss)
a_valuesss = PayloadsToValues(a_k0ss, ab_a_k0_payloadsss)
b_n0sss = PayloadsToFiberHandles(b_k0ss, ab_b_k0_payloadsss)
# z_n0 << b_n0
b_n0_handlessss = Scan(b_n0sss)
b_n0_coordssss = HandlesToCoords(b_n0sss, b_n0_handlessss)
b_n0_payloadssss = HandlesToPayloads(b_n0sss, b_n0_handlessss)
# Repeat z_n0 iteration for each b_n0
z_n0sss = Amplify(z_n0ss, b_n0sss, instance_name="N0")
(z_n0_handlessss, z_n0_new_fiber_handlesss) = InsertionScan(z_n0sss, b_n0_coordssss)
z_n0_payloadssss = HandlesToPayloads(z_n0sss, z_n0_handlessss)
a_valuessss = Amplify(a_valuesss, b_n0_handlessss)
b_valuessss = PayloadsToValues(b_n0sss, b_n0_payloadssss)
z_valuessss = PayloadsToValues(z_n0sss, z_n0_payloadssss)
# z_ref += a_val * b_val
# NOTE: MUL and ADD broken out for efficiency
body_func = lambda a_val, b_val, z_val: z_val + a_val * b_val
resultssss = Compute(body_func, a_valuessss, b_valuessss, z_valuessss)
# Reduce into the same value until end of rank
z_n0_update_ackssss = UpdatePayloads(z_n0sss, z_n0_handlessss, resultssss)
# Update N0 occupancy. (Should we be reducing here?)
z_n1s = Amplify(z_n1, b_n1s)
z_n1ss = Amplify(z_n1s, b_k0ss)
z_n1_handlesss = Amplify(z_n1_handless, b_n0sss)
z_n1_update_ackss = UpdatePayloads(z_n1ss, z_n1_handlesss, z_n0_new_fiber_handlesss)
# Update root occupancy
z_root_handles = Amplify(Stream0(0), z_n1_new_fiber_handles)
z_root_update_acks = UpdatePayloads(z_root, z_root_handles, z_n1_new_fiber_handles)
N1 = 2
N0 = 3
K1 = 2
K0 = 3
my_a_root = BasicIntermediateRankImplementation(1, 1)
my_a_k1 = BasicIntermediateRankImplementation(K1, K0)
my_a_k0 = [BasicFiberImplementation([1, 2, 3]), BasicFiberImplementation([2, 4, 6])]
my_b_root = BasicIntermediateRankImplementation(1, 1)
my_b_k1 = BasicIntermediateRankImplementation(K1, N1)
my_b_n1 = [BasicIntermediateRankImplementation(N1, K0), BasicIntermediateRankImplementation(N1, K0, 1)]
my_b_k0 = [BasicIntermediateRankImplementation(K0, N0), BasicIntermediateRankImplementation(K0, N0, 1), BasicIntermediateRankImplementation(K0, N0, 2), BasicIntermediateRankImplementation(K0, N0, 3)]
my_b_n0 = [BasicFiberImplementation([4, 5, 6]),
BasicFiberImplementation([5, 6, 7]),
BasicFiberImplementation([6, 7, 8]),
BasicFiberImplementation([12, 15, 18]),
BasicFiberImplementation([15, 18, 21]),
BasicFiberImplementation([18, 21, 24]),
BasicFiberImplementation([8, 10, 12]),
BasicFiberImplementation([10, 12, 14]),
BasicFiberImplementation([12, 14, 16]),
BasicFiberImplementation([16, 20, 24]),
BasicFiberImplementation([20, 24, 28]),
BasicFiberImplementation([24, 28, 32])]
my_z_root = BasicIntermediateRankImplementation(1, 1)
my_z_n1 = BasicIntermediateRankImplementation(N1, N0)
my_z_n0 = []
for n1 in range(N1):
my_z_n0.append(BasicFiberImplementation([0] * N0))
a.setImplementations("root", [my_a_root])
a.setImplementations("K1", [my_a_k1])
a.setImplementations("K0", my_a_k0)
b.setImplementations("root", [my_b_root])
b.setImplementations("K1", [my_b_k1])
b.setImplementations("N1", my_b_n1)
b.setImplementations("K0", my_b_k0)
b.setImplementations("N0", my_b_n0)
z.setImplementations("root", [my_z_root])
z.setImplementations("N1", [my_z_n1])
z.setImplementations("N0", my_z_n0)
evaluate(z_n0_update_ackssss, 4)
evaluate(z_n1_update_ackss, 2)
evaluate(z_root_update_acks, 1)
expected_vals = [[160, 190, 220], [352, 418, 484]]
print(f"Final K-Stationary result:")
for n1 in range(N1):
print(my_z_n0[n1].vals)
for n1 in range(N1):
assert(my_z_n0[n1].vals == expected_vals[n1])
print("==========================")
|
StarcoderdataPython
|
3486410
|
<reponame>bhv/covid-19-growth<filename>lib/us.py
import pandas as pd
from operator import itemgetter
import etl
from pprint import pprint as pp
# Dataframes
# `df_us` A Dictionary of case, death, and recovery dataframes for the US
# `df_us_states` A Dictionary of state-level case, death, and recovery dataframes for the US
# `df_us_population` 2019 US census population data by state, sub-region, and region
# Functions
# `us_data(df)` Filter input dataframe on US rows.
# `us_data_state(df)` Filter input US dataframe state-level records.
# `population_for_state(state_name)`
# All US data. Applied to cases, deaths, and recoveries in the dictionary `df_us`
def us_data(df):
df = df.rename(columns={'province_state': 'state'})
# For consistency with census data
df.state = df.state.apply(lambda state: 'District of Columbia' if state == 'Washington, D.C.' else state)
df.state, df['state_abbrev'] = itemgetter(0, 1)(df.state.str.split(', ').str)
return df
# Sate-level US data. Applied to cases, deaths, and recoveries in the dictionary `df_us_states`
def us_data_state(df):
df = df[df['state'].isin(df_us_population['state'])]
return df[['day', 'state', 'cases']]
# Unused at present
def population_for_state(state_name):
return df_us_population[df_us_population.state == state_name].iloc[0].population_2019
# US population
df_us_population = pd.read_csv('csv/us_population.csv')
# Dict of all US data
df_us = {
'cases': us_data(etl.for_country(etl.df_cases, 'US')),
'deaths': us_data(etl.for_country(etl.df_deaths, 'US')),
'recovered': us_data(etl.for_country(etl.df_recovered, 'US'))
}
# Dict of US state-level data
df_us_states = {
'cases': us_data_state(df_us['cases']),
'deaths': us_data_state(df_us['deaths']),
'recovered': us_data_state(df_us['recovered']),
}
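# Illustrative usage (assumed; relies on csv/us_population.csv and the etl module):
#   population_for_state('California')   # -> 2019 census population for California
#   df_us_states['cases'].head()         # day / state / cases rows for US states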
|
StarcoderdataPython
|
32905
|
import unittest
from app.models import Source
class testSource(unittest.TestCase):
"""
Test class for the behaviour of the Source model
"""
def setUp(self):
"""
Method that runs before each test
"""
self.new_source = Source('abc-news','ABC news','Your trusted source for breaking news',"https://abcnews.go.com","general","en","us")
def test_instance(self):
self.assertTrue(isinstance(self.new_source,Source))
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
6435727
|
<filename>ndic/tests/test_search.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from unittest import TestCase
import mock
import requests
from ndic.search import search
from ndic.exceptions import NdicConnectionError
class NdicTestCase(TestCase):
def test_search_korean_word(self):
test_search_korean_word = "사과"
test_corresponding_english_word = "(과일) apple"
self.assertEqual(
search(test_search_korean_word),
test_corresponding_english_word,
)
def test_search_english_word(self):
test_search_english_word = "apple"
test_corresponding_korean_word = "사과"
self.assertEqual(
search(test_search_english_word),
test_corresponding_korean_word,
)
def test_search_nonexistent_korean_word(self):
test_nonexistent_korean_word = "아갸야라"
self.assertFalse(
search(test_nonexistent_korean_word),
)
def test_search_nonexistent_english_word(self):
test_nonexistent_english_word = "asfasdfasdf"
self.assertFalse(
search(test_nonexistent_english_word),
)
def test_search_korean_word_multiple_meaning(self):
test_search_korean_word = "말"
test_corresponding_english_word_1 = "(언어) word, language, speech, " \
"(literary) tongue"
test_corresponding_english_word_2 = "(동물) horse"
test_corresponding_english_word_3 = "(마지막) end (of), close (of)"
self.assertEqual(
search(test_search_korean_word, 1),
test_corresponding_english_word_1,
)
self.assertEqual(
search(test_search_korean_word, 2),
test_corresponding_english_word_2,
)
self.assertEqual(
search(test_search_korean_word, 3),
test_corresponding_english_word_3,
)
def test_search_english_word_multiple_meaning(self):
test_search_english_word = "get"
test_corresponding_korean_word_1 = "받다"
test_corresponding_korean_word_2 = "얻다, 입수하다; 가지다(obtain)"
test_corresponding_korean_word_3 = "(동물의) 새끼; 새끼를 낳음"
self.assertEqual(
search(test_search_english_word, 1),
test_corresponding_korean_word_1,
)
self.assertEqual(
search(test_search_english_word, 2),
test_corresponding_korean_word_2,
)
self.assertEqual(
search(test_search_english_word, 3),
test_corresponding_korean_word_3,
)
def test_search_xth_exceed(self):
test_nonexistent_english_word = "말"
self.assertFalse(
search(test_nonexistent_english_word, 10),
)
def test_search_negative_or_zero_xth(self):
test_nonexistent_english_word = "말"
self.assertFalse(
search(test_nonexistent_english_word, -1),
)
self.assertFalse(
search(test_nonexistent_english_word, 0),
)
@mock.patch.object(requests, 'get', side_effect=requests.ConnectionError)
def test_search_without_internet_network(self, mock_requests):
test_search_korean_word = "사과"
self.assertRaises(
NdicConnectionError,
search,
test_search_korean_word,
)
|
StarcoderdataPython
|
6578991
|
<filename>kws/augmentations/wave_augmentations/__init__.py
from kws.augmentations.wave_augmentations.wave_augmentations import WaveAugs
__all__ = [
'WaveAugs'
]
|
StarcoderdataPython
|
1904056
|
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.PythonUtil import invertDictLossless
from toontown.coghq import StageRoomSpecs
from toontown.toonbase import ToontownGlobals
from direct.showbase.PythonUtil import normalDistrib, lerp
import random
def printAllCashbotInfo():
print('roomId: roomName')
for roomId, roomName in list(StageRoomSpecs.CashbotStageRoomId2RoomName.items()):
print('%s: %s' % (roomId, roomName))
print('\nroomId: numBattles')
for roomId, numBattles in list(StageRoomSpecs.roomId2numBattles.items()):
print('%s: %s' % (roomId, numBattles))
print('\nstageId floor roomIds')
printStageRoomIds()
print('\nstageId floor numRooms')
printNumRooms()
print('\nstageId floor numForcedBattles')
printNumBattles()
def iterateLawbotStages(func):
from toontown.toonbase import ToontownGlobals
for layoutId in range(len(stageLayouts)):
for floorNum in range(getNumFloors(layoutId)):
func(StageLayout(0, floorNum, layoutId))
def printStageInfo():
def func(sl):
print(sl)
iterateLawbotStages(func)
def printRoomUsage():
usage = {}
def func(sl):
for roomId in sl.getRoomIds():
usage.setdefault(roomId, 0)
usage[roomId] += 1
iterateLawbotStages(func)
roomIds = list(usage.keys())
roomIds.sort()
for roomId in roomIds:
print('%s: %s' % (roomId, usage[roomId]))
def printRoomInfo():
roomIds = list(StageRoomSpecs.roomId2numCogs.keys())
roomIds.sort()
for roomId in roomIds:
print('room %s: %s cogs, %s cogLevels, %s merit cogLevels' % (roomId,
StageRoomSpecs.roomId2numCogs[roomId],
StageRoomSpecs.roomId2numCogLevels[roomId],
StageRoomSpecs.roomId2numMeritCogLevels[roomId]))
def printStageRoomIds():
def func(ml):
print(ml.getStageId(), ml.getFloorNum(), ml.getRoomIds())
iterateCashbotStages(func)
def printStageRoomNames():
def func(ml):
print(ml.getStageId(), ml.getFloorNum(), ml.getRoomNames())
iterateCashbotStages(func)
def printNumRooms():
def func(ml):
print(ml.getStageId(), ml.getFloorNum(), ml.getNumRooms())
iterateCashbotStages(func)
def printNumBattles():
def func(ml):
print(ml.getStageId(), ml.getFloorNum(), ml.getNumBattles())
iterateCashbotStages(func)
# All 20 floors share the same room sequence
DefaultLayout1 = ({i: (0, 1, 2, 3, 1, 2, 4) for i in range(20)},)
# 11 identical floor layouts
DefaultLayout = [(0, 5, 2, 3, 5, 2, 1)] * 11
testLayout = [(0, 3, 8, 105, 1), (0, 7, 8, 105, 2)]
LawOfficeLayout2_0 = [(0, 7, 8, 105, 1), (0, 10, 104, 103, 1), (0, 105, 101, 12, 2)]
LawOfficeLayout2_1 = [(0, 10, 11, 104, 1), (0, 100, 105, 8, 1), (0, 103, 3, 104, 2)]
LawOfficeLayout2_2 = [(0, 8, 105, 102, 1), (0, 100, 104, 10, 1), (0, 101, 105, 3, 2)]
LawOfficeLayout3_0 = [(0, 8, 101, 104, 1), (0, 7, 105, 103, 1), (0, 100, 8, 104, 1), (0, 105, 10, 12, 2)]
LawOfficeLayout3_1 = [(0, 100, 8, 105, 1), (0, 103, 10, 104, 1), (0, 8, 7, 105, 1), (0, 104, 12, 101, 2)]
LawOfficeLayout3_2 = [(0, 103, 104, 100, 1), (0, 102, 8, 105, 1), (0, 10, 104, 3, 1), (0, 105, 10, 11, 2)]
LawOfficeLayout4_0 = [(0, 3, 7, 105, 1), (0, 103, 104, 8, 1), (0, 102, 105, 11, 1), (0, 8, 104, 100, 1), (0, 10, 105, 12, 2)]
LawOfficeLayout4_1 = [(0, 7, 105, 102, 1), (0, 103, 12, 104, 1), (0, 101, 104, 8, 1), (0, 10, 3, 105, 1), (0, 8, 104, 102, 2)]
LawOfficeLayout4_2 = [(0, 11, 105, 102, 1), (0, 3, 104, 8, 1), (0, 100, 10, 104, 1), (0, 8, 12, 105, 1), (0, 104, 102, 11, 2)]
LawOfficeLayout5_0 = [(0, 104, 10, 7, 1), (0, 105, 103, 3, 1), (0, 104, 11, 12, 1), (0, 101, 8, 105, 1), (0, 10, 104, 12, 1), (0, 105, 100, 7, 2)]
LawOfficeLayout5_1 = [(0, 11, 8, 104, 1), (0, 102, 10, 105, 1), (0, 104, 7, 101, 1), (0, 105, 10, 12, 1), (0, 8, 11, 105, 1), (0, 104, 12, 3, 2)]
LawOfficeLayout5_2 = [(0, 105, 103, 8, 1), (0, 10, 3, 104, 1), (0, 105, 103, 101, 1), (0, 12, 8, 104, 1), (0, 7, 11, 104, 1), (0, 105, 12, 10, 2)]
stageLayouts = [LawOfficeLayout2_0,
LawOfficeLayout2_1,
LawOfficeLayout2_2,
LawOfficeLayout3_0,
LawOfficeLayout3_1,
LawOfficeLayout3_2,
LawOfficeLayout4_0,
LawOfficeLayout4_1,
LawOfficeLayout4_2,
LawOfficeLayout5_0,
LawOfficeLayout5_1,
LawOfficeLayout5_2]
stageLayouts1 = [testLayout] * 12
def getNumFloors(layoutIndex):
return len(stageLayouts[layoutIndex])
class StageLayout:
notify = DirectNotifyGlobal.directNotify.newCategory('StageLayout')
def __init__(self, stageId, floorNum, stageLayout = 0):
self.stageId = stageId
self.floorNum = floorNum
self.roomIds = []
self.hallways = []
self.layoutId = stageLayout
self.roomIds = stageLayouts[stageLayout][floorNum]
self.numRooms = 1 + len(self.roomIds)
self.numHallways = self.numRooms - 1
hallwayRng = self.getRng()
connectorRoomNames = StageRoomSpecs.CashbotStageConnectorRooms
for i in range(self.numHallways):
self.hallways.append(hallwayRng.choice(connectorRoomNames))
def getNumRooms(self):
return len(self.roomIds)
def getRoomId(self, n):
return self.roomIds[n]
def getRoomIds(self):
return self.roomIds[:]
def getRoomNames(self):
names = []
for roomId in self.roomIds:
names.append(StageRoomSpecs.CashbotStageRoomId2RoomName[roomId])
return names
def getNumHallways(self):
return len(self.hallways)
def getHallwayModel(self, n):
return self.hallways[n]
def getNumBattles(self):
numBattles = 0
for roomId in self.getRoomIds():
numBattles += StageRoomSpecs.roomId2numBattles[roomId]
return numBattles
def getNumCogs(self):
numCogs = 0
for roomId in self.getRoomIds():
numCogs += StageRoomSpecs.roomId2numCogs[roomId]
return numCogs
def getNumCogLevels(self):
numLevels = 0
for roomId in self.getRoomIds():
numLevels += StageRoomSpecs.roomId2numCogLevels[roomId]
return numLevels
def getNumMeritCogLevels(self):
numLevels = 0
for roomId in self.getRoomIds():
numLevels += StageRoomSpecs.roomId2numMeritCogLevels[roomId]
return numLevels
def getStageId(self):
return self.stageId
def getFloorNum(self):
return self.floorNum
def getRng(self):
return random.Random(self.stageId * self.floorNum)
def __str__(self):
return 'StageLayout: id=%s, layout=%s, floor=%s, meritCogLevels=%s, numRooms=%s, numBattles=%s, numCogs=%s' % (self.stageId,
self.layoutId,
self.floorNum,
self.getNumMeritCogLevels(),
self.getNumRooms(),
self.getNumBattles(),
self.getNumCogs())
def __repr__(self):
return str(self)
|
StarcoderdataPython
|
5025061
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGitcreds(RPackage):
"""Query 'git' Credentials from 'R'.
Query, set, delete credentials from the 'git' credential store. Manage
'GitHub' tokens and other 'git' credentials. This package is to be used by
other packages that need to authenticate to 'GitHub' and/or other 'git'
repositories."""
cran = "gitcreds"
version('0.1.1', sha256='b14aaf4e910a9d2d6c65c93e645f0b0159c00898e669f917f83c03dfedb1dfea')
depends_on('git', type='run')
|
StarcoderdataPython
|
1640504
|
# Generated by Django 2.1.8 on 2019-05-15 12:04
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0004_recipe'),
]
operations = [
migrations.RenameField(
model_name='recipe',
old_name='Ingredient',
new_name='ingredients',
),
migrations.RenameField(
model_name='recipe',
old_name='Tag',
new_name='tags',
),
]
|
StarcoderdataPython
|
85931
|
<reponame>tmenegaz/django<gh_stars>0
#!/home/tmenegaz/Documentos/cimatec/2016.2/escolaTecnica/mundoSenai/django/aula/aulaDjango/py3.5/bin/python3.5
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
StarcoderdataPython
|
1753988
|
<filename>core-python/Core_Python/regexpkg/Regex_Example1.py
''' Fill in the code to check if the text passed contains the vowels a, e and i, with
exactly one occurrence of any other character in between. '''
import re
def check_aei (text):
result = re.search(r"a.e.i", text)
return result != None
print(check_aei("academia")) # True
print(check_aei("aerial")) # False
print(check_aei("paramedic")) # True
|
StarcoderdataPython
|
4904835
|
<gh_stars>0
import numbers
import numpy as np
import pickle
class euclidean_tVec( object ):
# def __init__( self ):
# self.Type = "Euclidean_Tangent"
# self.nDim = 3
# self.tVector = [ 0, 0, 0 ]
def __init__( self, nDim ):
self.Type = "Euclidean_Tangent"
self.nDim = nDim
self.tVector = np.zeros( nDim )
def GetTangentVector(self):
return self.tVector
def SetTangentVector(self, tVec):
if not len( tVec ) == self.nDim:
print( "Error : Dimensions does not match" )
return
self.tVector = tVec
def InnerProduct( self, tVec1 ):
result = 0
for i in range( self.nDim ):
result += self.tVector[ i ] * tVec1.tVector[ i ]
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def ScalarMultiply( self, t ):
tVector_t = euclidean_tVec( self.nDim )
for i in range( self.nDim ):
tVector_t.tVector[ i ] = self.tVector[ i ] * t
return tVector_t
def Write( self, filePath ):
infoList = [ self.Type, self.nDim, self.tVector, False ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nDim = infoList[ 1 ]
self.tVector = infoList[ 2 ]
class euclidean( object ):
# def __init__( self ):
# self.Type = "Euclidean"
# self.nDim = 3
# self.pt = [ 0.0, 0.0, 0.0 ]
def __init__( self, nDim ):
self.Type = "Euclidean"
self.nDim = nDim
self.pt = np.zeros( nDim )
def SetPoint( self, pt ):
if not len( pt ) == self.nDim:
print( "Error : Dimensions does not match" )
return
self.pt = pt
def GetPoint( self ):
return self.pt
def InnerProduct( self, ptA ):
result = 0
for i in range( self.nDim ):
result += self.pt[ i ] * ptA.pt[ i ]
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def ExponentialMap( self, tVec ):
exp_pt = euclidean( self.nDim )
newPt_mat = np.zeros( self.nDim )
for i in range( self.nDim ):
newPt_mat[ i ] = self.pt[ i ] + tVec.tVector[ i ]
exp_pt.SetPoint( newPt_mat.tolist() )
return exp_pt
def LogMap( self, ptA ):
tVec = euclidean_tVec( self.nDim )
tVec_list = []
for i in range( self.nDim ):
tVec_i = ptA.pt[ i ] - self.pt[ i ]
tVec_list.append( tVec_i )
tVec.SetTangentVector( tVec_list )
return tVec
def RiemannianDistanceToA( self, ptA ):
tVec_toA = self.LogMap( ptA )
distSq = tVec_toA.norm()
dist = np.sqrt( distSq )
return dist
def ProjectTangent( self, pt, tVec ):
vProjected = tVec
return vProjected
def ParallelTranslate( self, v, w ):
return w
def ParallelTranslateToA( self, ptA, w ):
return w
def ParallelTranslateAtoB( self, ptA, ptB, w ):
return w
def AdjointGradientJacobi( self, v, j, dj ):
# Function Output
jOutput = j
jOutputDash = dj
jOutputDash_list = []
for i in range( self.nDim ):
jOutputDash_i = j.tVector[i] + dj.tVector[i]
jOutputDash_list.append( jOutputDash_i )
jOutputDash.SetTangentVector( jOutputDash_list )
return jOutput, jOutputDash
def Write( self, filePath ):
infoList = [ self.Type, self.nDim, self.pt, False ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nDim = infoList[ 1 ]
self.pt = infoList[ 2 ]
class sphere_tVec( object ):
# def __init__( self ):
# self.Type = "Sphere_Tangent"
# self.nDim = 3
# self.tVector = [ 0, 0, 0 ]
def __init__( self, nDim ):
self.Type = "Sphere_Tangent"
self.nDim = nDim
self.tVector = np.zeros( nDim )
def GetTangentVector(self):
return self.tVector
def SetTangentVector(self, tVec):
if not len( tVec ) == self.nDim:
print( "Error : Dimensions does not match" )
return
self.tVector = tVec
def InnerProduct( self, tVec1 ):
result = 0
for i in range( self.nDim ):
result += self.tVector[ i ] * tVec1.tVector[ i ]
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def ScalarMultiply( self, t ):
tVector_t = sphere_tVec( self.nDim )
for i in range( self.nDim ):
tVector_t.tVector[ i ] = self.tVector[ i ] * t
return tVector_t
def Write( self, filePath ):
infoList = [ self.Type, self.nDim, self.tVector, False ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nDim = infoList[ 1 ]
self.tVector = infoList[ 2 ]
class sphere( object ):
# def __init__( self ):
# self.Type = "Sphere"
# self.nDim = 3
# self.pt = [ 1.0, 0.0, 0.0 ] # Base point in S^2
def __init__( self, nDim ):
self.Type = "Sphere"
self.nDim = nDim
pt_base = np.zeros( nDim )
pt_base[ 0 ] = 1
self.pt = pt_base
def SetPoint( self, pt ):
if not len( pt ) == self.nDim:
print( "Error : Dimensions does not match" )
return
if not np.linalg.norm( pt ) == 1:
# print( "Warning : The point is not on a sphere")
self.pt = pt
return
self.pt = pt
def GetPoint( self ):
return self.pt
def InnerProduct( self, ptA ):
result = 0
for i in range( self.nDim ):
result += self.pt[ i ] * ptA.pt[ i ]
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def ProjectTangent( self, pt, tVec ):
inner_prod_pt_tVec = 0
for i in range( self.nDim ):
inner_prod_pt_tVec += pt.pt[i] * tVec.tVector[i]
vProjected = sphere_tVec( self.nDim )
for i in range( self.nDim ):
vProjected.tVector[i] = tVec.tVector[i] - inner_prod_pt_tVec * pt.pt[i]
return vProjected
def ParallelTranslate( self, v, w ):
vNorm = v.norm()
if( vNorm < 1.0e-12 ):
return w
innerProd = v.InnerProduct( w )
scaleFactor = innerProd / ( vNorm * vNorm )
# Component of w orthogonal to v
orth = sphere_tVec( self.nDim )
for i in range( self.nDim ):
orth.tVector[ i ] = w.tVector[ i ] - v.tVector[ i ] * scaleFactor
# Compute parallel translated v
vParallel = sphere_tVec( self.nDim )
for i in range( self.nDim ):
vParallel.tVector[ i ] = self.pt[ i ]* ( -np.sin( vNorm ) * vNorm ) + v.tVector[ i ] * np.cos( vNorm )
# Parallel Translated w
wParallelTranslated = sphere_tVec( self.nDim )
for i in range( self.nDim ):
wParallelTranslated.tVector[ i ] = vParallel.tVector[ i ] * scaleFactor + orth.tVector[ i ]
return wParallelTranslated
def ParallelTranslateToA( self, ptA, w ):
v = self.LogMap( ptA )
vNorm = v.norm()
if( vNorm < 1.0e-12 ):
return w
innerProd = v.InnerProduct( w )
scaleFactor = innerProd / ( vNorm * vNorm )
# Component of w orthogonal to v
orth = sphere_tVec( self.nDim )
for i in range( self.nDim ):
orth.tVector[ i ] = w.tVector[ i ] - v.tVector[ i ] * scaleFactor
# Compute parallel translated v
vParallel = sphere_tVec( self.nDim )
for i in range( self.nDim ):
vParallel.tVector[ i ] = self.pt[ i ]* ( -np.sin( vNorm ) * vNorm ) + v.tVector[ i ] * np.cos( vNorm )
# Parallel Translated w
wParallelTranslated = sphere_tVec( self.nDim )
for i in range( self.nDim ):
wParallelTranslated.tVector[ i ] = vParallel.tVector[ i ] * scaleFactor + orth.tVector[ i ]
return wParallelTranslated
def AdjointGradientJacobi( self, v, j, dj ):
e_base = self
vNorm = v.norm()
if vNorm < 1.0e-12:
for i in range( self.nDim ):
dj.tVector[ i ] = j.tVector[ i ] + dj.tVector[ i ]
# Function Output
jOutput = j
jOutputDash = dj
else:
innerProdVJ = v.InnerProduct( j )
innerProdVJPrime = v.InnerProduct( dj )
scaleFactorJ = innerProdVJ / ( vNorm * vNorm )
scaleFactorJPrime = innerProdVJPrime / ( vNorm * vNorm )
jTang = sphere_tVec( self.nDim )
djTang = sphere_tVec( self.nDim )
jOrth = sphere_tVec( self.nDim )
djOrth = sphere_tVec( self.nDim )
for i in range( self.nDim ):
jTang.tVector[ i ] = v.tVector[ i ] * scaleFactorJ
djTang.tVector[ i ] = v.tVector[ i ] * scaleFactorJPrime
jOrth.tVector[ i ] = j.tVector[ i ] - jTang.tVector[ i ]
djOrth.tVector[ i ] = dj.tVector[ i ] - djTang.tVector[ i ]
j.tVector[ i ] = jTang.tVector[ i ] + ( np.cos( vNorm ) * jOrth.tVector[ i ] ) - ( ( vNorm * np.sin( vNorm ) ) * djOrth.tVector[ i ] )
j = e_base.ParallelTranslate( v, j )
for i in range( self.nDim ):
dj.tVector[ i ] = jTang.tVector[ i ] + djTang.tVector[ i ] + ( np.sin(vNorm) / vNorm ) * jOrth.tVector[ i ] + np.cos( vNorm ) * djOrth.tVector[ i ]
dj = e_base.ParallelTranslate( v, dj )
# Function Output
jOutput = j
jOutputDash = dj
return jOutput, jOutputDash
def ParallelTranslateAtoB( self, a, b, w ):
v = a.LogMap( b )
return a.ParallelTranslate( v, w )
def ExponentialMap( self, tVec ):
theta = np.linalg.norm( tVec.tVector )
if theta < 1e-12:
exp_pt = sphere( self.nDim )
exp_pt.pt = self.pt
return exp_pt
if theta > np.pi * 2:
theta = np.mod( theta, np.pi * 2 )
exp_pt = sphere( self.nDim )
lhs = np.multiply( np.cos( theta ), self.pt )
rhs = np.multiply( np.sin( theta ) / theta, tVec.tVector )
exp_pt.pt = lhs + rhs
exp_pt.pt = np.divide( exp_pt.pt, exp_pt.norm() )
return exp_pt
def LogMap( self, another_pt ):
cosTheta = self.InnerProduct( another_pt )
tVec = sphere_tVec( self.nDim )
for i in range( self.nDim ):
tVec.tVector[ i ] = another_pt.pt[ i ] - cosTheta * self.pt[ i ]
length = tVec.norm()
if length < 1e-12 or cosTheta >= 1.0 or cosTheta <= -1.0:
tVec = sphere_tVec( self.nDim )
return tVec
for i in range( self.nDim ):
tVec.tVector[ i ] = tVec.tVector[ i ] * np.arccos( cosTheta ) / length
return tVec
def Write( self, filePath ):
infoList = [ self.Type, self.nDim, self.pt, False ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nDim = infoList[ 1 ]
self.pt = infoList[ 2 ]
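# Illustrative round trip on S^2 (assumed example, not part of the original module):
#   p = sphere(3); p.SetPoint([1.0, 0.0, 0.0])
#   v = sphere_tVec(3); v.SetTangentVector([0.0, np.pi / 2.0, 0.0])
#   q = p.ExponentialMap(v)   # q.pt is approximately [0, 1, 0]
#   w = p.LogMap(q)           # w.tVector recovers approximately [0, pi/2, 0]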
class pos_real_tVec( object ):
def __init__( self, nDim ):
self.Type = "PositiveReal_Tangent"
self.nDim = nDim
self.tVector = np.zeros( nDim ).tolist()
def GetTangentVector(self):
return self.tVector
def SetTangentVector(self, tVec):
if type( tVec ) == list:
if not len( tVec ) == self.nDim:
print( "Error : Dimensions does not match" )
return
else:
self.tVector = tVec
else:
if not self.nDim == 1:
print( "Error : Dimensions does not match" )
return
else:
self.tVector[ 0 ] = tVec
def InnerProduct( self, tVec1 ):
result = 0
for i in range( self.nDim ):
result += self.tVector[ i ] * tVec1.tVector[ i ]
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def ScalarMultiply( self, t ):
tVector_t = pos_real_tVec( self.nDim )
for i in range( self.nDim ):
tVector_t.tVector[ i ] = self.tVector[ i ] * t
return tVector_t
def Write( self, filePath ):
infoList = [ self.Type, self.nDim, self.tVector, False ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nDim = infoList[ 1 ]
self.tVector = infoList[ 2 ]
class pos_real( object ):
def __init__( self, nDim ):
self.Type = "PositiveReal"
self.nDim = nDim
self.pt = np.ones( nDim ).tolist()
def SetPoint( self, pt ):
if type( pt ) == list:
if not len( pt ) == self.nDim:
print( "Error : Dimensions does not match" )
return
else:
self.pt = pt
else:
if not self.nDim == 1:
print( "Error : Dimensions does not match" )
return
else:
self.pt[ 0 ] = pt
def GetPoint( self ):
return self.pt
def InnerProduct( self, ptA ):
result = 0
for i in range( self.nDim ):
result += self.pt[ i ] * ptA.pt[ i ]
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def ExponentialMap( self, tVec ):
exp_pt = pos_real( self.nDim )
# print( "Tangent Vector" )
# print( tVec.tVector )
exp_pt.SetPoint( np.multiply( self.pt, np.exp( tVec.tVector ) ).tolist() )
return exp_pt
def LogMap( self, another_pt ):
tVec = pos_real_tVec( self.nDim )
tVec.SetTangentVector( np.log( np.divide( another_pt.pt, self.pt ) ).tolist() )
return tVec
def ProjectTangent( self, pt, tVec ):
vProjected = tVec
return vProjected
def ParallelTranslate( self, v, w ):
return w
def ParallelTranslateToA( self, ptA, w ):
return w
def ParallelTranslateAtoB( self, a, b, w ):
return w
def AdjointGradientJacobi( self, v, j, dj, ):
jOutput = j
jOutputDash = dj
for i in range( self.nDim ):
jOutputDash.tVector[ i ] = j.tVector[ i ] + dj.tVector[ i ]
return jOutput, jOutputDash
def Write( self, filePath ):
infoList = [ self.Type, self.nDim, self.pt, False ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nDim = infoList[ 1 ]
self.pt = infoList[ 2 ]
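# Illustrative usage (assumed example): on R+ the exponential and log maps are
# p * exp(v) and log(q / p), applied component-wise, e.g.
#   p = pos_real(1); p.SetPoint(2.0)
#   v = pos_real_tVec(1); v.SetTangentVector(np.log(1.5))
#   q = p.ExponentialMap(v)   # q.pt -> [3.0]
#   p.LogMap(q).tVector       # -> [log(1.5)] ~ [0.405]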
class cmrep_tVec( object ):
def __init__( self, nDim ):
self.Type = "CMRep_Tangent"
self.nDim = nDim
self.tVector = []
for i in range( self.nDim ):
self.tVector.append( [ euclidean_tVec( 3 ), pos_real_tVec( 1 ) ] )
self.meanRadius = 1
def SetPositionTangentVector( self, idx, pos_tVec ):
self.tVector[ idx ][ 0 ].SetTangentVector( pos_tVec )
def SetRadiusTangentVector( self, idx, rad_tVec ):
self.tVector[ idx ][ 1 ].SetTangentVector( rad_tVec )
def SetMeanRadius( self, meanRadius ):
self.meanRadius = meanRadius
def GetTangentVector(self):
return self.tVector
def SetTangentVector( self, tVec ):
if not len( tVec ) == self.nDim:
print( "Error : Dimension Mismatch" )
return
self.tVector = tVec
def InnerProduct( self, tVec1 ):
result = 0
for i in range( self.nDim ):
result += self.tVector[ i ][ 0 ].InnerProduct( tVec1.tVector[ i ][ 0 ] )
result += self.meanRadius * tVec1.meanRadius * self.tVector[ i ][ 1 ].InnerProduct( tVec1.tVector[ i ][ 1 ] )
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def Write( self, filePath ):
infoList = [ self.Type, self.nDim, self.tVector, self.meanRadius ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nDim = infoList[ 1 ]
self.tVector = infoList[ 2 ]
self.meanRadius = infoList[ 3 ]
class cmrep( object ):
def __init__( self, nDim ):
self.Type = "CMRep"
self.nDim = nDim
self.pt = []
self.pos = []
self.rad = []
for i in range( nDim ):
self.pt.append( [ euclidean( 3 ), pos_real( 1 ) ] )
self.pos.append( self.pt[ i ][ 0 ] )
self.rad.append( self.pt[ i ][ 1 ] )
self.meanRadius = 1
def SetPoint( self, pt ):
if not len( pt ) == self.nDim:
print( "Error : Dimensions does not match" )
return
self.pt = pt
self.pos = []
self.rad = []
for i in range( self.nDim ):
self.pos.append( self.pt[ i ][ 0 ] )
self.rad.append( self.pt[ i ][ 1 ] )
def UpdateMeanRadius( self ):
meanRad = 0
for i in range( self.nDim ):
meanRad += ( float( self.rad[ i ].pt[0] ) / float( self.nDim ) )
self.meanRadius = meanRad
return
def AppendAtom( self, pt = [ euclidean( 3 ), pos_real( 1 ) ] ):
self.nDim = self.nDim + 1
self.pt.append( pt )
self.pos.append( self.pt[ self.nDim - 1 ][ 0 ] )
self.rad.append( self.pt[ self.nDim - 1 ][ 1 ] )
self.meanRadius = ( self.meanRadius * ( self.nDim - 1 ) + pt[ 1 ].pt[ 0 ] ) / self.nDim
def SetPosition( self, idx, position=[0.0,0.0,0.0] ):
self.pos[ idx ].SetPoint( position )
self.pt[ idx ][ 0 ].SetPoint( position )
def SetRadius( self, idx, rad=1.0 ):
self.rad[ idx ].SetPoint( rad )
self.pt[ idx ][ 1 ].SetPoint( rad )
def ExponentialMap( self, tVec ):
if not tVec.Type == "CMRep_Tangent":
print( "Tangent Vector Type Mismatched" )
return
exp_pt = cmrep( self.nDim )
for i in range( self.nDim ):
exp_pt.pt[ i ][ 0 ] = self.pt[ i ][ 0 ].ExponentialMap( tVec.tVector[ i ][ 0 ] )
exp_pt.pt[ i ][ 1 ] = self.pt[ i ][ 1 ].ExponentialMap( tVec.tVector[ i ][ 1 ] )
return exp_pt
def LogMap( self, another_pt ):
if not another_pt.Type == "CMRep":
print( "Error: Component Type Mismatched" )
return
tVec = cmrep_tVec( self.nDim )
for i in range( self.nDim ):
tVec.tVector[ i ][ 0 ] = self.pt[ i ][ 0 ].LogMap( another_pt.pt[ i ][ 0 ] )
tVec.tVector[ i ][ 1 ] = self.pt[ i ][ 1 ].LogMap( another_pt.pt[ i ][ 1 ] )
return tVec
def GetPoistion( self ):
return self.pos
def GetRadius( self ):
return self.rad
def InnerProduct( self, ptA ):
result = 0
for i in range( self.nDim ):
result += self.pt[ i ][ 0 ].InnerProduct( ptA.pt[ i ][ 0 ] )
result += self.meanRadius * ptA.meanRadius * self.pt[ i ][ 1 ].InnerProduct( ptA.pt[ i ][ 1 ] )
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def ProjectTangent( self, pt, tVec ):
vProjected = cmrep_tVec( self.nDim )
for i in range( self.nDim ):
vProjected.tVector[ i ][ 0 ] = self.pt[ i ][ 0 ].ProjectTangent( pt.pt[ i ][ 0 ], tVec.tVector[ i ][ 0 ] )
vProjected.tVector[ i ][ 1 ] = self.pt[ i ][ 1 ].ProjectTangent( pt.pt[ i ][ 1 ], tVec.tVector[ i ][ 1 ] )
return vProjected
def ParallelTranslate( self, v, w ):
wParallelTranslated = cmrep_tVec( self.nDim )
for i in range( self.nDim ):
wParallelTranslated.tVector[ i ][ 0 ] = self.pt[ i ][ 0 ].ParallelTranslate( v.tVector[ i ][ 0 ], w.tVector[ i ][ 0 ] )
wParallelTranslated.tVector[ i ][ 1 ] = self.pt[ i ][ 1 ].ParallelTranslate( v.tVector[ i ][ 1 ], w.tVector[ i ][ 1 ] )
return wParallelTranslated
def ParallelTranslateToA( self, ptA, w ):
v = self.LogMap( ptA )
return self.ParallelTranslate( v, w )
def ParallelTranslateAtoB( self, a, b, w ):
v = a.LogMap( b )
return a.ParallelTranslate( v, w )
def AdjointGradientJacobi( self, v, j, dj ):
e_base = self
vNorm = v.norm()
jOutput = cmrep_tVec( self.nDim )
jOutputDash = cmrep_tVec( self.nDim )
for i in range( self.nDim ):
jOutput.tVector[ i ][ 0 ], jOutputDash.tVector[ i ][ 0 ] = self.pt[ i ][ 0 ].AdjointGradientJacobi( v.tVector[ i ][ 0 ], j.tVector[ i ][ 0 ], dj.tVector[ i ][ 0 ] )
jOutput.tVector[ i ][ 1 ], jOutputDash.tVector[ i ][ 1 ] = self.pt[ i ][ 1 ].AdjointGradientJacobi( v.tVector[ i ][ 1 ], j.tVector[ i ][ 1 ], dj.tVector[ i ][ 1 ] )
return jOutput, jOutputDash
def Write( self, filePath ):
infoList = [ self.Type, self.nDim, self.pt, self.meanRadius ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nDim = infoList[ 1 ]
self.pt = infoList[ 2 ]
self.meanRadius = infoList[ 3 ]
class cmrep_abstract( object ):
def __init__( self, nDim ):
self.Type = "CMRep_Abstract"
self.nDim = nDim
# pt : center, scale, abstract position, radius
self.pt = [ euclidean(3), pos_real(1), sphere( 3 * ( nDim - 1 ) ), pos_real( nDim ) ]
self.center = self.pt[ 0 ]
self.scale = self.pt[ 1 ]
self.pos = self.pt[ 2 ]
self.rad = self.pt[ 3 ]
self.meanRadius = 1
self.meanRadius_Arr = np.array( nDim )
self.meanScale = 1
def SetPoint( self, pt ):
if not ( len( pt ) == 4 and pt[ 0 ].nDim == 3 and pt[ 1 ].nDim == 1 and pt[ 2 ].nDim == 3 * ( self.nDim - 1 ) and pt[ 3 ].nDim == self.nDim ):
print( "cmrep_abstract.SetPoint")
print( "Error : Dimensions does not match" )
return
self.pt = pt
self.center = pt[ 0 ]
self.scale = pt[ 1 ]
self.pos = pt[ 2 ]
self.rad = pt[ 3 ]
self.UpdateMeanRadius()
def UpdateMeanRadius( self ):
meanRad = 0
for i in range( self.nDim ):
meanRad += ( float( self.rad.pt[i] ) / float( self.nDim ) )
self.meanRadius = meanRad
return
def SetMeanScale( self, s ):
self.meanScale = s
return
def SetDataSetMeanRadiusArr( self, DataSetRadiusArr ):
self.meanRadius_Arr = DataSetRadiusArr
def ExponentialMap( self, tVec ):
if not tVec.Type == "CMRep_Abstract_Tangent":
print( "Tangent Vector Type Mismatched" )
return
exp_pt = cmrep_abstract( self.nDim )
exp_pt_arr = []
for i in range( 4 ):
exp_pt_arr.append( self.pt[ i ].ExponentialMap( tVec.tVector[ i ] ) )
exp_pt.SetPoint( exp_pt_arr )
return exp_pt
def LogMap( self, another_pt ):
if not another_pt.Type == "CMRep_Abstract":
print( "Error: Component Type Mismatched" )
return
tVec = cmrep_abstract_tVec( self.nDim )
for i in range( 4 ):
tVec.tVector[ i ] = self.pt[ i ].LogMap( another_pt.pt[ i ] )
return tVec
def GetScale( self ):
return self.scale
def GetCenter( self ):
return self.center
def GetPoistion( self ):
return self.pos
def GetRadius( self ):
return self.rad
def InnerProduct( self, ptA ):
result = 0
# Center
result += self.pt[ 0 ].InnerProduct( ptA.pt[ 0 ] )
# Scale
result += self.meanScale * ptA.meanScale * self.pt[ 1 ].InnerProduct( ptA.pt[ 1 ] )
# Abstract Position
result += self.meanScale * ptA.meanScale * self.pt[ 2 ].InnerProduct( ptA.pt[ 2 ] )
# Radius
result += self.meanRadius * ptA.meanRadius * self.pt[ 3 ].InnerProduct( ptA.pt[ 3 ] )
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def ProjectTangent( self, pt, tVec ):
vProjected = cmrep_abstract_tVec( self.nDim )
for i in range( 4 ):
vProjected.tVector[ i ] = self.pt[ i ].ProjectTangent( pt.pt[ i ], tVec.tVector[ i ] )
return vProjected
def ParallelTranslate( self, v, w ):
wParallelTranslated = cmrep_abstract_tVec( self.nDim )
for i in range( 4 ):
wParallelTranslated.tVector[ i ] = self.pt[ i ].ParallelTranslate( v.tVector[ i ], w.tVector[ i ] )
return wParallelTranslated
def ParallelTranslateToA( self, ptA, w ):
v = self.LogMap( ptA )
return self.ParallelTranslate( v, w )
def ParallelTranslateAtoB( self, a, b, w ):
v = a.LogMap( b )
return a.ParallelTranslate( v, w )
def AdjointGradientJacobi( self, v, j, dj ):
e_base = self
vNorm = v.norm()
jOutput = cmrep_abstract_tVec( self.nDim )
jOutputDash = cmrep_abstract_tVec( self.nDim )
for i in range( 4 ):
jOutput.tVector[ i ], jOutputDash.tVector[ i ] = self.pt[ i ].AdjointGradientJacobi( v.tVector[ i ], j.tVector[ i ], dj.tVector[ i ] )
return jOutput, jOutputDash
def GetEuclideanLocations( self ):
H_sub = HelmertSubmatrix( self.nDim )
H_sub_T = H_sub.T
# Relative Positions on a 3(n-1)-1 sphere
pos_abstr_sphere_matrix = np.array( self.pt[ 2 ].pt ).reshape( -1, 3 )
# Relative Positions on Euclidean
pos_abstr_euclidean_matrix = np.dot( H_sub_T, pos_abstr_sphere_matrix )
# Multiply Scale
# print( self.scale.pt[ 0 ] )
pos_scale_eucldiean_matrix = np.multiply( pos_abstr_euclidean_matrix, self.scale.pt[ 0 ] )
# Add Center of Mass
pos_world_coord_euclidean_matrix = np.zeros( [ self.nDim, 3 ] )
for i in range( self.nDim ):
pos_world_coord_euclidean_matrix[ i, : ] = np.add( pos_scale_eucldiean_matrix[ i, : ], self.center.pt )
return pos_world_coord_euclidean_matrix
def GetAbstractEuclideanLocations( self ):
H_sub = HelmertSubmatrix( self.nDim )
H_sub_T = H_sub.T
# Relative Positions on a 3(n-1)-1 sphere
pos_abstr_sphere_matrix = np.array( self.pt[ 2 ].pt ).reshape( -1, 3 )
# Relative Positions on Euclidean
pos_abstr_euclidean_matrix = np.dot( H_sub_T, pos_abstr_sphere_matrix )
# # Multiply Scale
# # print( self.scale.pt[ 0 ] )
# pos_scale_eucldiean_matrix = np.multiply( pos_abstr_euclidean_matrix, self.scale.pt[ 0 ] )
# # Add Center of Mass
# pos_world_coord_euclidean_matrix = np.zeros( [ self.nDim, 3 ] )
# for i in range( self.nDim ):
# pos_world_coord_euclidean_matrix[ i, : ] = np.add( pos_scale_eucldiean_matrix[ i, : ], self.center.pt )
return pos_abstr_euclidean_matrix
def Write( self, filePath ):
infoList = [ self.Type, self.nDim, self.pt, self.meanRadius ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nDim = infoList[ 1 ]
self.pt = infoList[ 2 ]
self.center = self.pt[ 0 ]
self.scale = self.pt[ 1 ]
self.pos = self.pt[ 2 ]
self.rad = self.pt[ 3 ]
self.meanRadius = infoList[ 3 ]
class cmrep_abstract_tVec( object ):
def __init__( self, nDim ):
self.Type = "CMRep_Abstract_Tangent"
self.nDim = nDim
# tVector : center, scale, abstract position, radius
self.tVector = [ euclidean_tVec( 3 ), pos_real_tVec( 1 ), sphere_tVec( 3 * ( nDim - 1 ) ), pos_real_tVec( nDim ) ]
self.meanRadius = 1
self.meanScale = 1
# def SetPositionTangentVector( self, idx, pos_tVec ):
# self.tVector[ idx ][ 0 ].SetTangentVector( pos_tVec )
# def SetRadiusTangentVector( self, idx, rad_tVec ):
# self.tVector[ idx ][ 1 ].SetTangentVector( rad_tVec )
def SetTangentVectorFromArray( self, tVecArr ):
if not len( tVecArr ) == ( 4 + self.tVector[ 2 ].nDim + self.tVector[ 3 ].nDim ):
print( "Error : Dimension Mismatch" )
return
# Center
self.tVector[ 0 ].tVector[ 0 ] = tVecArr[ 0 ]
self.tVector[ 0 ].tVector[ 1 ] = tVecArr[ 1 ]
self.tVector[ 0 ].tVector[ 2 ] = tVecArr[ 2 ]
# Scale
self.tVector[ 1 ].tVector[ 0 ] = tVecArr[ 3 ]
# PreShape
for k in range( self.tVector[ 2 ].nDim ):
self.tVector[ 2 ].tVector[ k ] = tVecArr[ k + 4 ]
# Radius
for k in range( self.tVector[ 3 ].nDim ):
self.tVector[ 3 ].tVector[ k ] = tVecArr[ k + 4 + self.tVector[ 2 ].nDim ]
def SetMeanRadius( self, meanRadius ):
self.meanRadius = meanRadius
def SetMeanScale( self, meanScale ):
self.meanScale = meanScale
return
def GetTangentVector(self):
return self.tVector
def SetTangentVector( self, tVec ):
if not ( len( tVec ) == 4 and tVec[ 0 ].nDim == 3 and tVec[ 1 ].nDim == 1 and tVec[ 2 ].nDim == ( 3 * ( self.nDim - 1 ) ) and tVec[ 3 ].nDim == self.nDim ):
print( "Error : Dimension Mismatch" )
return
self.tVector = tVec
def InnerProduct( self, tVec1 ):
result = 0
# Center
result += self.tVector[ 0 ].InnerProduct( tVec1.tVector[ 0 ] )
# Scale
result += self.meanScale * tVec1.meanScale * self.tVector[ 1 ].InnerProduct( tVec1.tVector[ 1 ] )
# Abstract Position
result += self.meanScale * tVec1.meanScale * self.tVector[ 2 ].InnerProduct( tVec1.tVector[ 2 ] )
# Radius
result += self.meanRadius * tVec1.meanRadius * self.tVector[ 3 ].InnerProduct( tVec1.tVector[ 3 ] )
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def ScalarMultiply( self, t ):
tVector_t = cmrep_abstract_tVec( self.nDim )
for i in range( 4 ):
tVector_t.tVector[ i ] = self.tVector[ i ].ScalarMultiply( t )
return tVector_t
def Write( self, filePath ):
infoList = [ self.Type, self.nDim, self.tVector, self.meanRadius ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nDim = infoList[ 1 ]
self.tVector = infoList[ 2 ]
self.meanRadius = infoList[ 3 ]
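# Illustrative sketch (not part of the original module): how the cmrep_abstract point /
# tangent pair above is typically exercised. p and q are assumed to be cmrep_abstract
# objects already populated via SetPoint with matching nDim.
# v = p.LogMap( q )                    # cmrep_abstract_tVec at p pointing to q
# q_recovered = p.ExponentialMap( v )  # component-wise exp of the log, approximately q
# dist = v.norm()                      # distance induced by the weighted product metric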
# CM-Rep Abstract Manifold with Boundary Normals
class cmrep_abstract_normal( object ):
def __init__( self, nDim ):
self.Type = "CMRep_Abstract_Normal"
self.nDim = nDim
# pt : center, scale, abstract position, radius, boundary normals
self.bndrNormal1 = []
self.bndrNormal2 = []
for i in range( nDim ):
self.bndrNormal1.append( sphere( 3 ) )
self.bndrNormal2.append( sphere( 3 ) )
self.pt = [ euclidean(3), pos_real(1), sphere( 3 * ( nDim - 1 ) ), pos_real( nDim ), self.bndrNormal1, self.bndrNormal2 ]
self.center = self.pt[ 0 ]
self.scale = self.pt[ 1 ]
self.pos = self.pt[ 2 ]
self.rad = self.pt[ 3 ]
self.meanRadius = 1
self.meanRadius_Arr = np.array( nDim )
self.meanScale = 1
def SetPoint( self, pt ):
if not ( len( pt ) == 6 and pt[ 0 ].nDim == 3 and pt[ 1 ].nDim == 1 and pt[ 2 ].nDim == 3 * ( self.nDim - 1 ) and pt[ 3 ].nDim == self.nDim and len( pt[ 4 ] ) == self.nDim and len( pt[ 5 ] ) == self.nDim ):
print( "cmrep_abstract_normal.SetPoint")
print( "Error : Dimensions does not match" )
return
self.pt = pt
self.center = pt[ 0 ]
self.scale = pt[ 1 ]
self.pos = pt[ 2 ]
self.rad = pt[ 3 ]
self.bndrNormal1 = pt[ 4 ]
self.bndrNormal2 = pt[ 5 ]
self.UpdateMeanRadius()
def UpdateMeanRadius( self ):
meanRad = 0
for i in range( self.nDim ):
meanRad += ( float( self.rad.pt[i] ) / float( self.nDim ) )
self.meanRadius = meanRad
return
def SetMeanScale( self, s ):
self.meanScale = s
return
def SetDataSetMeanRadiusArr( self, DataSetRadiusArr ):
self.meanRadius_Arr = DataSetRadiusArr
def ExponentialMap( self, tVec ):
if not tVec.Type == "CMRep_Abstract_Normal_Tangent":
print( "Tangent Vector Type Mismatched" )
return
exp_pt = cmrep_abstract_normal( self.nDim )
exp_pt_arr = []
for i in range( 4 ):
exp_pt_arr.append( self.pt[ i ].ExponentialMap( tVec.tVector[ i ] ) )
exp_pt_bndr1 = []
exp_pt_bndr2 = []
for i in range( self.nDim ):
exp_pt_bndr1.append( self.pt[ 4 ][ i ].ExponentialMap( tVec.tVector[ 4 ][ i ] ) )
exp_pt_bndr2.append( self.pt[ 5 ][ i ].ExponentialMap( tVec.tVector[ 5 ][ i ] ) )
exp_pt_arr.append( exp_pt_bndr1 )
exp_pt_arr.append( exp_pt_bndr2 )
exp_pt.SetPoint( exp_pt_arr )
return exp_pt
def LogMap( self, another_pt ):
if not another_pt.Type == "CMRep_Abstract_Normal":
print( "Error: Component Type Mismatched" )
return
tVec = cmrep_abstract_normal_tVec( self.nDim )
for i in range( 4 ):
tVec.tVector[ i ] = self.pt[ i ].LogMap( another_pt.pt[ i ] )
for i in range( self.nDim ):
tVec.tVector[ 4 ][ i ] = self.pt[ 4 ][ i ].LogMap( another_pt.pt[ 4 ][ i ] )
tVec.tVector[ 5 ][ i ] = self.pt[ 5 ][ i ].LogMap( another_pt.pt[ 5 ][ i ] )
return tVec
def GetScale( self ):
return self.scale
def GetCenter( self ):
return self.center
def GetPoistion( self ):
return self.pos
def GetRadius( self ):
return self.rad
def InnerProduct( self, ptA ):
result = 0
# Center
result += self.pt[ 0 ].InnerProduct( ptA.pt[ 0 ] )
# Scale
result += self.meanScale * ptA.meanScale * self.pt[ 1 ].InnerProduct( ptA.pt[ 1 ] )
# Abstract Position
result += self.meanScale * ptA.meanScale * self.pt[ 2 ].InnerProduct( ptA.pt[ 2 ] )
# Radius
result += self.meanRadius * ptA.meanRadius * self.pt[ 3 ].InnerProduct( ptA.pt[ 3 ] )
for i in range( self.nDim ):
# bndr normal 1
result += self.meanRadius * ptA.meanRadius * self.pt[ 4 ][ i ].InnerProduct( ptA.pt[ 4 ][ i ] )
# bndr normal 2
result += self.meanRadius * ptA.meanRadius * self.pt[ 5 ][ i ].InnerProduct( ptA.pt[ 5 ][ i ] )
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def ProjectTangent( self, pt, tVec ):
vProjected = cmrep_abstract_normal_tVec( self.nDim )
for i in range( 4 ):
vProjected.tVector[ i ] = self.pt[ i ].ProjectTangent( pt.pt[ i ], tVec.tVector[ i ] )
for i in range( self.nDim ):
vProjected.tVector[ 4 ][ i ] = self.pt[ 4 ][ i ].ProjectTangent( pt.pt[ 4 ][ i ], tVec.tVector[ 4 ][ i ] )
vProjected.tVector[ 5 ][ i ] = self.pt[ 5 ][ i ].ProjectTangent( pt.pt[ 5 ][ i ], tVec.tVector[ 5 ][ i ] )
return vProjected
def ParallelTranslate( self, v, w ):
wParallelTranslated = cmrep_abstract_normal_tVec( self.nDim )
for i in range( 4 ):
wParallelTranslated.tVector[ i ] = self.pt[ i ].ParallelTranslate( v.tVector[ i ], w.tVector[ i ] )
for i in range( self.nDim ):
wParallelTranslated.tVector[ 4 ][ i ] = self.pt[ 4 ][ i ].ParallelTranslate( v.tVector[ 4 ][ i ], w.tVector[ 4 ][ i ] )
wParallelTranslated.tVector[ 5 ][ i ] = self.pt[ 5 ][ i ].ParallelTranslate( v.tVector[ 5 ][ i ], w.tVector[ 5 ][ i ] )
return wParallelTranslated
def ParallelTranslateToA( self, ptA, w ):
v = self.LogMap( ptA )
return self.ParallelTranslate( v, w )
def ParallelTranslateAtoB( self, a, b, w ):
v = a.LogMap( b )
return a.ParallelTranslate( v, w )
def AdjointGradientJacobi( self, v, j, dj ):
e_base = self
vNorm = v.norm()
jOutput = cmrep_abstract_normal_tVec( self.nDim )
jOutputDash = cmrep_abstract_normal_tVec( self.nDim )
for i in range( 4 ):
jOutput.tVector[ i ], jOutputDash.tVector[ i ] = self.pt[ i ].AdjointGradientJacobi( v.tVector[ i ], j.tVector[ i ], dj.tVector[ i ] )
for i in range( self.nDim ):
jOutput.tVector[ 4 ][ i ], jOutputDash.tVector[ 4 ][ i ] = self.pt[ 4 ][ i ].AdjointGradientJacobi( v.tVector[ 4 ][ i ], j.tVector[ 4 ][ i ], dj.tVector[ 4 ][ i ] )
jOutput.tVector[ 5 ][ i ], jOutputDash.tVector[ 5 ][ i ] = self.pt[ 5 ][ i ].AdjointGradientJacobi( v.tVector[ 5 ][ i ], j.tVector[ 5 ][ i ], dj.tVector[ 5 ][ i ] )
return jOutput, jOutputDash
def GetEuclideanLocations( self ):
H_sub = HelmertSubmatrix( self.nDim )
H_sub_T = H_sub.T
# Relative Positions on a 3(n-1)-1 sphere
pos_abstr_sphere_matrix = np.array( self.pt[ 2 ].pt ).reshape( -1, 3 )
# Relative Positions on Euclidean
pos_abstr_euclidean_matrix = np.dot( H_sub_T, pos_abstr_sphere_matrix )
# Multiply Scale
# print( self.scale.pt[ 0 ] )
pos_scale_eucldiean_matrix = np.multiply( pos_abstr_euclidean_matrix, self.scale.pt[ 0 ] )
# Add Center of Mass
pos_world_coord_euclidean_matrix = np.zeros( [ self.nDim, 3 ] )
for i in range( self.nDim ):
pos_world_coord_euclidean_matrix[ i, : ] = np.add( pos_scale_eucldiean_matrix[ i, : ], self.center.pt )
return pos_world_coord_euclidean_matrix
def GetAbstractEuclideanLocations( self ):
H_sub = HelmertSubmatrix( self.nDim )
H_sub_T = H_sub.T
# Relative Positions on a 3(n-1)-1 sphere
pos_abstr_sphere_matrix = np.array( self.pt[ 2 ].pt ).reshape( -1, 3 )
# Relative Positions on Euclidean
pos_abstr_euclidean_matrix = np.dot( H_sub_T, pos_abstr_sphere_matrix )
# # Multiply Scale
# # print( self.scale.pt[ 0 ] )
# pos_scale_eucldiean_matrix = np.multiply( pos_abstr_euclidean_matrix, self.scale.pt[ 0 ] )
# # Add Center of Mass
# pos_world_coord_euclidean_matrix = np.zeros( [ self.nDim, 3 ] )
# for i in range( self.nDim ):
# pos_world_coord_euclidean_matrix[ i, : ] = np.add( pos_scale_eucldiean_matrix[ i, : ], self.center.pt )
return pos_abstr_euclidean_matrix
def Write( self, filePath ):
infoList = [ self.Type, self.nDim, self.pt, self.meanRadius ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nDim = infoList[ 1 ]
self.pt = infoList[ 2 ]
self.center = self.pt[ 0 ]
self.scale = self.pt[ 1 ]
self.pos = self.pt[ 2 ]
self.rad = self.pt[ 3 ]
self.bndrNormal1 = self.pt[ 4 ]
self.bndrNormal2 = self.pt[ 5 ]
self.meanRadius = infoList[ 3 ]
class cmrep_abstract_normal_tVec( object ):
def __init__( self, nDim ):
self.Type = "CMRep_Abstract_Normal_Tangent"
self.nDim = nDim
# tVector : center, scale, abstract position, radius, bndr normals
self.tVecNormal1 = []
self.tVecNormal2 = []
for i in range( nDim ):
self.tVecNormal1.append( sphere_tVec( 3 ) )
self.tVecNormal2.append( sphere_tVec( 3 ) )
self.tVector = [ euclidean_tVec( 3 ), pos_real_tVec( 1 ), sphere_tVec( 3 * ( nDim - 1 ) ), pos_real_tVec( nDim ), self.tVecNormal1, self.tVecNormal2 ]
self.meanRadius = 1
self.meanScale = 1
# def SetPositionTangentVector( self, idx, pos_tVec ):
# self.tVector[ idx ][ 0 ].SetTangentVector( pos_tVec )
# def SetRadiusTangentVector( self, idx, rad_tVec ):
# self.tVector[ idx ][ 1 ].SetTangentVector( rad_tVec )
def SetTangentVectorFromArray( self, tVecArr ):
if not len( tVecArr ) == ( 4 + self.tVector[ 2 ].nDim + self.tVector[ 3 ].nDim + 6 * len( self.tVecNormal1 ) ):
print( "Error : Dimension Mismatch" )
return
# Center
self.tVector[ 0 ].tVector[ 0 ] = tVecArr[ 0 ]
self.tVector[ 0 ].tVector[ 1 ] = tVecArr[ 1 ]
self.tVector[ 0 ].tVector[ 2 ] = tVecArr[ 2 ]
# Scale
self.tVector[ 1 ].tVector[ 0 ] = tVecArr[ 3 ]
# PreShape
for k in range( self.tVector[ 2 ].nDim ):
self.tVector[ 2 ].tVector[ k ] = tVecArr[ k + 4 ]
# Radius
for k in range( self.tVector[ 3 ].nDim ):
self.tVector[ 3 ].tVector[ k ] = tVecArr[ k + 4 + self.tVector[ 2 ].nDim ]
# Boundary Normals
for i in range( self.nDim ):
self.tVector[ 4 ][ i ].tVector[ 0 ] = tVecArr[ i * 3 + 4 + self.tVector[ 2 ].nDim + self.tVector[ 3 ].nDim ]
self.tVector[ 4 ][ i ].tVector[ 1 ] = tVecArr[ i * 3 + 1 + 4 + self.tVector[ 2 ].nDim + self.tVector[ 3 ].nDim ]
self.tVector[ 4 ][ i ].tVector[ 2 ] = tVecArr[ i * 3 + 2 + 4 + self.tVector[ 2 ].nDim + self.tVector[ 3 ].nDim ]
for i in range( self.nDim ):
self.tVector[ 5 ][ i ].tVector[ 0 ] = tVecArr[ i * 3 + 4 + self.tVector[ 2 ].nDim + self.tVector[ 3 ].nDim + 3 * len( self.tVector[ 4 ] ) ]
self.tVector[ 5 ][ i ].tVector[ 1 ] = tVecArr[ i * 3 + 1 + 4 + self.tVector[ 2 ].nDim + self.tVector[ 3 ].nDim + 3 * len( self.tVector[ 4 ] ) ]
self.tVector[ 5 ][ i ].tVector[ 2 ] = tVecArr[ i * 3 + 2 + 4 + self.tVector[ 2 ].nDim + self.tVector[ 3 ].nDim + 3 * len( self.tVector[ 4 ] ) ]
def GetTangentVectorArray( self ):
tVecArr = []
# Center
tVecArr.append( self.tVector[ 0 ].tVector[ 0 ] )
tVecArr.append( self.tVector[ 0 ].tVector[ 1 ] )
tVecArr.append( self.tVector[ 0 ].tVector[ 2 ] )
# Scale
tVecArr.append( self.tVector[ 1 ].tVector[ 0 ] )
# PreShape
for k in range( self.tVector[ 2 ].nDim ):
tVecArr.append( self.tVector[ 2 ].tVector[ k ] )
# Radius
for k in range( self.tVector[ 3 ].nDim ):
tVecArr.append( self.tVector[ 3 ].tVector[ k ] )
# Boundary Normals
for i in range( self.nDim ):
tVecArr.append( self.tVector[ 4 ][ i ].tVector[ 0 ] )
tVecArr.append( self.tVector[ 4 ][ i ].tVector[ 1 ] )
tVecArr.append( self.tVector[ 4 ][ i ].tVector[ 2 ] )
for i in range( self.nDim ):
tVecArr.append( self.tVector[ 5 ][ i ].tVector[ 0 ] )
tVecArr.append( self.tVector[ 5 ][ i ].tVector[ 1 ] )
tVecArr.append( self.tVector[ 5 ][ i ].tVector[ 2 ] )
return tVecArr
def SetMeanRadius( self, meanRadius ):
self.meanRadius = meanRadius
def SetMeanScale( self, meanScale ):
self.meanScale = meanScale
return
def GetTangentVector(self):
return self.tVector
def SetTangentVector( self, tVec ):
if not ( len( tVec ) == 6 and tVec[ 0 ].nDim == 3 and tVec[ 1 ].nDim == 1 and tVec[ 2 ].nDim == ( 3 * ( self.nDim - 1 ) ) and tVec[ 3 ].nDim == self.nDim and len( tVec[ 4 ] ) == self.nDim and len( tVec[ 5 ] ) == self.nDim ):
print( "cmrep_abstract_normal_tVec:SetTangentVector" )
print( self.nDim )
print( len( tVec[ 4 ] ) )
print( len( tVec[ 5 ] ) )
print( "Error : Dimension Mismatch" )
return
self.tVector = tVec
def InnerProduct( self, tVec1 ):
result = 0
# Center
result += self.tVector[ 0 ].InnerProduct( tVec1.tVector[ 0 ] )
# Scale
result += self.meanScale * tVec1.meanScale * self.tVector[ 1 ].InnerProduct( tVec1.tVector[ 1 ] )
# Abstract Position
result += self.meanScale * tVec1.meanScale * self.tVector[ 2 ].InnerProduct( tVec1.tVector[ 2 ] )
# Radius
result += self.meanRadius * tVec1.meanRadius * self.tVector[ 3 ].InnerProduct( tVec1.tVector[ 3 ] )
for i in range( self.nDim ):
# bndr normal 1
result += self.meanRadius * tVec1.meanRadius * self.tVector[ 4 ][ i ].InnerProduct( tVec1.tVector[ 4 ][ i ] )
# bndr normal 2
result += self.meanRadius * tVec1.meanRadius * self.tVector[ 5 ][ i ].InnerProduct( tVec1.tVector[ 5 ][ i ] )
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def ScalarMultiply( self, t ):
tVector_t = cmrep_abstract_normal_tVec( self.nDim )
for i in range( 4 ):
tVector_t.tVector[ i ] = self.tVector[ i ].ScalarMultiply( t )
for i in range( self.nDim ):
tVector_t.tVector[ 4 ][ i ] = self.tVector[ 4 ][ i ].ScalarMultiply( t )
tVector_t.tVector[ 5 ][ i ] = self.tVector[ 5 ][ i ].ScalarMultiply( t )
return tVector_t
def Write( self, filePath ):
infoList = [ self.Type, self.nDim, self.tVector, self.meanRadius ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nDim = infoList[ 1 ]
self.tVector = infoList[ 2 ]
self.meanRadius = infoList[ 3 ]
###############################################################
##### Miscelleneous #####
###############################################################
def HelmertSubmatrix( nAtoms ):
# Create a Helmert submatrix - similarity-invariant
H = np.zeros( [ nAtoms - 1, nAtoms ] )
for k in range( nAtoms - 1 ):
h_k = -np.divide( 1.0, np.sqrt( ( k + 1 ) * ( k + 2 ) ) )
neg_kh_k = np.multiply( h_k, -( k + 1 ) )
for h in range( k + 1 ):
H[ k, h ] = h_k
H[ k, k + 1 ] = neg_kh_k
return H
def HelmertMatrix( nAtoms ):
# Create a Helmert matrix - similiarity-invariant : First row - Center of Gravity (mass) (uniform mass of points)
H_full = np.zeros( [ nAtoms, nAtoms ] )
for h in range( nAtoms ):
H_full[ 0, h ] = np.divide( 1, np.sqrt( nAtoms ) )
for k in range( 1, nAtoms, 1 ):
h_k = -np.divide( 1.0, np.sqrt( ( k ) * ( k + 1 ) ) )
neg_kh_k = np.multiply( h_k, -k )
for h in range( k ):
H_full[ k, h ] = h_k
H_full[ k, k ] = neg_kh_k
return H_full
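# Illustrative sketch (assumes numpy imported as np, as elsewhere in this module): the
# Helmert submatrix has zero row sums and orthonormal rows, which is what makes the
# resulting pre-shape coordinates translation-invariant. For nAtoms = 3:
# H = HelmertSubmatrix( 3 )
# # approximately [[-0.70710678, 0.70710678, 0. ],
# #                [-0.40824829, -0.40824829, 0.81649658]]
# np.allclose( H.sum( axis = 1 ), 0 )           # True: rows orthogonal to the all-ones vector
# np.allclose( np.dot( H, H.T ), np.eye( 2 ) )  # True: rows are orthonormal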
##########################################################################
## CM-Rep with Boundary Normals ##
##########################################################################
class cmrep_bndr_normals_tVec( object ):
def __init__( self, nDim ):
self.Type = "CMRep_BNDRNormals_Tangent"
self.nDim = nDim
self.tVector = []
for i in range( self.nDim ):
self.tVector.append( [ euclidean_tVec( 3 ), pos_real_tVec( 1 ) ] )
self.meanRadius = 1
def SetPositionTangentVector( self, idx, pos_tVec ):
self.tVector[ idx ][ 0 ].SetTangentVector( pos_tVec )
def SetRadiusTangentVector( self, idx, rad_tVec ):
self.tVector[ idx ][ 1 ].SetTangentVector( rad_tVec )
def SetMeanRadius( self, meanRadius ):
self.meanRadius = meanRadius
def GetTangentVector(self):
return self.tVector
def SetTangentVector( self, tVec ):
if not len( tVec ) == self.nDim:
print( "Error : Dimension Mismatch" )
return
self.tVector = tVec
def InnerProduct( self, tVec1 ):
result = 0
for i in range( self.nDim ):
result += self.tVector[ i ][ 0 ].InnerProduct( tVec1.tVector[ i ][ 0 ] )
result += self.meanRadius * tVec1.meanRadius * self.tVector[ i ][ 1 ].InnerProduct( tVec1.tVector[ i ][ 1 ] )
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def Write( self, filePath ):
infoList = [ self.Type, self.nDim, self.tVector, self.meanRadius ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nDim = infoList[ 1 ]
self.tVector = infoList[ 2 ]
self.meanRadius = infoList[ 3 ]
class cmrep_bndr_normals( object ):
def __init__( self, nDim ):
self.Type = "CMRep_BNDRNormals"
self.nDim = nDim
self.pt = []
self.pos = []
self.rad = []
self.spoke1 = []
self.spoke2 = []
self.edge = []
for i in range( nDim ):
self.pt.append( [ euclidean( 3 ), pos_real( 1 ), sphere( 3 ), sphere( 3 ) ] )
self.pos.append( self.pt[ i ][ 0 ] )
self.rad.append( self.pt[ i ][ 1 ] )
self.spoke1.append( self.pt[ i ][ 2 ] )
self.spoke2.append( self.pt[ i ][ 3 ] )
self.edge.append( 0 )
self.meanRadius = 1
def SetPoint( self, pt ):
if not len( pt ) == self.nDim:
print( "Error : Dimensions does not match" )
return
self.pt = pt
self.pos = []
self.rad = []
for i in range( self.nDim ):
self.pos.append( self.pt[ i ][ 0 ] )
self.rad.append( self.pt[ i ][ 1 ] )
def UpdateMeanRadius( self ):
meanRad = 0
for i in range( self.nDim ):
meanRad += ( float( self.rad[ i ].pt[0] ) / float( self.nDim ) )
self.meanRadius = meanRad
return
def AppendAtom( self, pt = [ euclidean( 3 ), pos_real( 1 ) ] ):
self.nDim = self.nDim + 1
self.pt.append( pt )
self.pos.append( self.pt[ self.nDim - 1 ][ 0 ] )
self.rad.append( self.pt[ self.nDim - 1 ][ 1 ] )
self.meanRadius = ( self.meanRadius * ( self.nDim - 1 ) + pt[ 1 ].pt[ 0 ] ) / self.nDim
def SetPosition( self, idx, position=[0.0,0.0,0.0] ):
self.pos[ idx ].SetPoint( position )
self.pt[ idx ][ 0 ].SetPoint( position )
def SetRadius( self, idx, rad=1.0 ):
self.rad[ idx ].SetPoint( rad )
self.pt[ idx ][ 1 ].SetPoint( rad )
def SetSpoke1( self, idx, spoke=[ 0, 0, 1 ] ):
self.spoke1[ idx ].SetPoint( spoke )
self.pt[ idx ][ 2 ].SetPoint( spoke )
def SetSpoke2( self, idx, spoke=[ 0, 0, 1 ] ):
self.spoke2[ idx ].SetPoint( spoke )
self.pt[ idx ][ 3 ].SetPoint( spoke )
def ExponentialMap( self, tVec ):
if not tVec.Type == "CMRep_BNDRNormals_Tangent":
print( "Tangent Vector Type Mismatched" )
return
exp_pt = cmrep( self.nDim )
for i in range( self.nDim ):
exp_pt.pt[ i ][ 0 ] = self.pt[ i ][ 0 ].ExponentialMap( tVec.tVector[ i ][ 0 ] )
exp_pt.pt[ i ][ 1 ] = self.pt[ i ][ 1 ].ExponentialMap( tVec.tVector[ i ][ 1 ] )
return exp_pt
def LogMap( self, another_pt ):
if not another_pt.Type == "CMRep_BNDRNormals":
print( "Error: Component Type Mismatched" )
return
tVec = cmrep_tVec( self.nDim )
for i in range( self.nDim ):
tVec.tVector[ i ][ 0 ] = self.pt[ i ][ 0 ].LogMap( another_pt.pt[ i ][ 0 ] )
tVec.tVector[ i ][ 1 ] = self.pt[ i ][ 1 ].LogMap( another_pt.pt[ i ][ 1 ] )
return tVec
def GetPoistion( self ):
return self.pos
def GetRadius( self ):
return self.rad
def InnerProduct( self, ptA ):
result = 0
for i in range( self.nDim ):
result += self.pt[ i ][ 0 ].InnerProduct( ptA.pt[ i ][ 0 ] )
result += self.meanRadius * ptA.meanRadius * self.pt[ i ][ 1 ].InnerProduct( ptA.pt[ i ][ 1 ] )
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def ProjectTangent( self, pt, tVec ):
vProjected = cmrep_tVec( self.nDim )
for i in range( self.nDim ):
vProjected.tVector[ i ][ 0 ] = self.pt[ i ][ 0 ].ProjectTangent( pt.pt[ i ][ 0 ], tVec.tVector[ i ][ 0 ] )
vProjected.tVector[ i ][ 1 ] = self.pt[ i ][ 1 ].ProjectTangent( pt.pt[ i ][ 1 ], tVec.tVector[ i ][ 1 ] )
return vProjected
def ParallelTranslate( self, v, w ):
wParallelTranslated = cmrep_tVec( self.nDim )
for i in range( self.nDim ):
wParallelTranslated.tVector[ i ][ 0 ] = self.pt[ i ][ 0 ].ParallelTranslate( v.tVector[ i ][ 0 ], w.tVector[ i ][ 0 ] )
wParallelTranslated.tVector[ i ][ 1 ] = self.pt[ i ][ 1 ].ParallelTranslate( v.tVector[ i ][ 1 ], w.tVector[ i ][ 1 ] )
return wParallelTranslated
def ParallelTranslateToA( self, ptA, w ):
v = self.LogMap( ptA )
return self.ParallelTranslate( v, w )
def ParallelTranslateAtoB( self, a, b, w ):
v = a.LogMap( b )
return a.ParallelTranslate( v, w )
def AdjointGradientJacobi( self, v, j, dj ):
e_base = self
vNorm = v.norm()
jOutput = cmrep_tVec( self.nDim )
jOutputDash = cmrep_tVec( self.nDim )
for i in range( self.nDim ):
jOutput.tVector[ i ][ 0 ], jOutputDash.tVector[ i ][ 0 ] = self.pt[ i ][ 0 ].AdjointGradientJacobi( v.tVector[ i ][ 0 ], j.tVector[ i ][ 0 ], dj.tVector[ i ][ 0 ] )
jOutput.tVector[ i ][ 1 ], jOutputDash.tVector[ i ][ 1 ] = self.pt[ i ][ 1 ].AdjointGradientJacobi( v.tVector[ i ][ 1 ], j.tVector[ i ][ 1 ], dj.tVector[ i ][ 1 ] )
return jOutput, jOutputDash
def Write( self, filePath ):
infoList = [ self.Type, self.nDim, self.pt, self.meanRadius ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nDim = infoList[ 1 ]
self.pt = infoList[ 2 ]
self.meanRadius = infoList[ 3 ]
self.pos = [ self.pt[ i ][ 0 ] for i in range( self.nDim ) ]
self.rad = [ self.pt[ i ][ 1 ] for i in range( self.nDim ) ]
self.spoke1 = [ self.pt[ i ][ 2 ] for i in range( self.nDim ) ]
self.spoke2 = [ self.pt[ i ][ 3 ] for i in range( self.nDim ) ]
##########################################################################
## Kendall 2D Shape Space ##
##########################################################################
class kendall2D_tVec( object ):
# def __init__( self ):
# self.Type = "Sphere_Tangent"
# self.nDim = 3
# self.tVector = [ 0, 0, 0 ]
def __init__( self, nPt ):
self.Type = "Kendall2D_Tangent"
self.nPt = nPt
self.nDim = nPt - 2
self.tVector = np.zeros( [ 2, nPt ] )
def GetTangentVector(self):
return self.tVector
def SetTangentVector(self, tVec):
if not tVec.shape[ 1 ] == self.nPt:
print( "Error : # of points does not match" )
return
if not tVec.shape[ 0 ] == 2:
print( "Error : Tangent vector should be 2D" )
return
self.tVector = tVec
def InnerProduct( self, tVec1 ):
result = 0
for i in range( self.nPt ):
for j in range( 2 ):
result += self.tVector[ j, i ] * tVec1.tVector[ j, i ]
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def ScalarMultiply( self, t ):
tVector_t = kendall2D_tVec( self.nPt )
for i in range( self.nPt ):
for j in range( 2 ):
tVector_t.tVector[ j, i ] = self.tVector[ j, i ] * t
return tVector_t
def Write( self, filePath ):
infoList = [ self.Type, self.nDim, self.nPt, self.tVector, False ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nDim = infoList[ 1 ]
self.nPt = infoList[ 2 ]
self.tVector = infoList[ 3 ]
class kendall2D( object ):
def __init__( self, nPt ):
self.Type = "Kendall2D"
self.nPt = nPt
self.nDim = nPt - 2
pt_base = np.zeros( [ 2, nPt ] )
pt_base[ 0, 0 ] = 1
pt_base[ 0, 1 ] = 0
self.pt = pt_base
def SetPoint( self, pt ):
if not pt.shape[ 1 ] == self.nPt:
print( "Error : # of Points does not match" )
return
if not pt.shape[ 0 ] == 2:
print( "Error : Point should be 2D" )
return
if not np.linalg.norm( pt ) == 1:
# print( "Warning : The point is not on a sphere")
self.pt = np.asmatrix( pt )
return
self.pt = np.asmatrix( pt )
def GetPoint( self ):
return self.pt
def InnerProduct( self, ptA ):
result = 0
for i in range( self.nPt ):
for j in range( 2 ):
result += self.pt[ j, i ] * ptA.pt[ j, i ]
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def ExponentialMap( self, tVec ):
theta = tVec.norm()
if theta < 1e-12:
exp_pt = kendall2D( self.nPt )
exp_pt.pt = self.pt
return exp_pt
if theta > np.pi * 2:
theta = np.mod( theta, np.pi * 2 )
exp_pt = kendall2D( self.nPt )
lhs = np.multiply( np.cos( theta ), self.pt )
rhs = np.multiply( np.sin( theta ) / theta, tVec.tVector )
exp_pt.pt = lhs + rhs
exp_pt.pt = np.divide( exp_pt.pt, exp_pt.norm() )
return exp_pt
def LogMap( self, another_pt ):
m = np.matmul( self.pt, another_pt.pt.T )
U, s, V = np.linalg.svd( m )
# np.linalg.svd returns V transposed as its third output, so U times that output is the aligning rotation U V^T
rotation = np.matmul( U, V )
qRot_pt = np.matmul( rotation, another_pt.pt )
qRot = kendall2D( self.nPt )
qRot.SetPoint( qRot_pt )
cosTheta = self.InnerProduct( qRot )
tVec = kendall2D_tVec( self.nPt )
tVec_mat = np.subtract( qRot.pt, np.multiply( cosTheta, self.pt ) )
tVec.SetTangentVector( tVec_mat )
length = tVec.norm()
if length < 1e-12 or cosTheta >= 1.0 or cosTheta <= -1.0:
tVec = kendall2D_tVec( self.nPt )
return tVec
tVec = tVec.ScalarMultiply( np.arccos( cosTheta ) / length )
return tVec
def ParallelTranslate( self, v, w ):
vNorm = v.norm()
pNorm = self.norm()
if( vNorm < 1.0e-12 or pNorm < 1.0e-12 ):
# print( "tVector too small" )
return w
skew = np.zeros( [ 2, 2 ] )
skew[ 0, 1 ] = -1
skew[ 1, 0 ] = 1
unitV = v.ScalarMultiply( 1.0 / vNorm )
unitJV_mat = np.matmul( skew, unitV.tVector )
unitJV = kendall2D_tVec( self.nPt )
unitJV.SetTangentVector( unitJV_mat )
unitP = self.ScalarMultiply( 1.0 / pNorm )
unitJP_mat = np.matmul( skew, unitP.pt )
unitJP = kendall2D( self.nPt )
unitJP.SetPoint( unitJP_mat )
# If v and w are horizontal, the real inner product will work
wDotUnitV = unitV.InnerProduct( w )
wDotUnitJV = unitJV.InnerProduct( w )
# Component of w orthogonal to v and jv
parallel_mat = np.add( np.multiply( wDotUnitV, unitV.tVector ), np.multiply( wDotUnitJV, unitJV_mat ) )
orth_mat = np.subtract( w.tVector, parallel_mat )
# Compute Parallel Translated V
parallelUnitV_mat = np.add( np.multiply( self.pt, -np.sin( vNorm ) / pNorm ), np.multiply( np.cos( vNorm ), unitV.tVector ) )
# Compute Parallel Translated jV
parallelUnitJV_mat = np.subtract( np.multiply( np.cos( vNorm ), unitJV_mat ), np.multiply( np.sin( vNorm ), unitJP_mat ) )
# Add parallel translated v to orth, and get parallel translated w
parallelW_paraV = np.add( np.multiply( wDotUnitV, parallelUnitV_mat ), np.multiply( wDotUnitJV, parallelUnitJV_mat ) )
parallelW_mat = np.add( parallelW_paraV, orth_mat )
wParallelTranslated = kendall2D_tVec( self.nPt )
wParallelTranslated.SetTangentVector( parallelW_mat )
return wParallelTranslated
def ParallelTranslateAtoB( self, a, b, w ):
v = a.LogMap( b )
return a.ParallelTranslate( v, w )
def ParallelTranslateToA( self, a, w ):
v = self.LogMap( a )
return self.ParallelTranslate( v, w )
def ScalarMultiply( self, t ):
p_t = kendall2D( self.nPt )
for i in range( self.nPt ):
for j in range( 2 ):
p_t.pt[ j, i ] = self.pt[ j, i ] * t
return p_t
def GradientJacobi( self, v, J, dJ ):
vNorm = v.norm()
if( vNorm < 1.0e-12 ):
for i in range( self.nPt ):
for k in range( 2 ):
J.tVector[ k ][ i ] = J.tVector[ k ][ i ] + dJ.tVector[ k ][ i ]
return J, dJ
VdotJ = v.InnerProduct( J )
VdotJPrime = v.InnerProduct( dJ )
scaleFactorJ = VdotJ / ( vNorm * vNorm )
scaleFactorJPrime = VdotJPrime / ( vNorm * vNorm )
jTang_mat = np.multiply( v.tVector, scaleFactorJ )
jTang = kendall2D_tVec( self.nPt )
jTang.SetTangentVector( jTang_mat )
dJTang_mat = np.multiply( v.tVector, scaleFactorJPrime )
dJTang = kendall2D_tVec( self.nPt )
dJTang.SetTangentVector( dJTang_mat )
jOrth_mat = np.subtract( J.tVector, jTang_mat )
jOrth = kendall2D_tVec( self.nPt )
jOrth.SetTangentVector( jOrth_mat )
dJOrth_mat = np.subtract( dJ.tVector, dJTang_mat )
dJOrth = kendall2D_tVec( self.nPt )
dJOrth.SetTangentVector( dJOrth_mat )
skew = np.zeros( [ 2, 2 ] )
skew[ 0, 1 ] = -1
skew[ 1, 0 ] = 1
unitV = v.ScalarMultiply( 1.0 / vNorm )
w_mat = np.matmul( skew, unitV.tVector )
w = kendall2D_tVec( self.nPt )
w.SetTangentVector( w_mat )
# Curvature 4 component
jOrth4 = w.ScalarMultiply( w.InnerProduct( jOrth ) )
dJOrth4 = w.ScalarMultiply( w.InnerProduct( dJOrth ) )
# Curvature 1 Component
jOrth1 = kendall2D_tVec( self.nPt )
jOrth1.SetTangentVector( np.subtract( jOrth.tVector, jOrth4.tVector ) )
dJOrth1 = kendall2D_tVec( self.nPt )
dJOrth1.SetTangentVector( np.subtract( dJOrth.tVector, dJOrth4.tVector ) )
# Orthogonal Parts
jOrth.SetTangentVector( np.add( np.multiply( np.cos( vNorm ), jOrth1.tVector ), np.multiply( np.cos( 2.0 * vNorm ), jOrth4.tVector ) ) )
dJOrth.SetTangentVector( np.add( np.multiply( np.sin( vNorm ) / vNorm, dJOrth1.tVector ), np.multiply( 0.5 * np.sin( 2.0 * vNorm ) / vNorm, dJOrth4.tVector ) ) )
J_dJ_mat = jTang.tVector + dJTang.tVector + jOrth.tVector + dJOrth.tVector
J_dJ = kendall2D_tVec( self.nPt )
J_dJ.SetTangentVector( J_dJ_mat )
J = self.ParallelTranslate( v, J_dJ )
dJOrth_mat = jOrth1.ScalarMultiply( -vNorm * np.sin( vNorm ) ).tVector + jOrth4.ScalarMultiply( -2.0 * vNorm * np.sin( 2.0 * vNorm ) ).tVector
dJOrth.SetTangentVector( dJOrth_mat )
ddJOrth_mat = dJOrth1.ScalarMultiply( np.cos( vNorm ) ).tVector + dJOrth4.ScalarMultiply( np.cos( 2.0 * vNorm ) ).tVector
ddJOrth = kendall2D_tVec( self.nPt )
ddJOrth.SetTangentVector( ddJOrth_mat )
dJ_ddJ_mat = dJTang.tVector + dJOrth.tVector + ddJOrth.tVector
dJ_ddJ = kendall2D_tVec( self.nPt )
dJ_ddJ.SetTangentVector( dJ_ddJ_mat )
dJ = self.ParallelTranslate( v, dJ_ddJ )
return J, dJ
def AdjointGradientJacobi( self, v, Jac, dJac ):
vNorm = v.norm()
if( vNorm < 1.0e-12 ):
for i in range( self.nPt ):
for j in range( 2 ):
Jac.tVector[ j ][ i ] = Jac.tVector[ j ][ i ] + dJac.tVector[ j ][ i ]
Jac_Updated = Jac
dJac_Updated = dJac
return Jac_Updated, dJac_Updated
VdotJac = v.InnerProduct( Jac )
VdotJacPrime = v.InnerProduct( dJac )
scaleFactorJac = VdotJac / ( vNorm * vNorm )
scaleFactorJacPrime = VdotJacPrime / ( vNorm * vNorm )
jTang_mat = np.multiply( v.tVector, scaleFactorJac )
jTang = kendall2D_tVec( self.nPt )
jTang.SetTangentVector( jTang_mat )
dJacTang_mat = np.multiply( v.tVector, scaleFactorJacPrime )
dJacTang = kendall2D_tVec( self.nPt )
dJacTang.SetTangentVector( dJacTang_mat )
jOrth_mat = np.subtract( Jac.tVector, jTang_mat )
jOrth = kendall2D_tVec( self.nPt )
jOrth.SetTangentVector( jOrth_mat )
dJacOrth_mat = np.subtract( dJac.tVector, dJacTang_mat )
dJacOrth = kendall2D_tVec( self.nPt )
dJacOrth.SetTangentVector( dJacOrth_mat )
skew = np.zeros( [ 2, 2 ] )
skew[ 0, 1 ] = -1
skew[ 1, 0 ] = 1
unitV = v.ScalarMultiply( 1.0 / vNorm )
w_mat = np.matmul( skew, unitV.tVector )
w = kendall2D_tVec( self.nPt )
w.SetTangentVector( w_mat )
# Curvature 4 component
jOrth4 = w.ScalarMultiply( w.InnerProduct( jOrth ) )
dJacOrth4 = w.ScalarMultiply( w.InnerProduct( dJacOrth ) )
# Curvature 1 Component
jOrth1 = kendall2D_tVec( self.nPt )
jOrth1.SetTangentVector( np.subtract( jOrth.tVector, jOrth4.tVector ) )
dJacOrth1 = kendall2D_tVec( self.nPt )
dJacOrth1.SetTangentVector( np.subtract( dJacOrth.tVector, dJacOrth4.tVector ) )
# Orthogonal Parts
jOrth.SetTangentVector( np.add( np.multiply( np.cos( vNorm ), jOrth1.tVector ), np.multiply( np.cos( 2.0 * vNorm ), jOrth4.tVector ) ) )
dJacOrth.SetTangentVector( np.add( np.multiply( -vNorm * np.sin( vNorm ), dJacOrth1.tVector ), np.multiply( -2.0 * vNorm * np.sin( 2.0 * vNorm ), dJacOrth4.tVector ) ) )
Jac_dJac_mat = jTang.tVector + jOrth.tVector + dJacOrth.tVector
Jac_dJac = kendall2D_tVec( self.nPt )
Jac_dJac.SetTangentVector( Jac_dJac_mat )
Jac_Updated = self.ParallelTranslate( v, Jac_dJac )
dJacOrth_mat = jOrth1.ScalarMultiply( np.sin( vNorm ) / vNorm ).tVector + jOrth4.ScalarMultiply( 0.5 * np.sin( 2.0 * vNorm ) / vNorm ).tVector
dJacOrth.SetTangentVector( dJacOrth_mat )
ddJacOrth_mat = dJacOrth1.ScalarMultiply( np.cos( vNorm ) ).tVector + dJacOrth4.ScalarMultiply( np.cos( 2.0 * vNorm ) ).tVector
ddJacOrth = kendall2D_tVec( self.nPt )
ddJacOrth.SetTangentVector( ddJacOrth_mat )
dJac_ddJac_mat = jTang.tVector + dJacTang.tVector + dJacOrth.tVector + ddJacOrth.tVector
dJac_ddJac = kendall2D_tVec( self.nPt )
dJac_ddJac.SetTangentVector( dJac_ddJac_mat )
dJac_Updated = self.ParallelTranslate( v, dJac_ddJac )
return Jac_Updated, dJac_Updated
def CurvatureTensor( self, x, y, z ):
skew = np.zeros( [ 2, 2 ] )
skew[ 0, 1 ] = -1
skew[ 1, 0 ] = 1
jX_mat = np.matmul( skew, x.tVector )
jY_mat = np.matmul( skew, y.tVector )
jZ_mat = np.matmul( skew, z.tVector )
jX = kendall2D_tVec( self.nPt )
jX.SetTangentVector( jX_mat )
jY = kendall2D_tVec( self.nPt )
jY.SetTangentVector( jY_mat )
jZ = kendall2D_tVec( self.nPt )
jZ.SetTangentVector( jZ_mat )
zxy_mat = np.multiply( z.InnerProduct( jX ), jY.tVector )
xyz_mat = np.multiply( x.InnerProduct( jY ), jZ.tVector )
yzx_mat = np.multiply( y.InnerProduct( jZ ), jX.tVector )
kCurv_mat = zxy_mat + yzx_mat - ( np.multiply( 2.0, xyz_mat ) )
zDotX = z.InnerProduct( x )
zDotY = z.InnerProduct( y )
sphereCurv_mat = np.multiply( zDotX, y.tVector ) - np.multiply( zDotY, x.tVector )
curv_mat = kCurv_mat + sphereCurv_mat
curv = kendall2D_tVec( self.nPt )
curv.SetTangentVector( curv_mat )
return curv
def SectionalCurvature( self, x, y ):
curv = self.CurvatureTensor( x, y, x )
sec = curv.InnerProduct( y )
xx = x.normSquared()
yy = y.normSquared()
xy = x.InnerProduct( y )
secCurv = sec / (( xx * yy ) - ( xy ** 2 ) )
return secCurv
def ProjectTangent( self, p, v ):
meanV = np.average( v.tVector, axis=1 )
print( meanV.shape )
hV = v
for i in range( self.nPt ):
for j in range( 2 ):
hV.tVector[ j, i ] = v.tVector[ j, i ] - meanV[ j ]
skew = np.zeros( [ 2, 2 ] )
skew[ 0, 1 ] = -1
skew[ 1, 0 ] = 1
vert_mat = np.matmul( skew, p.pt )
vert = kendall2D_tVec( self.nPt )
vert.SetTangentVector( vert_mat )
new_hV_mat = hV.tVector - np.multiply( hV.InnerProduct( vert ), vert.tVector )
hV.SetTangentVector( new_hV_mat )
return hV
def Write( self, filePath ):
infoList = [ self.Type, self.nDim, self.nPt, self.pt, False ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nDim = infoList[ 1 ]
self.nPt = infoList[ 2 ]
self.pt = infoList[ 3 ]
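# Illustrative sketch (not part of the original module): a LogMap / ExponentialMap round
# trip on kendall2D. x_mat and y_mat are assumed to be 2 x nPt configurations that have
# already been centered and scaled to unit Frobenius norm.
# x = kendall2D( nPt ); x.SetPoint( x_mat )
# y = kendall2D( nPt ); y.SetPoint( y_mat )
# v = x.LogMap( y )                 # tangent vector at x toward the rotation-aligned y
# y_aligned = x.ExponentialMap( v ) # lies on the pre-shape sphere, aligned to x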
##########################################################################
## Scale Kendall 2D Shape Space ##
##########################################################################
class scale_kendall2D( object ):
def __init__( self, nPt ):
self.Type = "Scale_Kendall2D"
self.nPt = nPt
self.nDim = nPt - 2
# pt : scale, 2D Kendall shape
self.pt = [ euclidean(1), kendall2D( nPt ) ]
self.scale = self.pt[ 0 ]
self.kShape = self.pt[ 1 ]
self.meanScale = 0
def SetPoint( self, pt ):
if not ( len( pt ) == 2 and pt[ 0 ].nDim == 1 and pt[ 1 ].nPt == self.nPt ):
print( "Scale_Kendall2D.SetPoint")
print( "Error : Dimensions does not match" )
return
self.pt = pt
self.scale = pt[ 0 ]
self.kShape = pt[ 1 ]
def SetMeanScale( self, s ):
self.meanScale = s
return
def ExponentialMap( self, tVec ):
if not tVec.Type == "Scale_Kendall2D_Tangent":
print( "Tangent Vector Type Mismatched" )
return
exp_pt = scale_kendall2D( self.nPt )
exp_pt_arr = []
for i in range( 2 ):
exp_pt_arr.append( self.pt[ i ].ExponentialMap( tVec.tVector[ i ] ) )
exp_pt.SetPoint( exp_pt_arr )
return exp_pt
def LogMap( self, another_pt ):
if not another_pt.Type == "Scale_Kendall2D":
print( "Error: Component Type Mismatched" )
return
tVec = scale_kendall2D_tVec( self.nPt )
for i in range( 2 ):
tVec.tVector[ i ] = self.pt[ i ].LogMap( another_pt.pt[ i ] )
return tVec
def GetScale( self ):
return self.scale
def GetShape( self ):
return self.kShape
def InnerProduct( self, ptA ):
result = 0
# Scale
result += self.pt[ 0 ].InnerProduct( ptA.pt[ 0 ] )
# Abstract Position
result += self.pt[ 1 ].InnerProduct( ptA.pt[ 1 ] )
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def ProjectTangent( self, pt, tVec ):
vProjected = scale_kendall2D_tVec( self.nPt )
for i in range( 2 ):
vProjected.tVector[ i ] = self.pt[ i ].ProjectTangent( pt.pt[ i ], tVec.tVector[ i ] )
return vProjected
def ParallelTranslate( self, v, w ):
wParallelTranslated = scale_kendall2D_tVec( self.nPt )
for i in range( 2 ):
wParallelTranslated.tVector[ i ] = self.pt[ i ].ParallelTranslate( v.tVector[ i ], w.tVector[ i ] )
return wParallelTranslated
def ParallelTranslateToA( self, ptA, w ):
v = self.LogMap( ptA )
return self.ParallelTranslate( v, w )
def ParallelTranslateAtoB( self, a, b, w ):
v = a.LogMap( b )
return a.ParallelTranslate( v, w )
def AdjointGradientJacobi( self, v, j, dj ):
e_base = self
vNorm = v.norm()
jOutput = scale_kendall2D_tVec( self.nPt )
jOutputDash = scale_kendall2D_tVec( self.nPt )
for i in range( 2 ):
jOutput.tVector[ i ], jOutputDash.tVector[ i ] = self.pt[ i ].AdjointGradientJacobi( v.tVector[ i ], j.tVector[ i ], dj.tVector[ i ] )
return jOutput, jOutputDash
def GetEuclideanLocations( self ):
# Multiply Scale
pos_scale_eucldiean_matrix = np.multiply( self.kShape.pt, self.scale.pt[ 0 ] )
# Add Center of Mass
pos_world_coord_euclidean_matrix = np.zeros( [ self.nPt, 2 ] )
for i in range( self.nPt ):
pos_world_coord_euclidean_matrix[ i, : ] = pos_scale_eucldiean_matrix[ :, i ].flatten()
return pos_world_coord_euclidean_matrix
def Write( self, filePath ):
infoList = [ self.Type, self.nPt, self.nDim, self.pt, self.meanScale ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nPt = infoList[ 1 ]
self.nDim = infoList[ 2 ]
self.pt = infoList[ 3 ]
self.scale = self.pt[ 0 ]
self.kShape = self.pt[ 1 ]
self.meanScale = infoList[ 4 ]
class scale_kendall2D_tVec( object ):
def __init__( self, nPt ):
self.Type = "Scale_Kendall2D_Tangent"
self.nPt = nPt
self.nDim = nPt - 2
# tVector : scale, 2D Kendall Shape
self.tVector = [ euclidean_tVec( 1 ), kendall2D_tVec( nPt ) ]
self.meanScale = 1
# def SetTangentVectorFromArray( self, tVecArr ):
# if not len( tVecArr ) == ( 4 + self.tVector[ 2 ].nDim + self.tVector[ 3 ].nDim ):
# print( "Error : Dimension Mismatch" )
# return
# # Center
# self.tVector[ 0 ].tVector[ 0 ] = tVecArr[ 0 ]
# self.tVector[ 0 ].tVector[ 1 ] = tVecArr[ 1 ]
# self.tVector[ 0 ].tVector[ 2 ] = tVecArr[ 2 ]
# # Scale
# self.tVector[ 1 ].tVector[ 0 ] = tVecArr[ 3 ]
# # PreShape
# for k in range( self.tVector[ 2 ].nDim ):
# self.tVector[ 2 ].tVector[ k ] = tVecArr[ k + 4 ]
# # Radius
# for k in range( self.tVector[ 3 ].nDim ):
# self.tVector[ 3 ].tVector[ k ] = tVecArr[ k + 4 + self.tVector[ 2 ].nDim ]
def SetMeanScale( self, meanScale ):
self.meanScale = meanScale
return
def GetTangentVector(self):
return self.tVector
def SetTangentVector( self, tVec ):
if not ( len( tVec ) == 2 and tVec[ 0 ].nDim == 1 and tVec[ 1 ].nPt == self.nPt ):
print( "Error : Dimension Mismatch" )
return
self.tVector = tVec
def InnerProduct( self, tVec1 ):
result = 0
# Scale
result += self.tVector[ 0 ].InnerProduct( tVec1.tVector[ 0 ] )
# Kendall Shapes
result += self.meanScale * self.meanScale * self.tVector[ 1 ].InnerProduct( tVec1.tVector[ 1 ] )
return result
def normSquared( self ):
return self.InnerProduct( self )
def norm( self ):
return np.sqrt( self.normSquared() )
def ScalarMultiply( self, t ):
tVector_t = scale_kendall2D_tVec( self.nPt )
for i in range( 2 ):
tVector_t.tVector[ i ] = self.tVector[ i ].ScalarMultiply( t )
return tVector_t
def Write( self, filePath ):
infoList = [ self.Type, self.nPt, self.nDim, self.tVector, self.meanScale ]
with open( filePath, 'wb' ) as fp:
pickle.dump( infoList, fp )
def Read( self, filePath ):
with open( filePath, 'rb' ) as fp:
infoList = pickle.load( fp )
self.Type = infoList[ 0 ]
self.nPt = infoList[ 1 ]
self.nDim = infoList[ 2 ]
self.tVector = infoList[ 3 ]
self.meanScale = infoList[ 4 ]
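# Illustrative sketch (not part of the original module): a scale_kendall2D point is just
# the pair [ scale, shape ]. s_pt is assumed to be a euclidean(1) scale and k_pt a
# kendall2D( nPt ) pre-shape built as in the sketch above.
# sk = scale_kendall2D( nPt )
# sk.SetPoint( [ s_pt, k_pt ] )
# landmarks = sk.GetEuclideanLocations() # ( nPt, 2 ) array of scaled landmark coordinates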
|
StarcoderdataPython
|
6660749
|
#!/usr/bin/env python
# _*_coding:utf-8 _*_
#@Time :2019/4/17 0017 下午 5:11
#@Author :喜欢二福的沧月君(<EMAIL>)
#@FileName: train.py
#@Software: PyCharm
import numpy as np
import pandas as pd
titanic_survival= pd.read_csv("titanic_train.csv")
#print(titanic_survival.head)
"""
age中有缺失值
求平均值
age=titanic_survival["Age"]
#print(age.loc[0:10])
age_is_null=pd.isnull(age)
#print(age_is_null)
age_null_true=age[age_is_null]
# print(age_null_true)
age_null_count=len(age_null_true)
# print(age_null_count)
good_ages=titanic_survival["Age"][age_is_null == False]
# print(good_ages)
correct_mean_age=sum(good_ages)/len(good_ages)
# print(correct_mean_age)
"""
"""
求每个船舱的平均存活人数
passenger_survival =titanic_survival.pivot_table(index="Pclass",values="Survived",aggfunc=np.mean)#默认是求均值
# print(passenger_survival)
port_stats =titanic_survival.pivot_table(index="Embarked",values=["Fare","Survived"],aggfunc=np.sum)
# print(port_stats)
new_titanic_survival = titanic_survival.dropna(axis=0,subset=["Age","Sex"])
print(new_titanic_survival)
"""
"""
"""
|
StarcoderdataPython
|
1606768
|
<reponame>idris-rampurawala/form-fueled
from app.pagination import DefaultCursorPagination
from django.conf import settings
from django.db.models import Prefetch
from rest_framework.exceptions import NotFound
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK, HTTP_201_CREATED
from rest_framework.views import APIView
from rest_framework_simplejwt.authentication import JWTAuthentication
from .models import QResponse, Questionnaire, QuestionnaireRespondent
from .serializers import (QuestionnaireCreateSerializer,
QuestionnaireDetailSerializer,
QuestionnaireResponseSerializer,
QuestionnaireResponsesSerializer)
class QuestionnaireApi(APIView):
http_method_names = ['post', 'get']
authentication_classes = (JWTAuthentication,)
permission_classes = (IsAuthenticated,)
def get_queryset(self, user_id):
return Questionnaire.objects.filter(user_id=user_id).prefetch_related('question_set')
def get(self, request):
""" API to fetch a questionnaire with questions
"""
queryset = self.get_queryset(request.user.id)
paginator = DefaultCursorPagination()
paginator_response = paginator.paginate_queryset(queryset, request)
serializer = QuestionnaireDetailSerializer(paginator_response, many=True)
return paginator.get_paginated_response(serializer.data)
def post(self, request, format=None):
""" Creates a Questionnaire
"""
serializer = QuestionnaireCreateSerializer(data=request.data, context={'user': request.user})
serializer.is_valid(raise_exception=True)
data = serializer.save()
links = {
'share': f'{settings.UI_BASE_URL}/questionnaire/{data.get("id")}',
'responses': f'{settings.UI_BASE_URL}/questionnaire/{data.get("id")}/responses',
'edit': f'{settings.UI_BASE_URL}/questionnaire/{data.get("id")}/edit'
}
data['links'] = links
return Response({'detail': data}, status=HTTP_201_CREATED)
class QuestionnaireDetailApi(APIView):
http_method_names = ['patch', 'get', 'delete']
authentication_classes = (JWTAuthentication,)
permission_classes = (IsAuthenticated,)
def get_object(self, user_id, pk):
try:
questionnaire_obj = Questionnaire.objects.prefetch_related('question_set').get(pk=pk, user_id=user_id)
return questionnaire_obj
except Questionnaire.DoesNotExist:
raise NotFound
def get(self, request, qid):
""" API to fetch a questionnaire with questions
"""
questionnaire_obj = self.get_object(request.user.id, qid)
serializer = QuestionnaireDetailSerializer(questionnaire_obj)
return Response({'detail': serializer.data}, status=HTTP_200_OK)
def patch(self, request, qid):
""" API to update a questionnaire
"""
questionnaire_obj = self.get_object(request.user.id, qid)
serializer = QuestionnaireCreateSerializer(
questionnaire_obj,
data=request.data,
context={
'user': request.user
},
partial=True)
serializer.is_valid(raise_exception=True)
data = serializer.save()
links = {
'share': f'{settings.UI_BASE_URL}/questionnaire/{data.get("id")}',
'responses': f'{settings.UI_BASE_URL}/questionnaire/{data.get("id")}/responses',
'edit': f'{settings.UI_BASE_URL}/questionnaire/{data.get("id")}/edit'
}
data['links'] = links
return Response({'detail': data}, status=HTTP_200_OK)
def delete(self, request, qid):
""" Deletes a questionnaire and related data
"""
questionnaire_obj = self.get_object(request.user.id, qid)
questionnaire_obj.delete()
return Response({'detail': 'Success deleted the resource.'}, status=HTTP_200_OK)
class QuestionnaireSharedApi(APIView):
http_method_names = ['get', 'post']
def get_object(self, pk):
try:
questionnaire_obj = Questionnaire.objects.prefetch_related('question_set').get(pk=pk)
return questionnaire_obj
except Questionnaire.DoesNotExist:
raise NotFound
def get(self, request, qid):
""" API to fetch a questionnaire with questions (without authentication)
"""
questionnaire_obj = self.get_object(qid)
serializer = QuestionnaireDetailSerializer(questionnaire_obj)
return Response({'detail': serializer.data}, status=HTTP_200_OK)
def post(self, request, qid):
""" API to save a questionnaire responses (without authentication)
"""
questionnaire_obj = self.get_object(qid)
serializer = QuestionnaireResponseSerializer(
data=request.data, context={
'questionnaire_obj': questionnaire_obj})
serializer.is_valid(raise_exception=True)
serializer.save()
return Response({'detail': 'Response saved successfully'}, status=HTTP_200_OK)
class QuestionnaireResponsesApi(APIView):
http_method_names = ['get']
authentication_classes = (JWTAuthentication,)
permission_classes = (IsAuthenticated,)
def get_object(self, user_id, qid):
try:
questionnaire_obj = Questionnaire.objects.prefetch_related(
Prefetch('responses', queryset=QuestionnaireRespondent.objects.prefetch_related(
Prefetch(
'respondent', queryset=QResponse.objects.select_related('question').all())
))).get(pk=qid, user_id=user_id)
return questionnaire_obj
except Questionnaire.DoesNotExist:
raise NotFound
def get(self, request, qid):
""" API to fetch a questionnaire with questions
"""
questionnaire_obj = self.get_object(request.user.id, qid)
serializer = QuestionnaireResponsesSerializer(questionnaire_obj, many=False)
return Response({'detail': serializer.data}, status=HTTP_200_OK)
class QuestionnaireResponsesListApi(APIView):
http_method_names = ['get']
authentication_classes = (JWTAuthentication,)
permission_classes = (IsAuthenticated,)
def get_queryset(self, user_id):
return Questionnaire.objects.prefetch_related(
Prefetch('responses', queryset=QuestionnaireRespondent.objects.prefetch_related(
Prefetch(
'respondent', queryset=QResponse.objects.select_related('question').all())
))).filter(user_id=user_id)
def get(self, request):
""" API to fetch all questionnaires alongwith their responses
"""
queryset = self.get_queryset(request.user.id)
paginator = DefaultCursorPagination()
paginator_response = paginator.paginate_queryset(queryset, request)
serializer = QuestionnaireResponsesSerializer(paginator_response, many=True)
return paginator.get_paginated_response(serializer.data)
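# Illustrative sketch (not part of the original app): the views above still need URL
# routes; the module name and path strings below are assumptions, not taken from the project.
# from django.urls import path
# from .views import (QuestionnaireApi, QuestionnaireDetailApi, QuestionnaireSharedApi,
#                     QuestionnaireResponsesApi, QuestionnaireResponsesListApi)
# urlpatterns = [
#     path('questionnaires/', QuestionnaireApi.as_view()),
#     path('questionnaires/<qid>/', QuestionnaireDetailApi.as_view()),
#     path('questionnaires/<qid>/shared/', QuestionnaireSharedApi.as_view()),
#     path('questionnaires/<qid>/responses/', QuestionnaireResponsesApi.as_view()),
#     path('questionnaires/responses/', QuestionnaireResponsesListApi.as_view()),
# ]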
|
StarcoderdataPython
|
3550053
|
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import ModelCheckpoint
import unet
import candle
def initialize_parameters():
unet_common = unet.UNET(
unet.file_path,
'unet_params.txt',
'keras',
prog='unet_example',
desc='UNET example'
)
# Initialize parameters
gParameters = candle.finalize_parameters(unet_common)
return gParameters
def run(gParameters):
# load data
x_train, y_train = unet.load_data()
# example has 420 x 580
model = unet.build_model(420, 580, gParameters['activation'], gParameters['kernel_initializer'])
model.summary()
model.compile(optimizer=gParameters['optimizer'], loss='binary_crossentropy', metrics=['accuracy'])
model_chkpoint = ModelCheckpoint('unet.hdf5', monitor='loss', verbose=1, save_best_only=True)
history = model.fit(x_train, y_train,
batch_size=gParameters['batch_size'],
epochs=gParameters['epochs'],
verbose=1,
validation_split=0.3,
shuffle=True,
callbacks=[model_chkpoint]
)
return history
def main():
gParameters = initialize_parameters()
run(gParameters)
if __name__ == '__main__':
main()
try:
K.clear_session()
except AttributeError:
pass
|
StarcoderdataPython
|
4916895
|
<reponame>jean3108/TwoPlayer-Game
from abc import ABC, abstractmethod
from twoPlayerAiGame.aiAlgorithms import minmaxDecision, negamaxDecision, randomDecision, humanDecision
class StateGame(ABC):
"""
Class wich represent a state of a two-player game
"""
@abstractmethod
def __init__(self, maxPlayer):
"""
Create a state of the game.
:return: The state with the choosen information
:rtype: stateGame
"""
self.maxPlayer = 1 if maxPlayer==True else -1
pass
@abstractmethod
def calculateScore(self):
"""
Calculate the score of the current state if it's a terminal state or estimate the score
:return: The score of the current state
:rtype: number
"""
pass
@abstractmethod
def getChoices(self):
"""
Get the different choice for the player for the current state.
:return: Every choices that the player can make.
:rtype: list[object]
"""
pass
@abstractmethod
def doChoice(self, choice, inNewState = False):
"""
Apply the given choice to the current state (inplace or not)
:param choice: The choice to apply to the current state
:type choice: typeof(self.getChoices()[0])
:param inNewState: To choose if the choice is applied inplace (on the current state) or not (on a copy of the current state)
:type inNewState: boolean
:return: Nothing if it's inplace then the new state.
:rtype: stateGame or None
"""
pass
@abstractmethod
def undoChoice(self, choice, inNewState = False):
"""
Undo the given choice for the current state (inplace or not)
:param choice: The choice to undo for the current state
:type choice: typeof(self.getChoices()[0])
:param inNewState: To choose if the choice is undone inplace (on the current state) or not (on a copy of the current state)
:type inNewState: boolean
:return: Nothing if it's inplace then the new state.
:rtype: stateGame or None
"""
pass
@abstractmethod
def toKey(self):
"""
Get the unique ID of the state.
This ID is useful to use memoization in different algorithms
:return: the ID of the current state
:rtype: string
"""
pass
@abstractmethod
def printBeforeGame(self):
"""
Print information before the beginning of the game
"""
pass
@abstractmethod
def printInfoPlayer(self):
"""
Print information before the turn of the current player
"""
pass
@abstractmethod
def printResultAction(self, choice):
"""
Print information after the turn of the current player
:param choice: The choice wich was just played
:type choice: typeof(self.getChoices()[0])
"""
pass
@abstractmethod
def printAfterGame(self):
"""
Print information after the end of the game
"""
pass
def play(self, player1, player2, verbose=True):
"""
Play the game
:param player1: String to choose the algorithm for the choice of the player1 (can be human)
:param player2: String to choose the algorithm for the choice of the player2 (can be human)
:param verbose: Indicate if information are printed or not
:type player1: String
:type player2: String
:type verbose: boolean
:return: the number of the winner then 0
:rtype: int
"""
####################################
# Selection of algorithm & Setting #
####################################
if(player1=='human'):
function1 = humanDecision
elif(player1=='minmax'):
function1 = minmaxDecision
elif(player1=='negamax'):
function1 = negamaxDecision
elif(player1=='random'):
function1 = randomDecision
if(player2=='human'):
function2 = humanDecision
elif(player2=='minmax'):
function2 = minmaxDecision
elif(player2=='negamax'):
function2 = negamaxDecision
elif(player2=='random'):
function2 = randomDecision
#########################
# Beginning of the game #
#########################
over = False
if(verbose==True):
self.printBeforeGame()
currentScore = self.calculateScore()
while(currentScore==False):
if(verbose==True):
self.printInfoPlayer()
if(self.maxPlayer==1):
choice = function1(self)[1]
else:
choice = function2(self)[1]
self.doChoice(choice)
currentScore = self.calculateScore()
if(verbose==True):
self.printResultAction(choice)
if(verbose==True):
self.printAfterGame()
return currentScore
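# Illustrative sketch (not part of the package): a minimal concrete subclass showing which
# abstract methods must be provided; the tiny Nim-like game below is made up purely for
# demonstration and its scoring is only a plausible choice, not the package's own.
# class NimState(StateGame):
#     def __init__(self, sticks, maxPlayer=True):
#         super().__init__(maxPlayer)
#         self.sticks = sticks
#     def calculateScore(self):
#         if self.sticks == 0:
#             return -self.maxPlayer  # the player who just took the last stick wins
#         return False                # game not over yet
#     def getChoices(self):
#         return [n for n in (1, 2, 3) if n <= self.sticks]
#     def doChoice(self, choice, inNewState=False):
#         self.sticks -= choice
#         self.maxPlayer *= -1
#     def undoChoice(self, choice, inNewState=False):
#         self.sticks += choice
#         self.maxPlayer *= -1
#     def toKey(self):
#         return str((self.sticks, self.maxPlayer))
#     def printBeforeGame(self): print("Sticks:", self.sticks)
#     def printInfoPlayer(self): print("Player", self.maxPlayer, "to move")
#     def printResultAction(self, choice): print("Took", choice)
#     def printAfterGame(self): print("Game over")
# NimState(10).play('random', 'random', verbose=False)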
|
StarcoderdataPython
|
8067386
|
from flask import Blueprint
ac = Blueprint('ac', __name__)
@ac.route('/login')
def login():
return 'login'
@ac.route('/logout')
def logout():
return 'logout'
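# Illustrative sketch (not part of the original module): the blueprint only serves requests
# once it is registered on an application; the app below and the url_prefix are assumptions.
# from flask import Flask
# app = Flask(__name__)
# app.register_blueprint(ac, url_prefix='/account')
# # GET /account/login -> 'login', GET /account/logout -> 'logout'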
|
StarcoderdataPython
|
8162889
|
<reponame>imapi/Permission-Resolver<filename>permission_resolver/permission_resolver.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from io import TextIOWrapper
from pathlib import PureWindowsPath, PurePosixPath
from typing import List, Union
import argparse
class TreeItem:
def __init__(self, name: str,
readable: bool = False,
writable: bool = False) -> None:
"""
Tree structure with folder name and children sub-folders
:param name: folder name
:param readable: set to True if folder is readable
:param writable: set to True if folder is writable (it will also set readable flag)
"""
self._name = name
self._writable = writable
self._readable = readable
self._children = {}
@property
def name(self) -> str:
return self._name
@property
def writable(self) -> bool:
return self._writable
@property
def readable(self) -> bool:
return self._writable or self._readable
@readable.setter
def readable(self, value: bool) -> None:
self._readable = value
@writable.setter
def writable(self, value: bool) -> None:
self._writable = value
@property
def children(self) -> List['TreeItem']:
"""
Children for the TreeItem
:return: List of TreeItem children
"""
return list(self._children.values())
def add_child(self, child: 'TreeItem') -> 'TreeItem':
"""
        Add a child to the children list, updating the existing child if one with the same name exists
:param child: TreeItem child
:return: TreeItem child added/updated in the list of children
"""
key = child.name
if key in self._children:
self._children[key].update(child)
else:
self._children[key] = child
return self._children[key]
def remove_child(self, child: 'TreeItem') -> Union['TreeItem', None]:
"""
Remove particular child from the list of children
:param child: child to remove
:return: TreeItem removed child or None if child is not in list of children
"""
return self._children.pop(child.name, None)
def __repr__(self, depth: int = 1) -> str:
"""
String representation of the tree
:param depth: depth level
:return: textual representation of the tree
"""
result = [self.name]
depth += 1
for child in self.children:
result.extend(["\n", " " * depth, child.__repr__(depth)])
return "".join(result)
def update(self, other: 'TreeItem') -> None:
"""
        Update the current node with another node of the same name: the wider of the two folder permissions
        is kept, and the other node's children are merged into the current node via add_child.
:param other: TreeItem other node to merge with
"""
if self.name != other.name:
return
self.readable = self.readable or other.readable
self.writable = self.writable or other.writable
for child in other.children:
self.add_child(child)
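# Illustrative sketch (not part of the original module): building a tiny tree by
# hand; the folder names used here are hypothetical.
def _demo_tree_item() -> None:
    root = TreeItem("", readable=True)
    var = root.add_child(TreeItem("var", readable=True))
    var.add_child(TreeItem("log", writable=True))
    # Adding "var" again merges permissions into the existing child instead of duplicating it.
    root.add_child(TreeItem("var", writable=True))
    print(root)  # nested folder names, one per line, indented by depth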
def build_writable_folders_tree(readable_folders: List[str],
writable_folders: List[str],
system: str = 'posix') -> TreeItem:
"""
    Build a tree whose leaves are writable folders (each reachable from the root through at least readable folders);
    posix and windows style paths are supported
:param readable_folders: List of readable absolute paths
:param writable_folders: List of writable absolute paths
:param system: 'posix' or 'windows' - depending on absolute paths system
    :return: TreeItem whose leaves are writable folders (intermediate nodes may be readable or readable/writable)
"""
system_flavour = {'posix': PurePosixPath, 'windows': PureWindowsPath}
if system not in system_flavour:
raise ValueError(f'System \'{system}\' is not supported, should be one of: {", ".join(system_flavour.keys())}')
def populate_tree(folders: List[str], readable=False, writable=False) -> None:
for folder in folders:
path = system_flavour[system](folder)
parents = list(reversed(path.parents))
tree = root.add_child(TreeItem(parents[0].as_posix(), readable=True))
for parent in parents[1:]:
tree = tree.add_child(TreeItem(parent.name))
tree.add_child(TreeItem(path.name, readable, writable))
def keep_only_writable(tree: 'TreeItem') -> None:
for child in tree.children:
keep_only_writable(child)
if not child.readable or (not child.children and not child.writable):
tree.remove_child(child)
root = TreeItem("")
populate_tree(readable_folders, readable=True)
populate_tree(writable_folders, writable=True)
keep_only_writable(root)
return root
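# Illustrative sketch (not part of the original module): the paths below are
# hypothetical. Only "app" survives as a leaf because it is writable and every
# folder on the way down from the root is at least readable.
def _demo_build_tree() -> None:
    readable = ["/var", "/var/log"]
    writable = ["/var/log/app"]
    print(build_writable_folders_tree(readable, writable, system='posix'))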
def _read_files(files: List[TextIOWrapper]) -> List[str]:
content = []
for file in files:
with file:
content += [l.strip() for l in file.readlines()]
return content
def parse_arguments() -> argparse.Namespace:
parser = argparse.ArgumentParser(description='Lists accessible for write folders.')
parser.add_argument('-r', '--readable', required=True, nargs='*', type=argparse.FileType('r'),
help='Files with newline separated readable folders list')
parser.add_argument('-w', '--writable', required=True, nargs='*', type=argparse.FileType('r'),
help='Files with newline separated writable folders list')
    parser.add_argument('-s', '--system', choices=['posix', 'windows'], type=str, default='posix',
help='Type of system paths (windows or posix)')
return parser.parse_args()
def main() -> None:
args = parse_arguments()
print(build_writable_folders_tree(_read_files(args.readable), _read_files(args.writable), args.system))
if __name__ == '__main__':
main()
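# Hypothetical invocation (file names are assumptions): each input file contains
# one absolute path per line.
#     python3 permission_resolver.py -r readable.txt -w writable.txt -s posix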
|
StarcoderdataPython
|
380088
|
#!/usr/bin/env python
# Force Python 2 to use float division even for ints
from __future__ import division
from __future__ import print_function
import importlib
import stacktrain.config.general as conf
import stacktrain.core.cond_sleep as cs
kc = importlib.import_module("stacktrain.%s.keycodes" % conf.provider)
# -----------------------------------------------------------------------------
# Virtual VM keyboard using keycodes
# -----------------------------------------------------------------------------
def keyboard_send_escape(vm_name):
kc.keyboard_push_scancode(vm_name, kc.esc2scancode())
def keyboard_send_enter(vm_name):
kc.keyboard_push_scancode(vm_name, kc.enter2scancode())
def keyboard_send_backspace(vm_name):
kc.keyboard_push_scancode(vm_name, kc.backspace2scancode())
def keyboard_send_f6(vm_name):
kc.keyboard_push_scancode(vm_name, kc.f6_2scancode())
# Turn strings into keycodes and send them to target VM
def keyboard_send_string(vm_name, string):
# This loop is inefficient enough that we don't overrun the keyboard input
# buffer when pushing scancodes to the VM.
for letter in string:
scancode = kc.char2scancode(letter)
kc.keyboard_push_scancode(vm_name, scancode)
        # Sleep occasionally to keep us from overrunning the keyboard input
# buffer
keyboard_send_string.cnt += 1
if keyboard_send_string.cnt % 50 == 0:
cs.conditional_sleep(1)
keyboard_send_string.cnt = 0
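# Hypothetical usage sketch (not part of the original module): the VM name and
# the text typed below are assumptions chosen for illustration only.
def _example_boot_edit(vm_name="controller"):
    # Interrupt the boot menu, switch entries with F6, type a kernel argument,
    # then confirm with Enter.
    keyboard_send_escape(vm_name)
    keyboard_send_f6(vm_name)
    keyboard_send_string(vm_name, "quiet splash")
    keyboard_send_enter(vm_name)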
|
StarcoderdataPython
|