{
"source": "JieFeng-cse/robosuite-soft",
"score": 2
}
#### File: robosuite/myown/test.py
```python
from dm_control import mjcf
from dm_control import composer
import numpy as np
import sys
from dm_control import viewer
import robosuite as suite
env = suite.make(
env_name='Lift',
robots='Sawyer',
has_renderer=False,
has_offscreen_renderer=False,
ignore_done=True,
use_camera_obs=False,
control_freq=100,
)
env.reset()
env_xml = env.model.get_xml()
cloth = """
<mujoco model="parent">
<worldbody>
<composite type="grid" count="9 9 1" spacing="0.05" offset="0.7 0 1.0">
<geom size=".02"/>
</composite>
</worldbody>
</mujoco>"""
# parse both XML strings into mjcf models so they can be wrapped as composer entities
world = mjcf.from_xml_string(env_xml)
cloth = mjcf.from_xml_string(cloth)
class MyEntity(composer.ModelWrapperEntity):
    def _build(self, mjcf_model):
        self._mjcf_model = mjcf_model
        self._mjcf_root = mjcf_model
cloth_entity = MyEntity(cloth)
world_entity = MyEntity(world)
world_entity.attach(cloth_entity)
task = composer.NullTask(world_entity)
task.control_timestep = 0.02
env = composer.Environment(task)
# note: viewer.launch blocks until the viewer window is closed;
# the stepping loop below only runs after the viewer exits
viewer.launch(env)
action_spec = env.action_spec()
null_action = np.zeros(action_spec.shape, action_spec.dtype)
num_steps = 1000
for _ in range(num_steps):
env.step(null_action)
```
#### File: robosuite/wrappers/gym_wrapper.py
```python
import numpy as np
from gym import spaces
from gym.core import Env
from robosuite.wrappers import Wrapper
class GymWrapper(Wrapper, Env):
"""
Initializes the Gym wrapper. Mimics many of the required functionalities of the Wrapper class
found in the gym.core module
Args:
env (MujocoEnv): The environment to wrap.
keys (None or list of str): If provided, each observation will
consist of concatenated keys from the wrapped environment's
observation dictionary. Defaults to proprio-state and object-state.
Raises:
AssertionError: [Object observations must be enabled if no keys]
"""
def __init__(self, env, keys=None):
# Run super method
super().__init__(env=env)
# Create name for gym
robots = "".join([type(robot.robot_model).__name__ for robot in self.env.robots])
self.name = robots + "_" + type(self.env).__name__
# Get reward range
self.reward_range = (0, self.env.reward_scale)
if keys is None:
keys = []
# Add object obs if requested
if self.env.use_object_obs:
keys += ["object-state"]
# Add image obs if requested
if self.env.use_camera_obs:
keys += [f"{cam_name}_image" for cam_name in self.env.camera_names]
# Iterate over all robots to add to state
for idx in range(len(self.env.robots)):
keys += ["robot{}_proprio-state".format(idx)]
self.keys = keys
# Gym specific attributes
self.env.spec = None
self.metadata = None
# set up observation and action spaces
obs = self.env.reset()
self.modality_dims = {key: obs[key].shape for key in self.keys}
flat_ob = self._flatten_obs(obs)
self.obs_dim = flat_ob.size
high = np.inf * np.ones(self.obs_dim)
low = -high
self.observation_space = spaces.Box(low=low, high=high)
low, high = self.env.action_spec
self.action_space = spaces.Box(low=low, high=high)
def _flatten_obs(self, obs_dict, verbose=False):
"""
Filters keys of interest out and concatenate the information.
Args:
obs_dict (OrderedDict): ordered dictionary of observations
verbose (bool): Whether to print out to console as observation keys are processed
Returns:
np.array: observations flattened into a 1d array
"""
ob_lst = []
for key in self.keys:
if key in obs_dict:
if verbose:
print("adding key: {}".format(key))
ob_lst.append(np.array(obs_dict[key]).flatten())
return np.concatenate(ob_lst)
def reset(self):
"""
Extends env reset method to return flattened observation instead of normal OrderedDict.
Returns:
np.array: Flattened environment observation space after reset occurs
"""
ob_dict = self.env.reset()
return self._flatten_obs(ob_dict)
def step(self, action):
"""
Extends vanilla step() function call to return flattened observation instead of normal OrderedDict.
Args:
action (np.array): Action to take in environment
Returns:
4-tuple:
- (np.array) flattened observations from the environment
- (float) reward from the environment
- (bool) whether the current episode is completed or not
- (dict) misc information
"""
ob_dict, reward, done, info = self.env.step(action)
# if len(ob_dict) > 0:
# observation = ob_dict['object-state']
# cloth_pos = observation[:3]
# corner1 = observation[3:6]
# corner2 = observation[6:9]
# corner3 = observation[9:12]
# corner4 = observation[12:15]
# dis1 = np.linalg.norm(corner1-cloth_pos)
# dis2 = np.linalg.norm(corner2-cloth_pos)
# dis3 = np.linalg.norm(corner3-cloth_pos)
# dis4 = np.linalg.norm(corner4-cloth_pos)
# dis_sum = dis1+dis2+dis3+dis4
# if dis_sum < 0.34: #9*9
# done = True
# print("unstable stage, too close: ", dis_sum)
# print("corner1: ", corner1)
# print("corner2: ", corner2)
# print("corner3: ", corner3)
# print("corner4: ", corner4)
return self._flatten_obs(ob_dict), reward, done, info
def seed(self, seed=None):
"""
Utility function to set numpy seed
Args:
seed (None or int): If specified, numpy seed to set
Raises:
TypeError: [Seed must be integer]
"""
# Seed the generator
if seed is not None:
try:
np.random.seed(seed)
            except Exception:
                raise TypeError("Seed must be an integer type!")
def compute_reward(self, achieved_goal, desired_goal, info):
"""
Dummy function to be compatible with gym interface that simply returns environment reward
Args:
achieved_goal: [NOT USED]
desired_goal: [NOT USED]
info: [NOT USED]
Returns:
float: environment reward
"""
# Dummy args used to mimic Wrapper interface
return self.env.reward()
```
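A minimal usage sketch for the wrapper above (not part of the repository): it reuses the `suite.make` arguments shown in `myown/test.py` and assumes `GymWrapper` is importable from `robosuite.wrappers`, as this file's package suggests.
```python
# Hedged sketch: expose a robosuite environment through the gym-style API.
import robosuite as suite
from robosuite.wrappers import GymWrapper

env = GymWrapper(
    suite.make(
        env_name="Lift",
        robots="Sawyer",
        has_renderer=False,
        has_offscreen_renderer=False,
        use_camera_obs=False,
    )
)
obs = env.reset()                    # flattened 1-D observation (np.array)
action = env.action_space.sample()   # random action inside the Box bounds
obs, reward, done, info = env.step(action)
```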
{
"source": "JIEGE666/alimama-common-performance-platform-acp",
"score": 2
}
#### File: src/res_manager/host.py
```python
class Host:
def __init__(self, ip, query=None, totalRes=None, availableRes=None, status=None):
self.ip = ip
self.query = query
self.totalRes = totalRes
self.availableRes = availableRes
self.status = status
```
#### File: src/task/task.py
```python
import sys
import time
import json
import os
path = os.path.split(os.path.realpath(__file__))[0]
sys.path.append(path+"/..")
from common.util import *
from common.zk import *
from agent import *
class Task:
def __init__(self, id, target, qps, source, query_path, query_type, option, status):
self.task_id = id
self.target = target
self.qps = qps
self.source = source
self.query_path = query_path
self.status = status
self.option = option
self.query_type = query_type
@classmethod
def create_task(self, target, qps, source, query_path, query_type, option):
status = "start"
time.sleep(0.002)
id = "t" + str(int(round(time.time()*1000)))
task = Task(id, target, qps, source, query_path, query_type, option, status)
task.create_in_zk()
return task
@classmethod
def delete_task(self, task_id):
task = Task.get_task(task_id)
task.delete_in_zk()
@classmethod
def get_task(self, task_id):
path = CONF.task_path + "/" + task_id
zk = ZK(CONF.zk_address)
root = zk.get_node(path)
if not root:
return None
else:
task = Task.get_task_by_zknode(root)
return task
@classmethod
def get_all_task(self):
zk = ZK(CONF.zk_address)
root = zk.get_node(CONF.task_path)
task_list = []
for child in root.list_children():
task = Task.get_task_by_zknode(child)
task_list.append(task)
return task_list
@classmethod
def get_task_by_zknode(self, node):
status = node.get_value()
info_node = node.get_child("info")
info = json.loads(info_node.get_value())
id = node.get_name()
target = info["target"]
qps = info["qps"]
source = info["source"]
query_path = info["query_path"]
option = info["option"]
query_type = info["query_type"]
task = Task(id, target, qps, source, query_path, query_type, option, status)
return task
def get_status(self):
return self.status
def get_id(self):
return self.task_id
def get_current_qps(self):
total_qps = 0
for agent in self.get_all_agent():
info = agent.get_agent_info()
agent_qps = int(info["qps"])
total_qps += agent_qps
return total_qps
def get_task_info(self):
info = {}
info["target"] = self.target
info["qps"] = self.qps
info["source"] = self.source
info["query_path"] = self.query_path
info["option"] = self.option
info["query_type"] = self.query_type
return info
def add_agent(self, host, agent_id, max_qps, resource_num):
agent = Agent(self.task_id, agent_id, host, self.target, "1", self.source, self.query_path, self.query_type, self.option, str(max_qps), str(resource_num))
agent.create_in_zk()
return agent
def get_all_agent(self):
zk = ZK(CONF.zk_address)
path = "%s/%s/resource/" % (CONF.task_path, self.task_id)
root = zk.get_node(path)
agent_list = []
for host_node in root.list_children():
for agent_node in host_node.list_children():
info_node = agent_node.get_child("info")
try:
info = json.loads(info_node.get_value())
                except Exception as e:
                    print(e)
continue
target = info["target"]
qps = info["qps"]
source = info["source"]
query_path = info["query_path"]
query_type = info["query_type"]
option = info["option"]
max_qps = info["max_qps"]
resource_num = info["resource_num"]
agent = Agent(self.task_id, agent_node.get_name(), host_node.get_name(), target, qps, source, query_path, query_type, option, max_qps, resource_num)
agent_list.append(agent)
return agent_list
def set_status(self, status):
self.status = status
zk = ZK(CONF.zk_address)
path = CONF.task_path + "/" + self.task_id
root = zk.get_node(path)
root.set_value(status)
def set_qps(self, qps):
self.qps = qps
zk = ZK(CONF.zk_address)
path = CONF.task_path + "/" + self.task_id + "/info"
node = zk.get_node(path)
node.set_value(json.dumps(self.get_task_info()))
def set_agent_qps(self, qps):
QPS_INDEX= 10
for agent in self.get_all_agent():
dic = agent.get_agent_info()
#CONF.log.error("current conf:")
#CONF.log.error(json.dumps(dic))
# CONF.log.error("\n\n")
# agent.set_qps(str(QPS_INDEX))
# QPS_INDEX = QPS_INDEX+10
agent.set_qps(str(qps))
def delete_all_agent(self):
agent_list = self.get_all_agent()
for agent in agent_list:
agent.delete_in_zk()
def create_in_zk(self):
zk = ZK(CONF.zk_address)
root = zk.get_node(CONF.task_path)
t_node = root.add_child(self.task_id, self.status)
t_node.add_child("resource")
info_node = t_node.add_child("info", json.dumps(self.get_task_info()))
def delete_in_zk(self):
zk = ZK(CONF.zk_address)
path = CONF.task_path + "/" + self.task_id
node = zk.get_node(path)
lock = node.get_lock()
lock.acquire(timeout=0.002)
node.delete()
lock.release()
```
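A hypothetical usage sketch for `Task` (not from the repository): every argument value is made up, it assumes a reachable ZooKeeper ensemble configured via `CONF.zk_address`, and the import path is a guess based on the file location.
```python
# All values below are illustrative; they only show the call pattern.
from task.task import Task

task = Task.create_task(
    target="10.0.0.1:8080",
    qps=100,
    source="local",
    query_path="/tmp/queries.txt",
    query_type="http",
    option="{}",
)
print(task.get_id(), task.get_status())
task.set_qps(200)                  # rewrites the info node in ZooKeeper
Task.delete_task(task.get_id())    # takes the node lock, then deletes the node
```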
#### File: src/test/test_log.py
```python
import sys
sys.path.append("..")
from common.util import *
def test():
logging.warning("AAA")
logging.error("BBB")
test()
```
{
"source": "jiegeng321/Simple-classification-and-regression-framework",
"score": 3
}
#### File: jiegeng321/Simple-classification-and-regression-framework/models.py
```python
import math
import torch
import torch.nn as nn
from torchvision import datasets, models, transforms
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, layers, block=BasicBlock, num_classes=4):
self.inplanes = 32
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 32, layers[0])
self.layer2 = self._make_layer(block, 64, layers[1], stride=2)
#self.layer3 = self._make_layer(block, 64, layers[2], stride=2)
self.layer3 = self._make_layer(block, 128, layers[2], stride=2)
#self.layer4 = self._make_layer(block, 256, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(9, stride=1)
self.fc = nn.Sequential(
nn.Dropout(0.5),
nn.Linear(128 * block.expansion, num_classes),
nn.Softmax(dim=1)
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
#print(self.inplanes)
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
#x = self.layer3(x)
x = self.layer3(x)
#x = self.layer4(x)
#x = self.layer5(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def set_parameter_requires_grad(model, feature_extracting):
if feature_extracting:
for param in model.parameters():
param.requires_grad = False
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True, pretrain_model=None,layers=[1,1,1]):
    # Initialize the variables that will be set in this if/elif chain.
    # Each of them is model-specific.
model_ft = None
# input_size = 224
if model_name == "resnet":
model_ft = models.resnet18(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Sequential(
nn.Dropout(0.5),
nn.Linear(num_ftrs, num_classes),
nn.Softmax(dim=1)
)
# input_size = 384
elif model_name == "my_resnet":
model_ft = ResNet(layers, num_classes=num_classes)
if pretrain_model:
model_ft.load_state_dict(torch.load(pretrain_model))
# input_size = 224
elif model_name == "efficientnet":
model_ft = models.efficientnet_b0(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
print(model_ft)
num_ftrs = model_ft.classifier[1].in_features
model_ft.classifier[1] = nn.Sequential(
nn.Linear(num_ftrs, num_classes),
nn.Softmax(dim=1)
)
elif model_name == "alexnet":
model_ft = models.alexnet(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Sequential(
nn.Linear(num_ftrs, num_classes),
nn.Softmax(dim=1)
)
# input_size = 224
elif model_name == "vgg":
model_ft = models.vgg11_bn(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Sequential(
nn.Linear(num_ftrs, num_classes),
nn.Softmax(dim=1)
)
# input_size = 224
elif model_name == "squeezenet":
model_ft = models.squeezenet1_0(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1, 1), stride=(1, 1))
model_ft.num_classes = num_classes
# input_size = 224
elif model_name == "densenet":
model_ft = models.densenet121(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier.in_features
model_ft.classifier = nn.Linear(num_ftrs, num_classes)
# input_size = 224
elif model_name == "inception":
""" Inception v3
Be careful, expects (299,299) sized images and has auxiliary output
"""
model_ft = models.inception_v3(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
        # handle the auxiliary network
num_ftrs = model_ft.AuxLogits.fc.in_features
model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # handle the main network
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
# input_size = 299
else:
print("Invalid model name, exiting...")
exit()
return model_ft
if __name__ == '__main__':
#model = ResNet([1,1,1],num_classes=2)
model = initialize_model(model_name="alexnet", num_classes=2, feature_extract=False, use_pretrained=True,
pretrain_model=None,layers=[1,1,1])
x = torch.randn(1, 3, 144, 144)
result = model(x)
# print(result.shape)
print(result)
#print(model)
#print(model.state_dict())
# for i, j in model.state_dict().items():
# print(i, j)
```
#### File: jiegeng321/Simple-classification-and-regression-framework/tools.py
```python
import os
import shutil
def is_img(img_name):
    return str(img_name).split(".")[-1].lower() in ["jpg", "png", "jpeg", "gif"] and not str(img_name).startswith(".")
def check_dir(path,delete=False):
if not os.path.exists(path):
os.makedirs(path)
else:
if delete:
shutil.rmtree(path)
os.makedirs(path)
```
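A short illustrative sketch (paths and the import location are assumptions, not repository code) showing how the two helpers might be combined.
```python
import os

from tools import check_dir, is_img  # assumed import path

check_dir("./output", delete=True)                         # recreate the output folder
images = [f for f in os.listdir("./input") if is_img(f)]   # keep only image files
```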
{
"source": "jiegev5/pytorch-CycleGAN-for-audio",
"score": 2
}
#### File: pytorch-CycleGAN-for-audio/scripts/save_spec_cyclegan_train.py
```python
import librosa
import numpy as np
import os
import sys
import time
from glob import glob
def compute_mfcc(y):
n_fft = 320
win_length = n_fft
hop_length = int(n_fft/2)
window = 'hamming'
D = librosa.stft(y, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window)
data, phase = librosa.magphase(D)
# S = log(S+1)
data = np.log1p(data)
## adding z-score normalize
mean = np.mean(data)
std = np.std(data)
data = (data - mean)/std
data = data/(np.abs(data).max())
print(f'mean is {mean}, std is {std}, shape is {data.shape}')
    # 601 frames = 6 s * 16 kHz / hop_length(160) + 1 (librosa pads with center=True)
    return data.reshape(1, -1, 601)
in_len = 16000*6
normalize = False
for_cyclegan = True
dict_ = {
"/your/path/to/audio folder":\
"/your/path/to/destination"
}
s = time.time()
for key in dict_.keys():
root = key
target_root = dict_[key]
if not os.path.exists(target_root):
os.mkdir(target_root)
train_target_path = os.path.join(target_root,'trainA')
test_target_path = os.path.join(target_root,'testA')
if not os.path.exists(train_target_path):
os.mkdir(train_target_path)
if not os.path.exists(test_target_path):
os.mkdir(test_target_path)
audio_list = glob(root+'/*.wav')
for num in range(len(audio_list)):
audio = audio_list[num]
data = librosa.core.load(audio, sr=16000)[0]
        data = np.pad(data, (0, max(0, in_len - len(data))), "constant")  # pad to in_len samples (6 s at 16 kHz)
mfcc = compute_mfcc(data)
# sys.exit()
chk = np.sum(mfcc)
if np.isnan(chk):
print(f'mean = {mfcc.mean()} min = {mfcc.min()} max = {mfcc.max()}')
continue
else:
name = audio.split('/')[-1].split('.')[0] + '.npy'
if num < len(audio_list)*0.9:
new_name = os.path.join(train_target_path,name)
else:
new_name = os.path.join(test_target_path,name)
print(num,"saving ",name)
np.save(new_name,mfcc)
e = time.time()
print("processing time (min): ",(e-s)/60)
```
{
"source": "jieggii/dnevnikru-bot",
"score": 3
}
#### File: dnevnikru-bot/app/fmt.py
```python
from datetime import datetime
from typing import List
from app.config import config
from app.dnevnikru import Hometask, Timetable
def get_pretty_date(date: datetime) -> str:
return date.strftime("%d.%m")
def get_pretty_timetable(timetable: Timetable) -> str:
response = f"Расписание на {get_pretty_date(timetable.date)}:\n"
for lesson in timetable.lessons:
response += f"{lesson}. {timetable.lessons[lesson]}\n"
return response
def get_pretty_hometasks(hometasks: List[Hometask]) -> str:
response = "Список домашних заданий:\n"
for i, task in enumerate(hometasks):
response += f"{i+1}. {task.subject} (на {get_pretty_date(task.date)}): {task.task}\n\n"
return response
def get_my_mention() -> str:
return f"[club{config.Bot.GROUP_ID}|@{config.Bot.DOMAIN}]"
```
#### File: app/routers/chat.py
```python
import logging
from vkwave.bots import BotEvent, DefaultRouter, SimpleBotEvent
from vkwave.bots.core.dispatching import filters
from app import fmt
from app.config import config
from app.dnevnikru import dnevnikru
from app.filters import CommandsFilter, InviteMeFilter, OnlyMentionMe, PeerIDsFilter
logger = logging.getLogger(__name__)
router = DefaultRouter(
[
filters.MessageFromConversationTypeFilter("from_chat"),
PeerIDsFilter(config.Bot.PEER_IDS),
]
)
reg = router.registrar
invite_me_filter = InviteMeFilter()
only_mention_me_filter = OnlyMentionMe()
@reg.with_decorator(invite_me_filter)
async def handle_invite_me(event: BotEvent):
event = SimpleBotEvent(event)
await event.answer(
"Привет-с! Я бот для классной беседы, с моей помощью вы сможете узнать расписание и дз.\n"
f"Чтобы узнать список команд -- пишите @{config.Bot.DOMAIN}"
)
@reg.with_decorator(only_mention_me_filter | CommandsFilter("help", "помощь", "команды"))
async def handle_help(event: BotEvent):
event = SimpleBotEvent(event)
await event.answer(
"Список моих команд:\n"
"/help -- список команд\n"
"/ht -- список ДЗ\n"
"/today -- расписание на сегодня\n"
"/tomorrow -- расписание на завтра\n"
)
@reg.with_decorator(CommandsFilter("ht", "дз"))
async def handle_ht(event):
event = SimpleBotEvent(event)
hometasks = await dnevnikru.get_hometasks()
response = fmt.get_pretty_hometasks(hometasks)
await event.answer(response)
@reg.with_decorator(CommandsFilter("today", "сегодня", "td"))
async def handle_today(event):
event = SimpleBotEvent(event)
timetable = await dnevnikru.get_timetable_today()
response = fmt.get_pretty_timetable(timetable)
await event.answer(response)
@reg.with_decorator(CommandsFilter("tomorrow", "завтра", "tm"))
async def handle_tomorrow(event):
event = SimpleBotEvent(event)
timetable = await dnevnikru.get_timetable_tomorrow()
response = fmt.get_pretty_timetable(timetable)
await event.answer(response)
```
{
"source": "jieggii/giving-tuesday-bot",
"score": 2
}
#### File: app/db/db.py
```python
from tortoise import Tortoise
from app.config import config
async def init():
await Tortoise.init(
{
"connections": {
"default": {
"engine": "tortoise.backends.asyncpg",
"credentials": {
"host": config.PG.HOST,
"port": config.PG.PORT,
"user": config.PG.USER,
"password": config.PG.PASSWORD,
"database": config.PG.DATABASE,
},
}
},
"apps": {"models": {"models": ["app.db.models"], "default_connection": "default"}},
}
    )
await Tortoise.generate_schemas()
```
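A small sketch of how this initializer might be invoked at startup; `asyncio.run` is the standard-library entry point, and the `app.db.db` import path is inferred from the file header.
```python
# Sketch: initialize the Tortoise ORM connections once before serving requests.
import asyncio

from app.db import db

asyncio.run(db.init())
```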
{
"source": "jieggii/vktoken",
"score": 3
}
#### File: vktoken/cli/args.py
```python
from argparse import ArgumentParser
from vktoken import BUILTIN_APPS, __version__
def get_arg_parser():
parser = ArgumentParser(
description="Tool for getting VK access token.",
prog="vktoken",
)
parser.add_argument(
"-V",
"--version",
action="version",
version=f"%(prog)s {__version__}",
)
parser.add_argument("login", type=str, help="VK account login (mobile phone or email)")
parser.add_argument(
"password",
type=str,
help="VK account password (will be prompted safely if not indicated)",
nargs="?",
)
parser.add_argument(
"--app",
type=str,
choices=[key for key in BUILTIN_APPS.keys()],
help="builtin app to be used to auth",
nargs="?",
)
app = parser.add_argument_group(
"app arguments (can't be used if `--app` was used; must be used both at once)"
)
app.add_argument("-cid", "--client-id", type=str, nargs="?", help="app client id")
app.add_argument("-cs", "--client-secret", type=str, nargs="?", help="app client secret")
return parser
```
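A possible way to wire the parser (an assumption, not repository code): the standard-library `getpass` covers the "prompted safely if not indicated" behaviour described in the password help text.
```python
from getpass import getpass

from vktoken.cli.args import get_arg_parser  # import path taken from the file header

args = get_arg_parser().parse_args()
password = args.password or getpass("Password: ")  # prompt only when not given on the CLI
```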
#### File: vktoken/cli/log.py
```python
import sys
def log_error(message: str, *, fatal: bool = False):
print(f"Error: {message}", file=sys.stderr)
if fatal:
sys.exit(-1)
def log_info(message: str):
print(message, file=sys.stdout)
```
{
"source": "jieggii/witless",
"score": 3
}
#### File: witless/api/util.py
```python
def convert_size(size: str):
if size in ["any", "любое"]:
return 0
elif size in ["sm", "small", "маленькое", "короткое"]:
return 1
elif size in ["md", "medium", "среднее"]:
return 2
elif size in ["lg", "large", "большое", "длинное"]:
return 3
else:
raise ValueError(f"Unknown size {size}")
def remove_duplicates(array):
return list(set(array))
```
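A quick illustration of the size aliases above; the assertions simply restate the branches (English and Russian spellings map to the same bucket), and the import path is an assumption.
```python
from api.util import convert_size, remove_duplicates  # assumed import path

assert convert_size("any") == 0
assert convert_size("среднее") == 2                        # "medium"
assert convert_size("large") == convert_size("длинное")    # both map to 3
assert sorted(remove_duplicates([1, 1, 2])) == [1, 2]      # set() drops duplicates; order is not guaranteed
```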
{
"source": "jiegillet/project-euler",
"score": 3
}
#### File: project-euler/Python/Euler 31 - 40.py
```python
import math
def permutations(list):
if len(list)==1:
return [list]
p=permutations(list[1:])
L=len(p)
p*=len(list)
for i in range(len(list)):
for j in range(L):
p[i*L+j]=p[i*L+j][0:i]+[list[0]]+p[i*L+j][i:]
return p
def primes_until(k):
primes=[2,3]
n=3
while n+2<k:
n+=2
        is_prime=True
        for p in primes:
            if p*p>n: break
            if n%p==0: is_prime=False; break
        if is_prime: primes.append(n)
return primes
def is_in_ordered(x,l):
imin,imax=0,len(l)-1
while imax-imin>1:
imid=(imax+imin)/2
if x==l[imid]:
return True
elif x>l[imid]:
imin=imid
else:
imax=imid
return x==l[imin] or x==l[imax]
def primes_sieve(k):
primes=[True]*k
primes[0],primes[1]=False,False
for i in range(4,k,2):
primes[i]=False
n=3
while n<k:
if primes[n]:
for i in range(2*n,k,n):
primes[i]=False
else:
pass
n+=2
return primes
# # Problem 31
# s=200
# a=0
# tot=0
# for p200 in range(s/200+1):
# a=200*p200
# for p100 in range((s-a)/100+1):
# a=200*p200+p100*100
# for p50 in range((s-a)/50+1):
# a=200*p200+p100*100+p50*50
# for p20 in range((s-a)/20+1):
# a=200*p200+p100*100+p50*50+p20*20
# for p10 in range((s-a)/10+1):
# a=200*p200+p100*100+p50*50+p20*20+p10*10
# for p5 in range((s-a)/5+1):
# a=200*p200+p100*100+p50*50+p20*20+p10*10+p5*5
# tot+=(s-a)/2+1
# print tot
# # Problem 32
# perm=permutations(range(1,10))
# l={}
# for p in perm :
# if int(''.join(map(str,p[:2])))*int(''.join(map(str,p[2:5])))==int(''.join(map(str,p[5:]))):
# print ''.join(map(str,p[:2])),''.join(map(str,p[2:5])),''.join(map(str,p[5:]))
# l[int(''.join(map(str,p[5:])))]=0
# if p[0]*int(''.join(map(str,p[1:5])))==int(''.join(map(str,p[5:]))):
# print p[0],''.join(map(str,p[1:5])),''.join(map(str,p[5:]))
# l[int(''.join(map(str,p[5:])))]=0
# print sum([k for k in l])
# # Problem 33
# for num in range(1,10):
# for den in range(num+1,10):
# for dig in range(1,10):
# if den*(10*num+dig)==num*(10*dig+den) or den*(10*dig+num)==num*(10*den+dig) or den*(10*num+dig)==num*(10*den+dig) or den*(10*dig+num)==num*(10*dig+den):
# print num,den, dig
# # final answer, pen and paper: 100
# # Problem 34
# fact=[math.factorial(i) for i in range(10)]
# s=0
# for i in range(10,1000000):
# if sum([fact[int(k)] for k in str(i)])==i:
# s+=i
# print i
# print s
# # Problem 35
# def rot(n):
# dig=str(n)
# return list(set([ int(''.join(dig[i:]+dig[:i])) for i in range(len(dig))]))
#
# lim=1000000
# primes=primes_sieve(lim)
# c=0
# for p in range(lim):
# if not primes[p]:
# pass
# else:
# r=rot(p)
# if all( primes[d] for d in r):
# c+=len(r)
# for d in r:
# primes[d]=False
# print c
# # Problem 36
# c=0
# for i in range(1,1000000,2):
# if str(i)==str(i)[::-1] and "{0:b}".format(i)=="{0:b}".format(i)[::-1]:
# c+=i
# print i,"{0:b}".format(i)
# print c
# # Problem 37
# def is_truncable(n,primes):
# if n<10:
# return False
# n2=str(n)
# return all([is_in_ordered(int(''.join(n2[i:])),primes) for i in range(len(n2))]+[is_in_ordered(int(''.join(n2[:i])),primes) for i in range(1,len(n2))])
#
# primes=primes_until(1000000)
# s=0
# for p in primes:
# if is_truncable(p,primes):
# s+=p
# print p
# print "sum is %d"%(s)
# # Problem 38
# def are_pandigital(l): # Is a list of number l pandigital?
# return ''.join(sorted(''.join(map(str,l))))=='123456789'
#
# pan=[]
# for i in range(1,10000):
# p=[i]
# c=1
# while sum([len(str(k)) for k in p])<9:
# c+=1
# p+=[c*i]
# if sum([len(str(k)) for k in p])==9 and are_pandigital(p):
# pan.append(int(''.join(map(str,p))))
# print pan
# print max(pan)
# # Problem 39
# smax,pmax=0,0
# for p in range(2,1000,2):
# sol=0
# for c in range(3,p/2):
# for a in range(1,c-2):
# if a**2+(p-c-a)**2==c**2:
# sol+=1
# if sol>smax:
# smax,pmax=sol,p
# print pmax
# Problem 40
n='0'
i=0
while len(n)<1E6+1:
i+=1
n+=str(i)
print int(n[1])*int(n[10])*int(n[100])*int(n[1000])*int(n[10000])*int(n[100000])*int(n[1000000])
```
{
"source": "jieguangzhou/AIChallenger_SentimentAnalysis",
"score": 3
}
#### File: AIChallenger_SentimentAnalysis/hah_classification/data.py
```python
from collections import Counter
import logging
import random
import numpy as np
import jieba
from hah_classification.develop.IO import read_file, write_file
import pandas as pd
import os
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
PAD_IDX = 0
UNK_IDX = 1
COLUMNS = ['location_traffic_convenience', 'location_distance_from_business_district', 'location_easy_to_find',
'service_wait_time', 'service_waiters_attitude', 'service_parking_convenience', 'service_serving_speed',
'price_level', 'price_cost_effective', 'price_discount',
'environment_decoration', 'environment_noise', 'environment_space', 'environment_cleaness',
'dish_portion', 'dish_taste', 'dish_look', 'dish_recommendation',
'others_overall_experience', 'others_willing_to_consume_again']
def segment(sentence):
return [i for i in sentence if i.strip()]
def load_vocab(vocab_path):
"""
    Load a vocabulary from file.
"""
vocab = {token: index for index, token in
enumerate(read_file(vocab_path, deal_function=lambda x: x.strip() if x != '\n' else x))}
    logger.info('load vocab (size:%s) from %s' % (len(vocab), vocab_path))
return vocab
def save_vocab(vocab, vocab_path):
"""
    Save a vocabulary to file.
"""
sorted_vocab = sorted(vocab.items(), key=lambda x: x[1])
write_file(sorted_vocab, vocab_path, deal_function=lambda x: x[0] + '\n')
logger.info('save vocab (size:%s) to %s' % (len(vocab), vocab_path))
def load_data(data_path, vocab_path, label_vocab_path, create_vocab=False, create_label_vocab=False, min_freq=1,
vocab_size=None, return_label_vocab=False):
msg = 'load data from %s, ' % data_path
data_set = pd.read_csv(data_path)
vocab_ = Counter() if create_vocab else load_vocab(vocab_path)
label_vocab = {} if create_label_vocab else load_vocab(label_vocab_path)
sequences, lengths = [], []
for content in data_set.iloc[:, 1]:
tokens = segment(content)
if create_vocab:
vocab_.update(tokens)
sequences.append(tokens)
lengths.append(len(tokens))
if create_vocab:
vocab = {'<PAD>': PAD_IDX, '<UNK>': UNK_IDX}
        # vocab_size must be at least 2
print('ori vocab size %s' % len(vocab_))
vocab_size = max(vocab_size or len(vocab_), 2) - 2
logger.info('create vocab, min freq: %s, vocab_size: %s' % (min_freq, vocab_size))
for token, count in vocab_.most_common(vocab_size):
if not token:
continue
if count < min_freq:
break
else:
vocab[token] = len(vocab)
save_vocab(vocab, vocab_path)
else:
vocab = vocab_
columns = data_set.columns.values.tolist()[2:]
dict_labels = {}
dict_label_vocab = {}
for col in columns:
labels = [str(i) for i in data_set[col]]
col_vocab_path = label_vocab_path + '.' + col
if create_label_vocab:
label_vocab = {vocab: index for index, vocab in enumerate(sorted(set(labels)))}
save_vocab(label_vocab, col_vocab_path)
else:
label_vocab = load_vocab(col_vocab_path)
if not return_label_vocab:
labels = list(map(lambda x: label_vocab[x], labels))
dict_labels[col] = np.array(labels)
dict_label_vocab[col] = label_vocab
if create_label_vocab:
save_vocab(label_vocab, label_vocab_path)
sequences = [[vocab.get(token, UNK_IDX) for token in sequence] for sequence in sequences]
msg += 'total : %s' % len(sequences)
logger.info(msg)
if return_label_vocab:
return np.array(sequences), dict_labels, np.array(lengths), dict_label_vocab
else:
return np.array(sequences), dict_labels, np.array(lengths)
def load_muti_label_data(data_path, vocab_path, create_vocab=False,
min_freq=1,
vocab_size=None):
msg = 'load data from %s, ' % data_path
data_set = pd.read_csv(data_path)
vocab_ = Counter() if create_vocab else load_vocab(vocab_path)
sequences, lengths = [], []
for content in data_set.iloc[:, 1]:
tokens = segment(content)
if create_vocab:
vocab_.update(tokens)
sequences.append(tokens)
lengths.append(len(tokens))
if create_vocab:
vocab = {'<PAD>': PAD_IDX, '<UNK>': UNK_IDX}
        # vocab_size must be at least 2
print('ori vocab size %s' % len(vocab_))
vocab_size = max(vocab_size or len(vocab_), 2) - 2
logger.info('create vocab, min freq: %s, vocab_size: %s' % (min_freq, vocab_size))
for token, count in vocab_.most_common(vocab_size):
if not token:
continue
if count < min_freq:
break
else:
vocab[token] = len(vocab)
save_vocab(vocab, vocab_path)
else:
vocab = vocab_
labels = data_set[COLUMNS].values + 2
sequences = [[vocab.get(token, UNK_IDX) for token in sequence] for sequence in sequences]
msg += 'total : %s' % len(sequences)
logger.info(msg)
return np.array(sequences), labels, np.array(lengths)
def batch_iter(sequences, labels, lengths, batch_size=64, reverse=False, cut_length=None, shuffle=True):
"""
    Split the dataset into batches and yield them one by one.
    :param sequences: text sequences
    :param labels: labels
    :param lengths: sequence lengths
    :param reverse: whether to reverse each sequence
    :param cut_length: truncate sequences to this length
:return:
"""
    # shuffle the data
data_num = len(sequences)
indexs = list(range(len(sequences)))
if shuffle:
random.shuffle(indexs)
batch_start = 0
shuffle_sequences = sequences[indexs]
shuffle_labels = labels[indexs]
shuffle_lengths = lengths[indexs]
while batch_start < data_num:
batch_end = batch_start + batch_size
batch_sequences = shuffle_sequences[batch_start:batch_end]
batch_labels = shuffle_labels[batch_start:batch_end]
batch_lengths = shuffle_lengths[batch_start:batch_end]
if isinstance(cut_length, int):
            # truncate sequences to cut_length
batch_sequences = [sequence[:cut_length] for sequence in batch_sequences]
batch_lengths = np.where(batch_lengths > cut_length, cut_length, batch_lengths)
        # pad every sequence to the batch max length
batch_max_length = batch_lengths.max()
batch_padding_sequences = []
for sequence, length in zip(batch_sequences, batch_lengths):
            # copy before padding so the cached dataset sequences are not mutated in place
            padded = list(sequence) + [PAD_IDX] * (batch_max_length - length)
            if reverse:
                padded.reverse()
            batch_padding_sequences.append(padded)
batch_padding_sequences = np.array(batch_padding_sequences)
yield batch_padding_sequences, batch_labels, batch_lengths
batch_start = batch_end
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
vocab_path = '../data/vocab.txt'
label_vocab_path = '../cnews/label.txt'
data_set = load_data('../data/sentiment_analysis_validationset.csv', vocab_path, label_vocab_path,
create_vocab=True, create_label_vocab=True, vocab_size=5000)
# num = 0
# for sequences, labels, lengths in batch_iter(*data_set, batch_size=64):
# print(sequences.shape[1], lengths.max(), sequences.shape[1] == lengths.max())
```
#### File: AIChallenger_SentimentAnalysis/hah_classification/main.py
```python
import tensorflow as tf
import argparse
import os
from sklearn.metrics import f1_score
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from hah_classification.models.model import ClassificationModel
from hah_classification.opt import *
from hah_classification.data import batch_iter, load_muti_label_data
from hah_classification.develop.timer import Timer
from hah_classification.develop.IO import check_path
from hah_classification.utils import save_opt
message = 'step:{0:6}, train loss:{1:6.2}, train accuarcy:{2:7.2%}, val loss :{3:6.2}, val accuary:{4:7.2%}, val f1:{5:6.2}, cost:{6}'
def create_classification_model(class_first,
class_second,
filter_num,
kernel_sizes,
skip,
layer_num,
vocab_size,
embedding_size,
class_num,
learning_rate,
keep_drop_prob=1.0,
embedding_path=None,
inference=False):
if not inference:
with tf.variable_scope('Fasttext', reuse=tf.AUTO_REUSE):
train_model = ClassificationModel(class_first,
class_second,
filter_num,
kernel_sizes,
skip,
layer_num,
vocab_size,
embedding_size,
class_num,
learning_rate,
keep_drop_prob=keep_drop_prob,
is_train=True,
embedding_path=embedding_path)
else:
train_model = None
with tf.variable_scope('Fasttext', reuse=tf.AUTO_REUSE):
inference_model = ClassificationModel(class_first,
class_second,
filter_num,
kernel_sizes,
skip,
layer_num,
vocab_size,
embedding_size,
class_num,
learning_rate,
keep_drop_prob=keep_drop_prob,
is_train=False,
embedding_path=None)
return train_model, inference_model
def get_feed_dict(model, sequences, labels, lengths):
feed_dict = {model.input_sequences: sequences,
model.input_labels: labels,
model.input_lengths: lengths}
return feed_dict
def evaluate(sess, model, dataset, batch_size=64, reverse=False, cut_length=None):
"""评估模型在特定数据集上的loss和accuracy"""
total_num = len(dataset[0])
total_loss = 0
total_accuracy = 0
correct_labels = None
predict_labels = None
for sequences, labels, lengths in batch_iter(*dataset,
batch_size=batch_size,
reverse=reverse,
cut_length=cut_length,
shuffle=False):
loss, accuracy, predict_label = sess.run([model.loss, model.accuracy, model.class_labels],
feed_dict=get_feed_dict(model, sequences, labels, lengths))
if correct_labels is None:
correct_labels = labels
predict_labels = predict_label
else:
correct_labels = np.concatenate([correct_labels, labels])
predict_labels = np.concatenate([predict_labels, predict_label])
batch_num = len(labels)
total_loss += batch_num * loss
total_accuracy += batch_num * accuracy
all_f1 = []
for correct_label, predict_label in zip(correct_labels.T, predict_labels.T):
f1 = f1_score(correct_label, predict_label, average='macro')
all_f1.append(f1)
mean_f1 = sum(all_f1) / len(all_f1)
return total_loss / total_num, total_accuracy / total_num, mean_f1
def create_model(opt, inference=False):
train_model, inference_model = create_classification_model(opt.class_first,
opt.class_second,
opt.filter_num,
opt.kernel_sizes,
opt.skip,
opt.layer_num,
opt.vocab_size,
opt.embedding_size,
opt.class_num,
opt.learning_rate,
keep_drop_prob=opt.keep_drop_prob,
embedding_path=opt.embedding_path,
inference=inference)
if not inference:
inference_model.print_parms()
return train_model, inference_model
def train(opt):
save_path = os.path.join(opt.save_path)
check_path(save_path, create=True)
print('create model')
train_model, inference_model = create_model(opt)
save_opt(opt, save_path)
    # load the train and val sets; build the vocabulary from the training set
print('load data set')
train_dataset = load_muti_label_data(opt.train_data, opt.vocab_path,
create_vocab=True, vocab_size=opt.vocab_size)
val_dataset = load_muti_label_data(opt.val_data, opt.vocab_path)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
saver = tf.train.Saver(max_to_keep=1)
timer = Timer()
best_f1 = 0
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(save_path)
if ckpt:
print('load model from : %s' % ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
tensorboard_path = os.path.join(save_path, 'tensorborad')
summary_writer = tf.summary.FileWriter(tensorboard_path, sess.graph)
for epoch in range(opt.epoch_num):
epoch_key = 'Epoch: %s' % (epoch + 1)
print(epoch_key)
timer.mark(epoch_key)
total_loss, total_accuracy, total_num = 0, 0, 0
train_batch_data = batch_iter(*train_dataset,
batch_size=opt.batch_size,
reverse=opt.reverse,
cut_length=opt.cut_length)
for sequences, labels, lengths in train_batch_data:
loss, accuracy, global_step, _ = sess.run(
[train_model.loss, train_model.accuracy, train_model.global_step, train_model.optimize],
feed_dict=get_feed_dict(train_model, sequences, labels, lengths))
batch_num = len(labels)
total_num += batch_num
total_loss += batch_num * loss
total_accuracy += batch_num * accuracy
if global_step % opt.print_every_step == 0:
train_loss = total_loss / total_num
train_accuary = total_accuracy / total_num
val_loss, val_accuary, f1 = evaluate(sess, inference_model, val_dataset,
batch_size=opt.batch_size,
reverse=opt.reverse,
cut_length=opt.cut_length)
summary = tf.Summary(value=[
tf.Summary.Value(tag='train_loss', simple_value=train_loss),
tf.Summary.Value(tag='train_accuary', simple_value=train_accuary),
tf.Summary.Value(tag='val_loss', simple_value=val_loss),
tf.Summary.Value(tag='val_accuary', simple_value=val_accuary),
tf.Summary.Value(tag='val_f1', simple_value=f1),
])
summary_writer.add_summary(summary, global_step)
cost_time = timer.cost_time()
print(message.format(global_step, train_loss, train_accuary, val_loss, val_accuary, f1,
cost_time))
if f1 > best_f1:
best_f1 = f1
saver.save(sess, os.path.join(save_path, inference_model.name), global_step=global_step)
total_loss, total_accuracy, total_num = 0, 0, 0
val_loss, val_accuary, val_f1 = evaluate(sess, inference_model, val_dataset,
batch_size=opt.batch_size,
reverse=opt.reverse,
cut_length=opt.cut_length)
if val_f1 > best_f1:
saver.save(sess, os.path.join(save_path, inference_model.name))
cost_time = timer.cost_time()
print('val accuary:{0:7.2%}, val f1:{1:6.2}, cost:{2}'.format(val_accuary, val_f1, cost_time))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
add_nn_opt(parser)
add_cnn_opt(parser)
add_train_opt(parser)
add_data_opt(parser)
opt = parser.parse_args()
train(opt)
```
#### File: hah_classification/models/model.py
```python
import tensorflow as tf
from hah_classification.models.nn_factory import init_embedding, muti_class_attention, muti_layer_conv, fc
class ClassificationModel:
def __init__(self,
class_first,
class_second,
filter_num,
kernel_sizes,
skip,
layer_num,
vocab_size,
embedding_size,
class_num,
learning_rate,
keep_drop_prob=1.0,
is_train=False,
embedding_path=None):
self.class_first = class_first
self.class_second = class_second
self.filter_num = filter_num
self.kernel_sizes = kernel_sizes
self.skip = skip
self.layer_num = layer_num
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.class_num = class_num
self.learning_rate = learning_rate
self.keep_drop_prob = keep_drop_prob if is_train else 1.0
self.is_train = is_train
self.embedding_path = embedding_path
self.build()
self.name = self.__class__.__name__
def build(self):
self._build_placeholder()
self._build_embedding(self.vocab_size, self.embedding_size, self.embedding_path)
input_sequences_emb = tf.nn.embedding_lookup(self.embedding, self.input_sequences)
input_sequences_emb = tf.layers.dropout(input_sequences_emb,
rate=1 - self.keep_drop_prob,
training=self.is_train)
conv_out = muti_layer_conv(input_sequences_emb, self.filter_num, self.skip, self.layer_num)
first_outs = muti_class_attention(conv_out, self.class_first)
outputs = []
for first_out, class_num in zip(first_outs, self.class_second):
seconds_outs = muti_class_attention(conv_out, class_num, concat_inputs=first_out)
for second_out in seconds_outs:
outputs.append(fc(second_out, self.class_num))
losses = []
class_pros = []
class_labels = []
labels = tf.unstack(self.input_labels, axis=1)
for output, label in zip(outputs, labels):
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output, labels=label))
class_pro = tf.nn.softmax(output, axis=1)
class_label = tf.expand_dims(tf.argmax(class_pro, axis=1, output_type=tf.int32), axis=1)
index = tf.stack([tf.range(tf.shape(class_label)[0])[:, None], class_label], axis=2)
class_pro = tf.gather_nd(class_pro, index)
losses.append(loss)
class_pros.append(class_pro)
class_labels.append(class_label)
self.loss = tf.reduce_sum(losses)
self.class_pros = tf.concat(class_pros, axis=1)
self.class_labels = tf.concat(class_labels, axis=1)
correct_pred = tf.equal(self.class_labels, self.input_labels)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
if self.is_train:
self._build_optimize(self.loss, self.learning_rate)
def inference(self, sequences, lengths):
labels, class_pro = self.session.run([self.class_labels, self.class_pros],
feed_dict={self.input_sequences: sequences,
self.input_lengths: lengths})
return labels, class_pro
def to_inference(self, model_path):
ckpt = tf.train.get_checkpoint_state(model_path)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.session = tf.Session(config=config)
saver = tf.train.Saver(max_to_keep=1)
saver.restore(self.session, ckpt.model_checkpoint_path)
print('use_inference')
def _build_placeholder(self, batch_size=None, sequence_length=None):
"""
        Build the input placeholders.
        :param batch_size: defaults to None, i.e. a dynamic batch size
        :param sequence_length: sequence length
"""
with tf.name_scope('input_placeholder'):
self.input_sequences = tf.placeholder(tf.int32, [batch_size, sequence_length], 'input_sequences')
self.input_labels = tf.placeholder(tf.int32, [batch_size, sum(self.class_second)])
self.input_lengths = tf.placeholder(tf.int32, [batch_size], 'input_lengths')
def _build_embedding(self, vocab_size, embedding_size, embedding_path):
with tf.device('/cpu:0'):
self.embedding = init_embedding(vocab_size, embedding_size, embedding_path)
def _build_optimize(self, loss, learning_rate, optimizer='adam'):
self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
if optimizer.lower() == 'adam':
Optimizer = tf.train.AdamOptimizer
else:
Optimizer = tf.train.GradientDescentOptimizer
self.optimize = Optimizer(learning_rate=learning_rate).minimize(loss, global_step=self.global_step)
def print_parms(self):
print('\n', '-' * 20)
print('%s : parms' % self.name)
for var in tf.trainable_variables():
print(var.name, var.shape)
print('-' * 20, '\n')
```
#### File: hah_classification/models/nn_factory.py
```python
import tensorflow as tf
from tensorflow.contrib import rnn
def init_embedding(vocab_size, embedding_size, embedding_path=None, name='embedding'):
    # note: embedding_path is currently ignored; the embedding matrix is randomly initialized
    embedding = tf.get_variable(name, [vocab_size, embedding_size])
return embedding
def rnn_factory(num_units, layer_num, cell_type='lstm', input_keep_prob=1.0, output_keep_prob=1.0):
if cell_type.lower() == 'lstm':
cell_func = rnn.BasicLSTMCell
elif cell_type.lower() == 'gru':
cell_func = rnn.GRUCell
    else:
        cell_func = rnn.BasicRNNCell  # rnn.RNNCell is abstract; use the basic concrete cell
cells = [cell_func(num_units) for _ in range(layer_num)]
drop_func = lambda cell: rnn.DropoutWrapper(cell,
input_keep_prob=input_keep_prob,
output_keep_prob=output_keep_prob)
cell = rnn.MultiRNNCell(list(map(drop_func, cells)))
return cell
def muti_layer_conv(inputs, filter_num, skip=1, layer_num=1):
out = inputs
for i in range(layer_num):
with tf.variable_scope('CNN_%s'%(i+1)):
out = tf.layers.conv1d(out, filter_num, skip, padding='same')
if i > 0:
out = inputs + out
# out = tf.tanh(out)
return out
def muti_class_attention(inputs, class_num, concat_inputs=None):
    # inputs: [batch, time, channels]; attention weights: [batch, time, class_num]
    attention = tf.nn.softmax(tf.layers.conv1d(inputs, class_num, 1, padding='same', activation=tf.nn.relu), axis=2)
    inputs_reshape = tf.transpose(inputs, [0, 2, 1])
    # attention_out: [batch, channels, class_num]
    attention_out = tf.matmul(inputs_reshape, attention)
outputs = []
for output in tf.unstack(attention_out, axis=2):
if concat_inputs is not None:
output = tf.concat([output, concat_inputs], axis=1)
outputs.append(output)
return outputs
def fc(inputs, class_num):
# fc = tf.layers.dense(inputs, class_num, activation=tf.nn.relu)
output = tf.layers.dense(inputs, class_num)
return output
```
#### File: AIChallenger_SentimentAnalysis/hah_classification/predict.py
```python
import argparse
import pandas as pd
import numpy as np
import os
from hah_classification.utils import load_opt
from hah_classification.opt import add_train_opt
from hah_classification.main import create_model
from hah_classification.data import load_muti_label_data, batch_iter, COLUMNS
def main(opt):
base_dataset = load_muti_label_data(opt.test_data, opt.vocab_path)
result_data = pd.read_csv(opt.test_data)
_, model = create_model(opt, inference=True)
model.to_inference(os.path.join(opt.save_path))
predict_labels = None
for sequences, labels, lengths in batch_iter(*base_dataset,
batch_size=opt.batch_size,
reverse=opt.reverse,
cut_length=opt.cut_length,
shuffle=False):
predict_label, _ = model.inference(sequences, lengths)
if predict_labels is None:
predict_labels = predict_label
else:
predict_labels = np.concatenate([predict_labels, predict_label])
result_data[COLUMNS] = predict_labels - 2
model.session.close()
result_data.to_csv(os.path.join(opt.save_path, 'test_data_predict_out.csv'), encoding="utf_8", index=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
add_train_opt(parser)
save_path = parser.parse_args().save_path
opt = load_opt(save_path)
opt.test_data = 'data/sentiment_analysis_testa.csv'
main(opt)
```
#### File: AIChallenger_SentimentAnalysis/hah_classification/utils.py
```python
import pickle
import os
def save_opt(opt, path):
pickle.dump(opt, open(os.path.join(path, 'opt'), 'wb'))
def load_opt(path):
opt = pickle.load(open(os.path.join(path, 'opt'), 'rb'))
return opt
```
{
"source": "jieguangzhou/TextClassification",
"score": 3
}
#### File: text_classification/models_tf/fasttext.py
```python
import tensorflow as tf
from text_classification.models_tf.model import BinaryClassificationModel
class FastText(BinaryClassificationModel):
def __init__(self,
vocab_size,
embedding_size,
class_num,
learning_rate,
keep_drop_prob=1.0,
is_train=False,
embedding_path=None):
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.class_num = class_num
self.learning_rate = learning_rate
self.keep_drop_prob = keep_drop_prob if is_train else 1.0
self.is_train = is_train
self.embedding_path = embedding_path
super(FastText, self).__init__()
def build(self):
self._build_placeholder()
self._build_embedding(self.vocab_size, self.embedding_size, self.embedding_path)
input_sequences_emb = tf.nn.embedding_lookup(self.embedding, self.input_sequences)
self.output = tf.reduce_mean(input_sequences_emb, axis=1)
self._build_output(self.output, self.class_num, self.keep_drop_prob)
if self.is_train:
self._build_optimize(self.loss, self.learning_rate)
```
#### File: text_classification/models_tf/textrnn.py
```python
import tensorflow as tf
from text_classification.models_tf.nn_factory import rnn_factory
from text_classification.models_tf.model import BinaryClassificationModel
class TextRNN(BinaryClassificationModel):
"""
TextRNN
"""
def __init__(self,
num_units,
layer_num,
vocab_size,
embedding_size,
class_num,
learning_rate,
keep_drop_prob=1.0,
cell_type='lstm',
bidirectional=False,
is_train=False,
embedding_path=None):
self.num_units = num_units
self.layer_num = layer_num
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.class_num = class_num
self.learning_rate = learning_rate
self.keep_drop_prob = keep_drop_prob if is_train else 1.0
self.cell_type = cell_type
self.bidirectional = bidirectional
self.is_train = is_train
self.embedding_path = embedding_path
super(TextRNN, self).__init__()
def build(self):
self._build_placeholder()
self._build_embedding(self.vocab_size, self.embedding_size, self.embedding_path)
input_sequences_emb = tf.nn.embedding_lookup(self.embedding, self.input_sequences)
with tf.variable_scope('Rnn'):
cell_fw = rnn_factory(self.num_units, self.layer_num, self.cell_type, output_keep_prob=self.keep_drop_prob)
if self.bidirectional:
cell_bw = rnn_factory(self.num_units, self.layer_num, self.cell_type, output_keep_prob=self.keep_drop_prob)
rnn_outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw,
cell_bw,
input_sequences_emb,
sequence_length=self.input_lengths,
dtype=tf.float32)
self.outputs = tf.concat(rnn_outputs, axis=2)
else:
self.outputs, _ = tf.nn.dynamic_rnn(cell_fw, input_sequences_emb, dtype=tf.float32)
self.outputs_last_step = self.outputs[:, -1, :]
self._build_output(self.outputs_last_step, class_num=self.class_num)
if self.is_train:
self._build_optimize(self.loss, self.learning_rate)
```
{
"source": "jiegun314/Antares",
"score": 3
}
#### File: Antares/antares/crt_inv_display.py
```python
from crt_inv_calculation import CurrentInventoryCalculation as CIC
from crt_inv_calculation import TraumaCurrentInventoryCalculation as TU_CIC
from tabulate import tabulate
import public_function as pb_func
import pandas as pd
class CurrentInventoryDisplay:
bu_name = ""
db_path = "../data/_DB/"
backorder_path = "../data/_Backorder/"
inventory_path = "../data/_INV_Export/"
source_file_path = "../data/_Source_Data/"
oneclick_path = "L:\\COMPASS\\Oneclick Inventory Report\\Output\\"
currency_rate = 7.0842
def __init__(self):
pass
def display_code_status(self):
CodeCalculation = CIC(self.__class__.bu_name)
print("===Single Code Inventory===")
        # get the inventory date
code_name = input("Input Material Code: ").upper()
# check if this code exist in material master
while not CodeCalculation.check_code(code_name):
code_name = input("Wrong code, please re-input: ").upper()
# start to get inventory data from oneclick database
str_input = input("Please input date (YYYYMMDD) OR press Enter to get most fresh date: ")
table_name = CodeCalculation.get_newest_date() if str_input == "" else "INV" + str_input
# check if this date exist in newest oneclick file
while not CodeCalculation.check_date_availability(table_name):
print("!!Error - Wrong date, Please re-input! ")
str_input = input("Please input date (YYYYMMDD) OR press Enter to get most fresh date: ")
table_name = CodeCalculation.get_newest_date() if str_input == "" else "INV" + str_input
print("===== <Result of %s> =====" % table_name.lstrip("INV"))
code_inv_output = CodeCalculation.get_code_inv(code_name, table_name)
print(tabulate(code_inv_output, headers="firstrow", floatfmt=",.0f", tablefmt="github"))
def display_mapping_inventory(self):
CodeCalculation = CIC(self.__class__.bu_name)
print("===Inventory Status Mapping with Lists===")
# get data file
file_fullname = self.__class__.source_file_path + "Data_Mapping.txt"
try:
fo = open(file_fullname, "r")
except FileNotFoundError:
print("!Error, please make sure you have put Data_Mapping.txt under _Source_Data folder")
return
code_list = [item.strip() for item in fo.readlines()]
# get the date
inventory_date = input("Inventory Data (YYYYMMDD, Press Enter to get newest) : ")
if inventory_date == "":
table_name = CodeCalculation.get_newest_date()
else:
table_name = "INV" + inventory_date
if not CodeCalculation.check_date_availability(table_name):
print("!Error, please make sure you input the correct date.")
return
inventory_result = CodeCalculation.inventory_mapping(code_list, table_name)
print(tabulate(inventory_result, headers="firstrow", tablefmt="github",
showindex=range(1, len(inventory_result))))
def display_h5_inv_detail(self):
CodeCalculation = CIC(self.__class__.bu_name)
print("===Hierarchy_5 Inventory Detail List===")
# Get H5 Name
h5_input = input("Input Hierarchy_5 Name : ")
h5_name = pb_func.get_available_h5_name(h5_input, self.__class__.bu_name)
# if not right h5 name, return
if h5_name == "NULL":
print("No such Hierarchy_5 name and please try again!~")
return
# get the date
inventory_date = input("Inventory Data (YYYYMMDD, Press Enter to get newest) : ")
table_name = CodeCalculation.get_newest_date() if inventory_date == "" else "INV" + inventory_date
[inventory_result, total_inv_value] = CodeCalculation.get_h5_inv_detail(h5_name, table_name)
print("Total Inventory Value of " + h5_name + " is %s" % (format(total_inv_value, ",.0f")))
print(tabulate(inventory_result, headers="firstrow", tablefmt="github",
showindex=range(1, len(inventory_result)), floatfmt=",.0f"))
def display_current_backorder(self):
CodeCalculation = CIC(self.__class__.bu_name)
print("===Current Backorder List===")
        # get the inventory date
inventory_date = input("Inventory Data (YYYYMMDD, Press Enter to get newest) : ")
table_name = CodeCalculation.get_newest_date() if inventory_date == "" else "INV" + inventory_date
print("===== <Result of %s> =====" % table_name.lstrip("INV"))
backorder_result = CodeCalculation.get_current_bo(table_name)
print(tabulate(backorder_result, headers="firstrow", tablefmt="github",
showindex=range(1, len(backorder_result)), floatfmt=",.0f"))
def display_backorder_trend(self):
# print title
print("===Display Backorder Trend===")
print(">> Calculation ongoing, please wait~")
CodeCalculation = CIC(self.__class__.bu_name)
CodeCalculation.generate_backorder_trend()
print(">> Done, the chart is opened in web browser.")
# display aging backorder list
def display_aging_backorder(self):
# print title
print("===Display Aging Backorder===")
# set exception list with abnormal backorder information
exception_list = pb_func.get_exception_list(self.__class__.bu_name, "Aging_Backorder")
CodeCalculation = CIC(self.__class__.bu_name)
[aging_backorder_list, mapping_days] = CodeCalculation.generate_aging_backorder_list(exception_list)
print("---Aging Backorder List within %s days---" % mapping_days)
print(tabulate(aging_backorder_list, headers="firstrow", tablefmt="psql"))
# display inventory alert with low inventory
def display_low_inventory_alert(self):
# print title
print("===Display Low Inventory Alert===")
# get low inventory result
CodeCalculation = CIC(self.__class__.bu_name)
low_inventory_list = CodeCalculation.get_low_inventory_alert()
print(tabulate(low_inventory_list, headers="keys", tablefmt="psql", showindex="always",
floatfmt=(".0f", ".0f", ".1f", ".1f", ".0f", ".1f", ".0f", ".0f", ".0f")))
def display_current_inventory(self):
CodeCalculation = CIC(self.__class__.bu_name)
print("===Current Inventory List by Hierarchy_5===")
        # get the inventory date
inventory_date = input("Inventory Data (YYYYMMDD, Press Enter to get newest) : ")
table_name = CodeCalculation.get_newest_date() if inventory_date == "" else "INV" + inventory_date
print("===== <Result of %s> =====" % table_name.lstrip("INV"))
inventory_result, summary_result = CodeCalculation.get_current_inventory(table_name)
print(tabulate(inventory_result, headers="firstrow", tablefmt="psql",
showindex=range(1, len(inventory_result)), floatfmt=",.0f"))
total_available_stock_value, total_useful_stock_value, total_stock_value = summary_result
print("Total Available Stock Value: RMB - %s, USD - %s"
% (format(total_available_stock_value, ",.0f"),
format(total_available_stock_value / self.__class__.currency_rate, ",.0f")))
print("Total Useful Stock Value: RMB - %s, USD - %s"
% (format(total_useful_stock_value, ',.0f'),
format(total_useful_stock_value / self.__class__.currency_rate, ',.0f')))
print("Total Stock Value: RMB - %s, USD - %s"
% (format(total_stock_value, ',.0f'), format(total_stock_value / self.__class__.currency_rate, ',.0f')))
def export_inventory_data(self):
CodeCalculation = CIC(self.__class__.bu_name)
# print title
print("===Export Inventory Detail List===")
# get data
inventory_date = input("Inventory Data (YYYYMMDD, Press Enter to get newest) : ")
table_name = CodeCalculation.get_newest_date() if inventory_date == "" else "INV" + inventory_date
df = CodeCalculation.export_inventory_data(table_name)
if isinstance(df, pd.DataFrame):
inventory_file = self.__class__.inventory_path + self.__class__.bu_name \
+ "_Inventory_" + table_name[3:] + ".xlsx"
df.to_excel(inventory_file, index=False)
print("Inventory detail exported to " + inventory_file)
else:
print("Error. No data in that day, please choose the correct date")
def export_backorder_data(self):
CodeCalculation = CIC(self.__class__.bu_name)
# print title
print("===Export Backorder Detail List===")
# get data
inventory_date = input("Inventory Data (YYYYMMDD, Press Enter to get newest) : ")
table_name = CodeCalculation.get_newest_date() if inventory_date == "" else "INV" + inventory_date
df = CodeCalculation.export_backorder_data(table_name)
if isinstance(df, pd.DataFrame):
backorder_file = self.__class__.backorder_path + self.__class__.bu_name \
+ "_Backorder_" + table_name[3:] + ".xlsx"
df.to_excel(backorder_file, index=False)
print("Backorder detail exported to " + backorder_file)
else:
print("Error. No data in that day, please choose the correct date")
def display_code_inventory_trend(self):
CodeCalculation = CIC(self.__class__.bu_name)
print("===Single Code Available Stock Trend===")
code_name = input("Input Material Code: ")
if CodeCalculation.check_code(code_name):
CodeCalculation.generate_code_inv_trend(code_name)
else:
print("!!Error - This Material Code does NOT exist, Please re-input! ")
def display_h5_inventory_trend(self, chart_type='single_line'):
CodeCalculation = CIC(self.__class__.bu_name)
print("===Hierarchy_5 Available Stock Trend===")
        # get the Hierarchy_5 name
h5_input = input("Input Hierarchy_5 Name: ")
if h5_input == "" or h5_input.upper() == "ALL":
h5_result = "ALL"
else:
h5_result = pb_func.get_available_h5_name(h5_input, self.__class__.bu_name)
        # return if the Hierarchy_5 name is not valid
if h5_result != "NULL":
if chart_type == 'single_line':
CodeCalculation.generate_h5_inventory_trend(h5_result)
elif chart_type == 'double_line':
CodeCalculation.generate_h5_inventory_trend_two_dimension(h5_result)
else:
print("!!Error, No such Hierarchy_5 name. Please try again!")
return
def display_pending_trend(self, chart_type='value'):
print("===Display Pending Inventory Trend for %s===" % self.__class__.bu_name)
CodeCalculation = CIC(self.__class__.bu_name)
CodeCalculation.generate_pending_trend(chart_type)
def synchronize_oneclick_data(self):
CodeCalculation = CIC(self.__class__.bu_name)
lst_xcpt = ['20190118', ]
print("===Sync Current Inventory Data from OneClick===")
sync_result = CodeCalculation.inv_data_sync(90, lst_xcpt)
if sync_result == "ERROR":
print("!Error, the sharefolder cannot be opened. Make sure you've connected to JNJ network and try again.")
else:
print(">> Synchronization succeed!")
print(">> %s days succeed, %s days fail. Updated to %s" % (sync_result[0], sync_result[1], sync_result[2]))
def sync_ned_inventory(self):
CodeCalculation = CIC(self.__class__.bu_name)
print("===Sync Current NED Inventory Data===")
sync_result = CodeCalculation.sync_ned_inventory()
if sync_result:
print(">> Synchronization succeed!")
else:
print(">> Synchronization fail")
def command_list(self):
cmd_list_dict = {"inv": self.display_current_inventory,
"inv_export": self.export_inventory_data,
"inv_alert": self.display_low_inventory_alert,
"bo": self.display_current_backorder,
"bo_export": self.export_backorder_data,
"pending": self.display_pending_trend,
"pending -q": (self.display_pending_trend, "quantity"),
"check": self.display_code_status,
"trend": self.display_code_inventory_trend,
"h5_trend": self.display_h5_inventory_trend,
"h5_trend_q": (self.display_h5_inventory_trend, 'double_line'),
"h5_detail": self.display_h5_inv_detail,
"bo_trend": self.display_backorder_trend,
"mapping": self.display_mapping_inventory,
"aging": self.display_aging_backorder,
"sync": self.synchronize_oneclick_data,
"ned_sync":self.sync_ned_inventory,
"help": self.show_command_list
}
cmd_code = input("cmd >> crt_inv >> ")
while cmd_code.upper() != "EXIT":
if cmd_code in cmd_list_dict:
if isinstance(cmd_list_dict[cmd_code], tuple):
cmd_list_dict[cmd_code][0](cmd_list_dict[cmd_code][1])
else:
cmd_list_dict[cmd_code]()
else:
print("!!ERROR: Wrong CMD code. Plz input correct cmd code, or type \"exit\" to quit.")
cmd_code = input("cmd >> crt_inv >> ")
print("==============================<Back to Main Menu>==============================")
# Display command list
@staticmethod
def show_command_list():
import public_function
public_function.display_command_list("current_inventory_command")
class TraumaCurrentInventoryDisplay(CurrentInventoryDisplay):
def __init__(self):
super().__init__()
self.__class__.bu_name = "TU"
def display_code_status(self):
CodeCalculation = CIC(self.__class__.bu_name)
print("===Single Code Inventory===")
        # get the date
code_name = input("Input Material Code: ").upper()
        # check if this code exists in the material master
while not CodeCalculation.check_code(code_name):
code_name = input("Wrong code, please re-input: ").upper()
# start to get inventory data from oneclick database
str_input = input("Please input date (YYYYMMDD) OR press Enter to get most fresh date: ")
table_name = CodeCalculation.get_newest_date() if str_input == "" else "INV" + str_input
        # check if this date exists in the newest oneclick file
while not CodeCalculation.check_date_availability(table_name):
print("!!Error - Wrong date, Please re-input! ")
str_input = input("Please input date (YYYYMMDD) OR press Enter to get most fresh date: ")
table_name = CodeCalculation.get_newest_date() if str_input == "" else "INV" + str_input
print("===== <Result of %s> =====" % table_name.lstrip("INV"))
code_inv_output = CodeCalculation.get_code_inv_with_ned(code_name, table_name)
print(tabulate(code_inv_output, headers="firstrow", floatfmt=",.0f", tablefmt="github"))
def display_mapping_inventory(self):
CodeCalculation = CIC(self.__class__.bu_name)
print("===Inventory Status Mapping with Lists===")
# get data file
file_fullname = self.__class__.source_file_path + "Data_Mapping.txt"
try:
fo = open(file_fullname, "r")
except FileNotFoundError:
print("!Error, please make sure you have put Data_Mapping.txt under _Source_Data folder")
return
        code_list = [item.strip() for item in fo.readlines()]
        fo.close()
# get the date
inventory_date = input("Inventory Data (YYYYMMDD, Press Enter to get newest) : ")
if inventory_date == "":
table_name = CodeCalculation.get_newest_date()
else:
table_name = "INV" + inventory_date
if not CodeCalculation.check_date_availability(table_name):
print("!Error, please make sure you input the correct date.")
return
inventory_result = CodeCalculation.inventory_mapping_with_ned_inv(code_list, table_name)
print(tabulate(inventory_result, headers="firstrow", tablefmt="github",
showindex=range(1, len(inventory_result))))
def display_current_backorder(self):
code_calculation = TU_CIC()
# code_calculation = CIC('TU')
print("===Current Backorder List===")
        # get the date
        inventory_date = input("Inventory Date (YYYYMMDD, Press Enter to get newest) : ")
table_name = code_calculation.get_newest_date() if inventory_date == "" else "INV" + inventory_date
print("===== <Result of %s> =====" % table_name.lstrip("INV"))
backorder_result = code_calculation.get_current_bo(table_name)
print(tabulate(backorder_result, headers="firstrow", tablefmt="github",
showindex=range(1, len(backorder_result)), floatfmt=",.0f"))
class PowerToolCurrentInventoryDisplay(CurrentInventoryDisplay):
def __init__(self):
super().__init__()
self.__class__.bu_name = "PT"
def display_low_inventory_alert(self):
print("!!Warning. This function is not available for PT.")
class CMFTCurrentInventoryDisplay(CurrentInventoryDisplay):
def __init__(self):
super().__init__()
self.__class__.bu_name = "CMF"
def display_low_inventory_alert(self):
print("!!Warning. This function is not available for CMFT.")
class JointCurrentInventoryDisplay(CurrentInventoryDisplay):
def __init__(self):
super().__init__()
self.__class__.bu_name = "JT"
def display_low_inventory_alert(self):
print("!!Warning. This function is not available for Joint.")
class MitekCurrentInventoryDisplay(CurrentInventoryDisplay):
def __init__(self):
super().__init__()
self.__class__.bu_name = "MT"
def display_low_inventory_alert(self):
print("!!Warning. This function is not available for Mitek.")
class SpineCurrentInventoryDisplay(CurrentInventoryDisplay):
def __init__(self):
super().__init__()
self.__class__.bu_name = "Spine"
def display_low_inventory_alert(self):
print("!!Warning. This function is not available for Spine.")
if __name__ == "__main__":
test = TraumaCurrentInventoryDisplay()
test.display_current_backorder()
# test.inv_data_sync(50)
```
#### File: Antares/antares/data_display.py
```python
import json
import sqlite3
import time
import calculation
from tabulate import tabulate
import public_function as pb_func
import draw_chart as chart
class DataDisplay:
bu_name = ""
user_name = ""
def __init__(self, bu, name):
self.__class__.bu_name = bu
self.__class__.user_name = name
def get_current_month(self):
return time.strftime("%Y-%m", time.localtime())
@staticmethod
def show_command_list():
pb_func.display_command_list("public_command")
    # print data as a formatted table
def format_output(self, data):
print(tabulate(data, tablefmt="psql", headers="firstrow", floatfmt=",.0f"))
    # display sales data for a single code
def show_code_sales_data(self, month_number=12):
# print title
print("---- %sM Sales List for Single Code---" % month_number)
material_code = input("Material code: ").upper()
if not pb_func.check_code_availability(self.__class__.bu_name, material_code):
print("!!ERROR, This code does NOT exist.")
return
else:
# Get master data
infocheck = calculation.InfoCheck(self.__class__.bu_name)
mm_result = infocheck.get_single_code_all_master_data(material_code, ['Description'])[0]
print(material_code, ": ", mm_result)
self.format_output(self.list_code_sales_data(material_code, month_number))
# display sales for single code
def list_code_sales_data(self, material_code, month_number):
infocheck = calculation.InfoCheck(self.__class__.bu_name)
# generate month list
sales_title = ["Month", "GTS", "LPSales", "IMS"]
sales_output = [infocheck.get_time_list(self.get_current_month(), 0 - month_number)]
# get sales data
sale_type = ["GTS", "LPSales", "IMS"]
for sales_item in sale_type:
sales_output.append(infocheck.get_code_sales(sales_item, material_code, month_number))
return pb_func.add_table_index(sales_output, sales_title)
    # display historical inventory quantity for a single code
def show_code_historical_inventory(self, month_number=12):
# Print title
print("---- -Historical Inventory for Single Code---")
material_code = input("Material code: ").upper()
        # read master data
infocheck = calculation.InfoCheck(self.__class__.bu_name)
if not pb_func.check_code_availability(self.__class__.bu_name, material_code):
print("!!ERROR, This code does NOT exist.")
return
else:
mm_result = infocheck.get_single_code_all_master_data(material_code, ['Description'])[0]
print(material_code, ": ", mm_result)
# Generate date list
self.format_output(self.list_code_historical_inventory(material_code, month_number))
# generate code level historical inventory quantity and month
def list_code_historical_inventory(self, material_code, month_number):
infocheck = calculation.InfoCheck(self.__class__.bu_name)
month_list = infocheck.get_time_list(self.get_current_month(), 0-month_number)
jnj_inventory_quantity = infocheck.get_code_inventory(material_code, "JNJ", month_number)
lp_inventory_quantity = infocheck.get_code_inventory(material_code, "LP", month_number)
gts_quantity = infocheck.get_code_sales("GTS", material_code, month_number)
lpsales_quantity = infocheck.get_code_sales("LPSales", material_code, month_number)
jnj_inventory_month = infocheck.get_inventory_month(jnj_inventory_quantity, gts_quantity, month_number)
lp_inventory_month = infocheck.get_inventory_month(lp_inventory_quantity, lpsales_quantity, month_number)
inventory_output = [month_list, jnj_inventory_quantity, jnj_inventory_month, lp_inventory_quantity,
lp_inventory_month]
inventory_title = ["Month", "JNJ_INV", "JNJ_INV_Mth", "LP_INV", "LP_INV_Mth"]
return pb_func.add_table_index(inventory_output, inventory_title)
    # display all information for a single code
def show_code_all_info(self, month_number=12):
        # print title
print("---- Overall Information for Single Code---")
material_code = input("Material code: ").strip().upper()
infocheck = calculation.InfoCheck(self.__class__.bu_name)
master_data_list = ['Description', 'Chinese_Description', 'Hierarchy_4', 'Hierarchy_5', 'Sales_Status',
'Purchase_Status', 'Standard_Cost', 'SAP_Price', 'Ranking', 'MRP_Type', 'Reorder_Point',
'Phoenix_Status', 'Phoenix_Discontinuation_Date', 'Phoenix_Obsolescence_Date', 'GTIN', 'RAG']
master_data_result = infocheck.get_single_code_all_master_data(material_code, master_data_list)
if master_data_result:
print("======= <Detail Information> =======")
for i in range(len(master_data_list)):
if master_data_list[i] == 'RAG':
print("---- <RAG Information> ----")
rag_result = json.loads(master_data_result[i])
for j in range(len(rag_result)):
print(rag_result[str(j+1)]['REGLICNO'], " - ", rag_result[str(j+1)]['REGAPDATE'], " - ",
rag_result[str(j+1)]['REGEXDATE'], " - ", rag_result[str(j+1)]['LIFEYEAR'])
else:
print(master_data_list[i], " - ", master_data_result[i])
print("======= <Sales & Inventory Information> =======")
            # output historical sales data
print("--%s Months Historical Sales Data --" % month_number)
self.format_output(self.list_code_sales_data(material_code, month_number))
            # output historical inventory data
print("--%s Months Historical Inventory Data --" % month_number)
self.format_output(self.list_code_historical_inventory(material_code, month_number))
            # display the statistical forecast
print("--Next 12 Months Statistical Forecast--")
forecast_quantity = infocheck.get_code_forecast(material_code, "Statistical", 12)
if forecast_quantity != "Fail":
self.format_output(forecast_quantity)
            # display the final forecast
print("--Next 12 Months Final Forecast--")
forecast_quantity = infocheck.get_code_forecast(material_code, "Final", 12)
if forecast_quantity != "Fail":
self.format_output(forecast_quantity)
            # display ESO
self.display_material_eso(material_code, "code")
print("-----------END-----------")
else:
print("!! Error. This code does not exist.")
return
# get hierarchy_5 name
def get_h5_name(self):
h5_input = input("Please input Hierarchy_5 name: ")
h5_name = "ALL" if h5_input.upper() == "ALL" else pb_func.get_available_h5_name(h5_input, self.__class__.bu_name)
return h5_name
# get sales data for one Hierarchy_5
def get_h5_sales_data(self, h5_name, month_number):
h5_info_check = calculation.InfoCheck(self.__class__.bu_name)
print("====================================================================")
print("--24 Month Historical Sales Data for %s--" % h5_name)
price_type = ("Standard_Cost", "SAP_Price")
sales_type = ("GTS", "LPSales", "IMS")
for price_item in price_type:
# Print price title
print("-With %s-" % price_item)
# Add Month list
sales_title = ["Month", "GTS", "LPSales", "IMS"]
h5_sales_result = [h5_info_check.get_time_list(self.get_current_month(), 0 - month_number)]
# get data
for sales_item in sales_type:
h5_sales_result.append(h5_info_check.get_h5_sales_data(sales_item, price_item, h5_name, month_number))
self.format_output(pb_func.add_table_index(h5_sales_result, sales_title))
# show sales data for one Hierarchy_5
def show_h5_sales_data(self, month_number=12):
h5_name = self.get_h5_name()
if h5_name != "NULL":
self.get_h5_sales_data(h5_name, month_number)
else:
print("!!Error, Wrong Hierarchy_5 Name, Please Check!")
# get inventory data for one Hierarchy_5:
def get_h5_inventory(self, h5_name, month_number):
h5_info_check = calculation.InfoCheck(self.__class__.bu_name)
# Print title
print("====================================================================")
print("--%s Month Historical Inventory for %s (RMB)--" % (month_number, h5_name))
# price_type = ("Standard_Cost", "SAP_Price")
inv_type = ("JNJ", "LP")
inv_std_cost_title = ["Month", "JNJ", "LP"]
inv_sap_price_title = ["Month", "JNJ", "JNJ_Mth", "LP", "LP_Mth"]
# print with Standard Cost
print("-With Standard_Cost-")
h5_inv_result = [h5_info_check.get_time_list(self.get_current_month(), 0 - month_number)]
for inv_item in inv_type:
h5_inv_result.append(h5_info_check.get_h5_inventory_data(inv_item, "Standard_Cost", h5_name, month_number))
self.format_output(pb_func.add_table_index(h5_inv_result, inv_std_cost_title))
# print with SAP Price
print("-With SAP Price-")
h5_inv_result = [h5_info_check.get_time_list(self.get_current_month(), 0 - month_number)]
inv_parameter = [["JNJ", "GTS"], ["LP", "LPSales"]]
for para_item in inv_parameter:
[inv_type, sale_type] = para_item
# get sales data
h5_sales_result = h5_info_check.get_h5_sales_data(sale_type, "SAP_Price", h5_name, month_number)
# generate inventory value and inventory month
h5_inv_output = h5_info_check.get_h5_inventory_data(inv_type, "SAP_Price", h5_name, month_number)
h5_inv_month = h5_info_check.get_inventory_month(h5_inv_output, h5_sales_result, month_number)
h5_inv_result.extend([h5_inv_output, h5_inv_month])
self.format_output(pb_func.add_table_index(h5_inv_result, inv_sap_price_title))
def show_h5_inventory(self, month_number=12):
h5_name = self.get_h5_name()
if h5_name != "NULL":
self.get_h5_inventory(h5_name, month_number)
else:
print("!!Error, Wrong Hierarchy_5 Name, Please Check!")
# display forecast for one Hierarchy_5
    def show_h5_forecast(self, h5_name, fcst_type, month_quantity=12):
        print("== %s Forecast for %s ==" % (fcst_type, h5_name))
        forecast_calculation = calculation.InfoCheck(self.__class__.bu_name)
        month_list = forecast_calculation.get_time_list(self.get_current_month(), month_quantity)
        forecast_result = forecast_calculation.get_h5_forecast(h5_name, fcst_type, month_quantity)
forecast_output = pb_func.add_table_index([month_list, forecast_result], ["Month", "Value (SAP Price)"])
self.format_output(forecast_output)
# Show all information of one Hierarchy_5
def show_h5_all_info(self, month_number=12, forecast_month=12):
h5_name = self.get_h5_name()
if h5_name != "NULL":
self.get_h5_sales_data(h5_name, month_number)
self.get_h5_inventory(h5_name, month_number)
self.show_h5_forecast(h5_name, "Statistical", forecast_month)
self.show_h5_forecast(h5_name, "Final", forecast_month)
self.display_material_eso(h5_name, "h5")
else:
print("!!Error, Wrong Hierarchy_5 Name, Please Check!")
    # display ESO for a single code
def show_code_eso(self):
code_name = input("Input Material Code: ").upper()
self.display_material_eso(code_name, eso_type="code")
# display eso for single code
def display_material_eso(self, material_code, eso_type):
info_check = calculation.InfoCheck(self.__class__.bu_name)
eso_result = info_check.get_material_eso(material_code, eso_type)
if eso_type == "code":
eso_output = [["Cycle", ], ["E_Qty", ], ["SM_Qty", ], ["O_Qty", ], ["ESO_Qty", ], ["Total_ESO_Value", ]]
else:
eso_output = [["Cycle", ], ["NPI_Reverse_ESO", ], ["Total_ESO_Value", ]]
for item in eso_result:
for index in range(0, len(eso_output)):
eso_output[index].append(item[index])
self.format_output(eso_output)
    # display the combined chart for a single code
def show_code_chart(self):
print("==Single Code General Chart==")
material_code = input("Material code: ").upper()
        # check whether the code exists
if not pb_func.check_code_availability(self.__class__.bu_name, material_code):
print("!! This code does no exist, please try again.")
return
        # read sales data
infocheck = calculation.InfoCheck(self.__class__.bu_name)
sales_list = ("GTS", "LPSales", "IMS")
sales_output = []
for index in range(0, 3):
sales_output.append(infocheck.get_code_sales(sales_list[index], material_code, 24))
        # read the final forecast
code_forecast = infocheck.get_code_forecast(material_code, "Final", 12)[1]
        # read inventory data
historical_jnj_inv = infocheck.get_code_inventory(material_code, "JNJ", 24)
historical_lp_inv = infocheck.get_code_inventory(material_code, "LP", 24)
historical_inv = [historical_jnj_inv, historical_lp_inv]
self.draw_sales_inv_fcst_chart(material_code, sales_output, historical_inv, code_forecast, 12, "code")
    # display the combined chart for a Hierarchy_5
def show_h5_chart(self):
        # print title
print("==Hierarchy_5 General Chart==")
h5_name = self.get_h5_name()
if h5_name == "NULL":
return
h5_info_check = calculation.InfoCheck(self.__class__.bu_name)
        # read data
        # read historical sales
sales_type = ("GTS", "LPSales", "IMS")
h5_sales_result = []
for sales_item in sales_type:
h5_sales_result.append(h5_info_check.get_h5_sales_data(sales_item, "SAP_Price", h5_name, 24))
        # read inventory data
inv_type = ("JNJ", "LP")
h5_inv_result = []
for inv_item in inv_type:
h5_inv_result.append(h5_info_check.get_h5_inventory_data(inv_item, "SAP_Price", h5_name, 24))
        # read the final forecast
h5_forecast = h5_info_check.get_h5_forecast(h5_name, "Final", 12)
# Generate the chart with 12 months forecast
chart_name = self.__class__.bu_name if h5_name == 'ALL' else h5_name
self.draw_sales_inv_fcst_chart(chart_name, h5_sales_result, h5_inv_result, h5_forecast, 12, "h5")
    # draw the combined chart
def draw_sales_inv_fcst_chart(self, name, sales_data, inv_data, fcst_data, fcst_month, data_type):
# get integer format of all sales data
sales_gts = list(map(int, sales_data[0]))
sales_lpsales = list(map(int, sales_data[1]))
sales_ims = list(map(int, sales_data[2]))
jnj_inv, lp_inv = inv_data
# set blank and zero list
lst_blank, lst_zero = [], []
for index in range(0, 12):
lst_blank.append(None)
lst_zero.append(0)
# get inventory month
jnj_inv_month = calculation.InfoCheck.get_inventory_month(jnj_inv, sales_gts, 24, blank_type=1)
lp_inv_month = calculation.InfoCheck.get_inventory_month(lp_inv, sales_lpsales, 24, blank_type=1)
        # pad sales and inventory data with blanks for the future 12 months
sales_gts.extend(lst_blank)
sales_lpsales.extend(lst_blank)
sales_ims.extend(lst_blank)
jnj_inv_month.extend(lst_blank)
lp_inv_month.extend(lst_blank)
        # pad forecast data with blanks for the historical 24 months
final_fcst_data = lst_blank + lst_blank + fcst_data
# link fcst data with gts
final_fcst_data[23] = sales_gts[23]
# start to generate chart
# generate full month list
infocheck = calculation.InfoCheck(self.__class__.bu_name)
historical_month_list = infocheck.get_time_list(self.get_current_month(), -24)
final_month_list = historical_month_list + infocheck.get_time_list(self.get_current_month(), fcst_month)
# draw the chart
chart.all_in_one_echart(name, final_month_list, jnj_inv_month, lp_inv_month, sales_gts, sales_lpsales,
sales_ims, final_fcst_data, data_type)
print("--The chart is generated, you can also find it under ../data/_Charter --")
if __name__ == "__main__":
test = DataDisplay("TU", "Jeffrey")
test.show_code_historical_inventory()
```
#### File: Antares/antares/gui_design.py
```python
import wx
import wx.xrc
import wx.adv
###########################################################################
## Class DragonFrame
###########################################################################
class DragonFrame ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"Project Dragon GUI v0609", pos = wx.DefaultPosition, size = wx.Size( 1280,800 ), style = wx.CAPTION|wx.CLOSE_BOX|wx.MINIMIZE_BOX|wx.TAB_TRAVERSAL )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
bSizer1 = wx.BoxSizer( wx.VERTICAL )
self.ntbkOneclick = wx.Notebook( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.NB_TOP )
self.pnlOneclick = wx.Panel( self.ntbkOneclick, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer4 = wx.BoxSizer( wx.VERTICAL )
bSizer5 = wx.BoxSizer( wx.HORIZONTAL )
bSizer5.SetMinSize( wx.Size( -1,80 ) )
bSizer5.Add( ( 20, 0), 0, wx.EXPAND, 5 )
self.btnSync = wx.BitmapButton( self.pnlOneclick, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW|wx.BORDER_NONE )
self.btnSync.SetBitmap( wx.Bitmap( u".icon/sync_black.png", wx.BITMAP_TYPE_ANY ) )
self.btnSync.SetBitmapCurrent( wx.Bitmap( u".icon/sync_blue.png", wx.BITMAP_TYPE_ANY ) )
self.btnSync.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
self.btnSync.SetToolTip( u"Sync Inventory with oneclick file" )
bSizer5.Add( self.btnSync, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
bSizer5.Add( ( 20, 0), 0, wx.EXPAND, 5 )
self.m_staticline7 = wx.StaticLine( self.pnlOneclick, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_VERTICAL )
bSizer5.Add( self.m_staticline7, 0, wx.EXPAND |wx.ALL, 5 )
bSizer5.Add( ( 20, 0), 0, wx.EXPAND, 5 )
rdbxCalculationTypeChoices = [ u"by Code", u"by Hierarchy" ]
self.rdbxCalculationType = wx.RadioBox( self.pnlOneclick, wx.ID_ANY, u"Calculation Type", wx.DefaultPosition, wx.Size( -1,88 ), rdbxCalculationTypeChoices, 1, wx.RA_SPECIFY_COLS )
self.rdbxCalculationType.SetSelection( 0 )
bSizer5.Add( self.rdbxCalculationType, 0, wx.ALL|wx.ALIGN_BOTTOM, 5 )
bSizer5.Add( ( 10, 20), 0, wx.EXPAND, 5 )
bSizer9 = wx.BoxSizer( wx.VERTICAL )
bSizer101 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText2 = wx.StaticText( self.pnlOneclick, wx.ID_ANY, u"Material / H5 Name: ", wx.DefaultPosition, wx.Size( -1,15 ), wx.ALIGN_CENTER_HORIZONTAL )
self.m_staticText2.Wrap( -1 )
self.m_staticText2.SetMaxSize( wx.Size( -1,30 ) )
bSizer101.Add( self.m_staticText2, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
bSizer101.Add( ( 280, 0), 0, wx.EXPAND, 5 )
self.chkbxWholeBU = wx.CheckBox( self.pnlOneclick, wx.ID_ANY, u"BU Level", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer101.Add( self.chkbxWholeBU, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
bSizer9.Add( bSizer101, 1, wx.EXPAND, 5 )
bSizer11 = wx.BoxSizer( wx.HORIZONTAL )
self.txtMaterialCode = wx.TextCtrl( self.pnlOneclick, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 120,80 ), wx.TE_MULTILINE )
bSizer11.Add( self.txtMaterialCode, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
lstbxCodeSelectionChoices = []
self.lstbxCodeSelection = wx.ListBox( self.pnlOneclick, wx.ID_ANY, wx.DefaultPosition, wx.Size( 350,80 ), lstbxCodeSelectionChoices, wx.LB_NEEDED_SB|wx.LB_SINGLE )
bSizer11.Add( self.lstbxCodeSelection, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
bSizer9.Add( bSizer11, 0, wx.EXPAND, 5 )
bSizer5.Add( bSizer9, 0, wx.EXPAND, 5 )
bSizer5.Add( ( 30, 0), 0, wx.EXPAND, 5 )
bSizer71 = wx.BoxSizer( wx.VERTICAL )
bSizer71.SetMinSize( wx.Size( 0,60 ) )
bSizer71.Add( ( 1, 25), 0, wx.EXPAND, 5 )
self.chkbxToday = wx.CheckBox( self.pnlOneclick, wx.ID_ANY, u"Today", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer71.Add( self.chkbxToday, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5 )
bSizer71.Add( ( 0, 5), 0, wx.EXPAND, 5 )
self.dtpkDate = wx.adv.DatePickerCtrl( self.pnlOneclick, wx.ID_ANY, wx.DefaultDateTime, wx.Point( 0,0 ), wx.Size( 110,30 ), wx.adv.DP_DROPDOWN|wx.BORDER_SUNKEN )
bSizer71.Add( self.dtpkDate, 0, wx.ALL, 5 )
bSizer5.Add( bSizer71, 0, wx.EXPAND, 5 )
bSizer5.Add( ( 25, 0), 0, wx.EXPAND, 5 )
self.btnCodeSubmit = wx.BitmapButton( self.pnlOneclick, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW|wx.BORDER_NONE )
self.btnCodeSubmit.SetBitmap( wx.Bitmap( u".icon/submit.png", wx.BITMAP_TYPE_ANY ) )
self.btnCodeSubmit.SetBitmapCurrent( wx.Bitmap( u".icon/submit_green.png", wx.BITMAP_TYPE_ANY ) )
self.btnCodeSubmit.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
self.btnCodeSubmit.SetToolTip( u"Submit" )
bSizer5.Add( self.btnCodeSubmit, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
bSizer5.Add( ( 20, 0), 0, wx.EXPAND, 5 )
self.btnReset = wx.BitmapButton( self.pnlOneclick, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW|wx.BORDER_NONE )
self.btnReset.SetBitmap( wx.Bitmap( u".icon/reset.png", wx.BITMAP_TYPE_ANY ) )
self.btnReset.SetBitmapCurrent( wx.Bitmap( u".icon/reset_red.png", wx.BITMAP_TYPE_ANY ) )
self.btnReset.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
self.btnReset.SetToolTip( u"Reset Input" )
bSizer5.Add( self.btnReset, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
bSizer4.Add( bSizer5, 0, wx.EXPAND, 5 )
self.m_staticline1 = wx.StaticLine( self.pnlOneclick, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL )
bSizer4.Add( self.m_staticline1, 0, wx.EXPAND |wx.ALL, 5 )
bSizer6 = wx.BoxSizer( wx.VERTICAL )
bSizer6.SetMinSize( wx.Size( -1,30 ) )
bSizer8 = wx.BoxSizer( wx.HORIZONTAL )
self.btnInventoryExport = wx.BitmapButton( self.pnlOneclick, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW|wx.BORDER_NONE )
self.btnInventoryExport.SetBitmap( wx.Bitmap( u".icon/inventory_export.png", wx.BITMAP_TYPE_ANY ) )
self.btnInventoryExport.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
self.btnInventoryExport.SetToolTip( u"Export Inventory" )
bSizer8.Add( self.btnInventoryExport, 0, wx.ALL, 5 )
self.btnBackorderExport = wx.BitmapButton( self.pnlOneclick, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW|wx.BORDER_NONE )
self.btnBackorderExport.SetBitmap( wx.Bitmap( u".icon/backorder_export.png", wx.BITMAP_TYPE_ANY ) )
self.btnBackorderExport.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
self.btnBackorderExport.SetToolTip( u"Export Backorder" )
bSizer8.Add( self.btnBackorderExport, 0, wx.ALL, 5 )
self.m_staticline6 = wx.StaticLine( self.pnlOneclick, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_VERTICAL )
self.m_staticline6.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
bSizer8.Add( self.m_staticline6, 0, wx.EXPAND |wx.ALL, 5 )
bSizer8.Add( ( 20, 0), 0, wx.EXPAND, 5 )
self.m_staticText41 = wx.StaticText( self.pnlOneclick, wx.ID_ANY, u"Log:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText41.Wrap( -1 )
bSizer8.Add( self.m_staticText41, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.txtLog = wx.TextCtrl( self.pnlOneclick, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 800,-1 ), wx.TE_CENTER|wx.TE_READONLY|wx.BORDER_SUNKEN )
self.txtLog.SetForegroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_CAPTIONTEXT ) )
self.txtLog.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_INFOBK ) )
bSizer8.Add( self.txtLog, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
bSizer8.Add( ( 200, 0), 0, wx.EXPAND, 5 )
self.btnOutputDownload = wx.BitmapButton( self.pnlOneclick, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW|wx.BORDER_NONE )
self.btnOutputDownload.SetBitmap( wx.Bitmap( u".icon/data_download.png", wx.BITMAP_TYPE_ANY ) )
self.btnOutputDownload.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
self.btnOutputDownload.SetToolTip( u"Download data in the list" )
bSizer8.Add( self.btnOutputDownload, 0, wx.ALL, 5 )
bSizer6.Add( bSizer8, 1, wx.EXPAND, 5 )
bSizer4.Add( bSizer6, 0, wx.EXPAND, 5 )
self.m_staticline2 = wx.StaticLine( self.pnlOneclick, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL )
bSizer4.Add( self.m_staticline2, 0, wx.EXPAND |wx.ALL, 5 )
bSizer7 = wx.BoxSizer( wx.VERTICAL )
self.listCtrlOutput = wx.ListCtrl( self.pnlOneclick, wx.ID_ANY, wx.DefaultPosition, wx.Size( 1275,-1 ), wx.LC_REPORT|wx.LC_VRULES )
bSizer7.Add( self.listCtrlOutput, 1, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5 )
bSizer4.Add( bSizer7, 1, wx.EXPAND, 5 )
self.pnlOneclick.SetSizer( bSizer4 )
self.pnlOneclick.Layout()
bSizer4.Fit( self.pnlOneclick )
self.ntbkOneclick.AddPage( self.pnlOneclick, u"Oneclick", False )
bSizer1.Add( self.ntbkOneclick, 1, wx.EXPAND |wx.ALL, 5 )
self.SetSizer( bSizer1 )
self.Layout()
self.statusBar = self.CreateStatusBar( 2, wx.STB_SIZEGRIP, wx.ID_ANY )
self.m_menubar1 = wx.MenuBar( 0 )
self.menuFile = wx.Menu()
self.mExit = wx.MenuItem( self.menuFile, wx.ID_ANY, u"Exit", wx.EmptyString, wx.ITEM_NORMAL )
self.menuFile.Append( self.mExit )
self.m_menubar1.Append( self.menuFile, u"File" )
self.menuAbout = wx.Menu()
self.showAbout = wx.MenuItem( self.menuAbout, wx.ID_ANY, u"About", wx.EmptyString, wx.ITEM_NORMAL )
self.menuAbout.Append( self.showAbout )
self.m_menubar1.Append( self.menuAbout, u"About" )
self.SetMenuBar( self.m_menubar1 )
self.m_toolBar1 = self.CreateToolBar( wx.TB_HORIZONTAL, wx.ID_ANY )
self.m_toolBar1.SetToolBitmapSize( wx.Size( 20,20 ) )
self.m_toolBar1.SetToolPacking( 0 )
self.m_toolBar1.AddSeparator()
self.mDisplayCurrentInventory = self.m_toolBar1.AddLabelTool( wx.ID_ANY, u"tool", wx.Bitmap( u".icon/current_day.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_NORMAL, u"Display current inventory", wx.EmptyString, None )
self.m_toolBar1.AddSeparator()
self.mCurrentBackorder = self.m_toolBar1.AddLabelTool( wx.ID_ANY, u"tool", wx.Bitmap( u".icon/backorder.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_NORMAL, u"Display current backorder", wx.EmptyString, None )
self.mBackorderTrend = self.m_toolBar1.AddLabelTool( wx.ID_ANY, u"tool", wx.Bitmap( u".icon/bo_trend.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_NORMAL, u"display trend of backorder value", wx.EmptyString, None )
self.mAgingBackorder = self.m_toolBar1.AddLabelTool( wx.ID_ANY, u"tool", wx.Bitmap( u".icon/aging_backorder.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_NORMAL, u"display aging backorder", wx.EmptyString, None )
self.m_toolBar1.AddSeparator()
self.mPendingInventory = self.m_toolBar1.AddLabelTool( wx.ID_ANY, u"tool", wx.Bitmap( u".icon/pending.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_NORMAL, u"Display trend for pending inventory value", wx.EmptyString, None )
self.mLowABInventory = self.m_toolBar1.AddLabelTool( wx.ID_ANY, u"tool", wx.Bitmap( u".icon/low_inventory.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_NORMAL, u"Display low inventory for Rank A, B products", wx.EmptyString, None )
self.m_toolBar1.AddSeparator()
self.mInvTrend = self.m_toolBar1.AddLabelTool( wx.ID_ANY, u"tool", wx.Bitmap( u".icon/inventory_trend.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_NORMAL, u"Display inventory trend with selected name", wx.EmptyString, None )
self.m_toolBar1.AddSeparator()
self.m_toolBar1.AddSeparator()
self.m_toolBar1.AddSeparator()
self.m_toolBar1.AddSeparator()
self.m_toolBar1.AddSeparator()
self.m_toolBar1.AddSeparator()
self.m_toolBar1.AddSeparator()
self.m_toolBar1.AddSeparator()
self.mTU = self.m_toolBar1.AddLabelTool( wx.ID_ANY, u"tool", wx.Bitmap( u".icon/bu_TU.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_RADIO, u"Trauma", wx.EmptyString, None )
self.mCMFT = self.m_toolBar1.AddLabelTool( wx.ID_ANY, u"tool", wx.Bitmap( u".icon/bu_CMFT.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_RADIO, u"CMFT", wx.EmptyString, None )
self.mPT = self.m_toolBar1.AddLabelTool( wx.ID_ANY, u"tool", wx.Bitmap( u".icon/bu_PT.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_RADIO, u"PowerTool", wx.EmptyString, None )
self.mJT = self.m_toolBar1.AddLabelTool( wx.ID_ANY, u"tool", wx.Bitmap( u".icon/bu_JT.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_RADIO, u"Joint", wx.EmptyString, None )
self.mSpine = self.m_toolBar1.AddLabelTool( wx.ID_ANY, u"tool", wx.Bitmap( u".icon/bu_Spine.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_RADIO, u"Spine", wx.EmptyString, None )
self.mMT = self.m_toolBar1.AddLabelTool( wx.ID_ANY, u"tool", wx.Bitmap( u".icon/bu_MT.png", wx.BITMAP_TYPE_ANY ), wx.NullBitmap, wx.ITEM_RADIO, u"Mitek", wx.EmptyString, None )
self.m_toolBar1.Realize()
self.Centre( wx.BOTH )
# Connect Events
self.btnSync.Bind( wx.EVT_BUTTON, self.sync_inventory )
self.chkbxWholeBU.Bind( wx.EVT_CHECKBOX, self.bu_level_selected )
self.lstbxCodeSelection.Bind( wx.EVT_LEFT_DCLICK, self.display_h5_inventory )
self.chkbxToday.Bind( wx.EVT_CHECKBOX, self.set_date_as_today )
self.dtpkDate.Bind( wx.adv.EVT_DATE_CHANGED, self.set_checking_date )
self.btnCodeSubmit.Bind( wx.EVT_BUTTON, self.codeSubmit )
self.btnReset.Bind( wx.EVT_BUTTON, self.clear_input )
self.btnInventoryExport.Bind( wx.EVT_BUTTON, self.export_inventory )
self.btnBackorderExport.Bind( wx.EVT_BUTTON, self.export_backorder )
self.btnOutputDownload.Bind( wx.EVT_BUTTON, self.export_listed_data )
self.listCtrlOutput.Bind( wx.EVT_LIST_ITEM_ACTIVATED, self.click_item_in_list )
self.Bind( wx.EVT_MENU, self.exit_dragon, id = self.mExit.GetId() )
self.Bind( wx.EVT_MENU, self.show_about_dialog, id = self.showAbout.GetId() )
self.Bind( wx.EVT_TOOL, self.get_current_inventory_list, id = self.mDisplayCurrentInventory.GetId() )
self.Bind( wx.EVT_TOOL, self.get_current_bo_list, id = self.mCurrentBackorder.GetId() )
self.Bind( wx.EVT_TOOL, self.display_backorder_trend, id = self.mBackorderTrend.GetId() )
self.Bind( wx.EVT_TOOL, self.display_aging_backorder, id = self.mAgingBackorder.GetId() )
self.Bind( wx.EVT_TOOL, self.display_pending_inventory, id = self.mPendingInventory.GetId() )
self.Bind( wx.EVT_TOOL, self.display_low_AB_inventory, id = self.mLowABInventory.GetId() )
self.Bind( wx.EVT_TOOL, self.display_inventory_trend, id = self.mInvTrend.GetId() )
self.Bind( wx.EVT_TOOL, self.select_bu_TU, id = self.mTU.GetId() )
self.Bind( wx.EVT_TOOL, self.select_bu_CMFT, id = self.mCMFT.GetId() )
self.Bind( wx.EVT_TOOL, self.select_bu_PT, id = self.mPT.GetId() )
self.Bind( wx.EVT_TOOL, self.select_bu_JT, id = self.mJT.GetId() )
self.Bind( wx.EVT_TOOL, self.select_bu_SP, id = self.mSpine.GetId() )
self.Bind( wx.EVT_TOOL, self.select_bu_MT, id = self.mMT.GetId() )
def __del__( self ):
pass
    # Virtual event handlers, override them in your derived class
def sync_inventory( self, event ):
event.Skip()
def bu_level_selected( self, event ):
event.Skip()
def display_h5_inventory( self, event ):
event.Skip()
def set_date_as_today( self, event ):
event.Skip()
def set_checking_date( self, event ):
event.Skip()
def codeSubmit( self, event ):
event.Skip()
def clear_input( self, event ):
event.Skip()
def export_inventory( self, event ):
event.Skip()
def export_backorder( self, event ):
event.Skip()
def export_listed_data( self, event ):
event.Skip()
def click_item_in_list( self, event ):
event.Skip()
def exit_dragon( self, event ):
event.Skip()
def show_about_dialog( self, event ):
event.Skip()
def get_current_inventory_list( self, event ):
event.Skip()
def get_current_bo_list( self, event ):
event.Skip()
def display_backorder_trend( self, event ):
event.Skip()
def display_aging_backorder( self, event ):
event.Skip()
def display_pending_inventory( self, event ):
event.Skip()
def display_low_AB_inventory( self, event ):
event.Skip()
def display_inventory_trend( self, event ):
event.Skip()
def select_bu_TU( self, event ):
event.Skip()
def select_bu_CMFT( self, event ):
event.Skip()
def select_bu_PT( self, event ):
event.Skip()
def select_bu_JT( self, event ):
event.Skip()
def select_bu_SP( self, event ):
event.Skip()
def select_bu_MT( self, event ):
event.Skip()
###########################################################################
## Class dlgAbout
###########################################################################
class dlgAbout ( wx.Dialog ):
def __init__( self, parent ):
wx.Dialog.__init__ ( self, parent, id = wx.ID_ANY, title = u"About", pos = wx.DefaultPosition, size = wx.Size( 400,260 ), style = wx.DEFAULT_DIALOG_STYLE|wx.STAY_ON_TOP )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
bSizer11 = wx.BoxSizer( wx.HORIZONTAL )
bSizer13 = wx.BoxSizer( wx.VERTICAL )
bSizer13.Add( ( 0, 10), 0, wx.EXPAND, 5 )
bSizer14 = wx.BoxSizer( wx.HORIZONTAL )
self.bitmapLogo = wx.StaticBitmap( self, wx.ID_ANY, wx.Bitmap( u".icon/logo.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.Size( -1,-1 ), 0 )
bSizer14.Add( self.bitmapLogo, 0, wx.ALL, 5 )
bSizer15 = wx.BoxSizer( wx.VERTICAL )
self.txtCtrlAbout = wx.TextCtrl( self, wx.ID_ANY, u"The program is GUI only for Project Dragon, to collect and centralize data from oneclick and display historical business information.", wx.DefaultPosition, wx.Size( 200,120 ), wx.TE_MULTILINE|wx.TE_NO_VSCROLL|wx.TE_READONLY|wx.BORDER_NONE )
self.txtCtrlAbout.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_BTNFACE ) )
bSizer15.Add( self.txtCtrlAbout, 0, wx.ALL, 5 )
bSizer15.Add( ( 0, 0), 1, wx.EXPAND, 5 )
self.txtAuthor = wx.StaticText( self, wx.ID_ANY, u"by <NAME>", wx.DefaultPosition, wx.DefaultSize, 0 )
self.txtAuthor.Wrap( -1 )
bSizer15.Add( self.txtAuthor, 0, wx.ALL|wx.ALIGN_RIGHT, 5 )
bSizer15.Add( ( 0, 0), 1, wx.EXPAND, 5 )
self.btnAboutClose = wx.Button( self, wx.ID_ANY, u"OK", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer15.Add( self.btnAboutClose, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5 )
bSizer14.Add( bSizer15, 1, wx.EXPAND, 5 )
bSizer13.Add( bSizer14, 1, wx.EXPAND, 5 )
bSizer13.Add( ( 0, 0), 1, wx.EXPAND, 5 )
bSizer11.Add( bSizer13, 1, wx.EXPAND, 5 )
self.SetSizer( bSizer11 )
self.Layout()
self.Centre( wx.BOTH )
# Connect Events
self.btnAboutClose.Bind( wx.EVT_BUTTON, self.close_about_dialog )
def __del__( self ):
pass
    # Virtual event handlers, override them in your derived class
def close_about_dialog( self, event ):
event.Skip()
``` |
{
"source": "jiegzhan/build-elasticsearch-index",
"score": 3
} |
#### File: jiegzhan/build-elasticsearch-index/search_index.py
```python
import sys
import pandas as pd
from pprint import pprint
from elasticsearch import Elasticsearch
host = [{'host': 'localhost', 'port': 9200}]
index_name, index_type = 'news_index', 'news_type'
es = Elasticsearch(host)
def match_all():
"""Match all the documents in the index"""
query = {'query': {'match_all': {}}}
return query
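# A sketch of a more selective query, assuming the indexed documents expose a text
# field named "title" (the field name is illustrative, not defined in this repo):
def match_title(keyword):
    """Match documents whose 'title' field contains the given keyword."""
    return {'query': {'match': {'title': keyword}}}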
def search_index(query):
requests = []
header = {'index': index_name, 'doc_type': index_type}
requests.extend([header, query])
result = es.msearch(body=requests)['responses'][0]
pprint(result)
if __name__ == '__main__':
search_index(match_all())
``` |
{
"source": "JiehangXie/E4_cvxEDA",
"score": 3
} |
#### File: JiehangXie/E4_cvxEDA/AccelerometerFeatureExtractionScript.py
```python
import numpy as np
import pandas as pd
import scipy.signal as scisig
import os
import matplotlib.pyplot as plt
from load_files import getInputLoadFile, getOutputPath, get_user_input
DEBUG = True
SAMPLING_RATE = 8
ONE_MINUTE_S = 60
THIRTY_MIN_S = ONE_MINUTE_S*30
SECONDS_IN_DAY = 24*60*60
STILLNESS_MOTION_THRESHOLD = .1
PERCENT_STILLNESS_THRESHOLD = .95
STEP_DIFFERENCE_THRESHOLD = 0.3
def computeAllAccelerometerFeatures(data, time_frames):
if DEBUG: print("\t\tcomputing motion...")
motion = computeMotion(data['AccelX'], data['AccelY'], data['AccelZ'])
if DEBUG: print("\t\tcomputing steps...")
steps = computeSteps(motion)
if DEBUG: print("\t\tcomputing stillness...")
stillness = computeStillness(motion)
features = []
for time_frame in time_frames:
start = time_frame[0]
end = time_frame[1]
start1Hz = int(start / SAMPLING_RATE)
end1Hz = end if end == -1 else int(end / SAMPLING_RATE)
if DEBUG: print("\t\tcomputing features for time frame. Start index: "+ str(start)+ " end index: "+ str(end))
time_frame_feats = computeAccelerometerFeaturesOverOneTimeFrame(motion[start:end],
steps[start:end],
stillness[start1Hz:end1Hz])
features.append(time_frame_feats)
return features, steps, motion
def computeMotion(acc1, acc2, acc3):
'''Aggregates 3-axis accelerometer signal into a single motion signal'''
return np.sqrt(np.array(acc1)**2 + np.array(acc2)**2 + np.array(acc3)**2)
def computeSteps(motion):
'''Determines the location of steps from the aggregated accelerometer signal.
Signal is low-pass filtered, then minimums are located in the signal. For each
min, if the max absolute derivative (first difference) immediately surrounding
it is greater than a threshold, it is counted as a step.
Args:
motion: root mean squared 3 axis acceleration
Returns:
steps: binary array at 8Hz which is 1 everywhere there is a step'''
filtered_signal = filterSignalFIR(motion, 2, 256)
diff = filtered_signal[1:]-filtered_signal[:-1]
mins = scisig.argrelextrema(filtered_signal, np.less)[0]
steps = [0] * len(filtered_signal)
for m in mins:
if m <= 4 or m >= len(diff) - 4:
continue
if max(abs(diff[m-4:m+4])) > STEP_DIFFERENCE_THRESHOLD:
steps[m] = 1.0
return steps
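# Minimal usage sketch (array names are illustrative): given 8 Hz accelerometer
# columns ax, ay, az, a step count for the whole file could be obtained with
#     motion = computeMotion(ax, ay, az)
#     steps = computeSteps(motion)
#     step_count = int(sum(steps))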
def filterSignalFIR(eda, cutoff=0.4, numtaps=64):
f = cutoff/(SAMPLING_RATE/2.0)
FIR_coeff = scisig.firwin(numtaps,f)
return scisig.lfilter(FIR_coeff,1,eda)
def computeStillness(motion):
'''Locates periods in which the person is still or motionless.
Total acceleration must be less than a threshold for 95 percent of one
minute in order for that minute to count as still
Args:
motion: an array containing the root mean squared acceleration
Returns:
A 1Hz array that is 1 for each second belonging to a still period, 0 otherwise
'''
diff = motion[1:]-motion[:-1]
momentary_stillness = diff < STILLNESS_MOTION_THRESHOLD
    momentary_stillness = np.append(momentary_stillness, 0)  # keep the array the same length as the full-day signal
num_minutes_in_day = 24*60
#create array indicating whether person was still or not for each second of the day
#to be still the momentary_stillness signal must be true for more than 95% of the minute
#containing that second
second_stillness = [0]*SECONDS_IN_DAY
for i in range(num_minutes_in_day):
hours_start = int(i / 60)
mins_start = i % 60
hours_end = int((i+1) / 60)
mins_end = (i+1) % 60
start_idx = getIndexFromTimestamp(hours_start, mins_start)
end_idx = getIndexFromTimestamp(hours_end, mins_end)
this_minute = momentary_stillness[start_idx:end_idx]
minute_stillness = sum(this_minute) > PERCENT_STILLNESS_THRESHOLD*(60*SAMPLING_RATE)
second_idx = int(start_idx/8)
for si in range(second_idx,second_idx+60):
second_stillness[si] = float(minute_stillness)
return second_stillness
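# Worked example of the rule above: at 8 Hz a minute holds 60 * 8 = 480 samples, so a
# minute only counts as "still" when more than 0.95 * 480 = 456 of its first-difference
# samples fall below STILLNESS_MOTION_THRESHOLD.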
def computeAccelerometerFeaturesOverOneTimeFrame(motion, steps, stillness):
''' Computes all available features for a time period. Incoming signals are assumed to be from
only that time period.
Args:
motion: 8Hz root mean squared 3 axis acceleration
steps: 8Hz binary signal that is 1 if there is a step
stillness: 1Hz 1 if the person was still during this second, 0 otherwise
Returns:
A list of features containing (in order):
-Step count number of steps detected
-mean step time during movement average number of samples between two steps (aggregated first to 1 minute,
then we take the mean of only the parts of this signal occuring during movement)
-percent stillness percentage of time the person spent nearly motionless
'''
features = []
features.extend(computeStepFeatures(steps,stillness))
features.append(countStillness(stillness))
return features
def computeStepFeatures(steps,stillness):
'''Counts the total number of steps over a given period,
as well as the average time between steps (meant to approximate walking speed)
Args:
        steps: a binary array at 8 Hz that is 1 every time there is a step
Returns:
sum: the number of steps in a period
        mean step time: average number of samples between two steps during movement'''
sum_steps = float(sum(steps))
step_indices = np.nonzero(steps)[0]
diff = step_indices[1:]-step_indices[:-1]
#ensure length of step difference array is the same so we can get the actual locations of step differences
timed_step_diff = np.empty(len(steps)) * np.nan
timed_step_diff[step_indices[:len(diff)]] = diff
signal_length_1s = len(stillness)
signal_length_1min = int(signal_length_1s / 60)
# if there aren't enough steps during this period, cannot accurately compute mean step diff
if len(timed_step_diff) < signal_length_1min:
return [sum_steps, np.nan]
agg_stillness = aggregateSignal(stillness, signal_length_1min, 'max')
agg_step_diff = aggregateSignal(timed_step_diff, signal_length_1min, 'mean')
movement_indices = [i for i in range(len(agg_stillness)) if agg_stillness[i] == 0.0]
step_diff_during_movement = agg_step_diff[movement_indices]
return [sum_steps,round(np.nanmean(step_diff_during_movement),10)]
def countStillness(stillness):
'''Counts the total percentage of time spent still over a period
Args:
        stillness: a binary array at 1Hz that is 1 if that second is part of a still period
Returns:
the percentage time spent still over a period'''
return float(sum(stillness)) / float(len(stillness))
def aggregateSignal(signal, new_signal_length, agg_method='sum'):
new_signal = np.zeros(new_signal_length)
samples_per_bucket = int(len(signal) / new_signal_length)
#the new signal length must be large enough that there is at least 1 sample per bucket
assert(samples_per_bucket > 0)
for i in range(new_signal_length):
if agg_method == 'sum':
new_signal[i] = np.nansum(signal[i*samples_per_bucket:(i+1)*samples_per_bucket])
elif agg_method == 'percent':
new_signal[i] = np.nansum(signal[i*samples_per_bucket:(i+1)*samples_per_bucket]) / samples_per_bucket
elif agg_method == 'mean':
new_signal[i] = np.nanmean(signal[i*samples_per_bucket:(i+1)*samples_per_bucket])
elif agg_method == 'max':
new_signal[i] = np.nanmax(signal[i*samples_per_bucket:(i+1)*samples_per_bucket])
return new_signal
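# Example: aggregateSignal(np.arange(24), 3, 'mean') buckets the 24 samples into three
# groups of eight and returns their means, i.e. array([ 3.5, 11.5, 19.5]).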
def getIndexFromTimestamp(hours, mins=0):
return ((hours * 60) + mins) * 60 * SAMPLING_RATE
def inputTimeFrames():
'''Allows user to choose the time frames over which they compute accelerometer features.'''
time_frames = []
print("Accelerometer features can be extracted over different time periods.")
cont = get_user_input("If you would like to enter a time period over which to compute features, enter 'y', or press enter to compute features over the entire file.")
while cont == 'y' or cont == 'Y':
start = int(get_user_input("Enter the starting hour of the time period (hour 0 is when the file starts):"))
end = int(get_user_input("Enter the ending hour of the time period (hour 0 is when the file starts; use -1 for the end of the file):"))
start = getIndexFromTimestamp(int(start))
if end != -1:
end = getIndexFromTimestamp(int(end))
time_frames.append([start,end])
print("Great! Now computing features for the following time periods:"+ str(time_frames))
cont = get_user_input("To add another time period, enter 'y'. To finish, press enter.")
if len(time_frames) == 0:
time_frames = [[0,-1]] # the whole file
return time_frames
def saveFeaturesToFile(features, time_frames, output_file):
of = open(output_file, 'w')
of.write("Time period start hour, Time period end hour, Step count, Mean step time during movement, Percent stillness\n")
tf_i = 0
for tf in time_frames:
output_str = str(tf[0]) + ' , ' + str(tf[1])
for feat in features[tf_i]:
output_str += ' , ' + str(feat)
tf_i += 1
of.write(output_str + '\n')
of.close()
print("Saved features to file"+ output_file)
# draws a graph of the motion data with the detected steps marked on it
# assumes that the 'data' dataframe already contains the 'steps' and 'motion' columns
def plotSteps(data, x_seconds, sampleRate = SAMPLING_RATE):
if x_seconds:
time_m = np.arange(0,len(data))/float(sampleRate)
realign = 128/(sampleRate)
else:
time_m = np.arange(0,len(data))/(sampleRate*60.)
realign = 128/(sampleRate*60.)
data_min = data['motion'].min()
data_max = data['motion'].max()
#Plot the data with the Peaks marked
plt.figure(1,figsize=(20, 5))
plt.plot(time_m,data['motion'])
for i in range(len(data)):
if data.iloc[i]["steps"]==1:
x_loc = time_m[i] - realign
plt.plot([x_loc,x_loc],[data_min,data_max],"k")
step_height = data_max * 1.15
#data['steps_plot'] = data['steps'] * step_height
#plt.plot(time_m,data['steps_plot'],'k')
plt.xlim([0,time_m[-1]])
plt.ylim([data_min-.1,data_max+.1])
plt.title('Motion with Detected "Steps" marked')
plt.ylabel('g')
if x_seconds:
plt.xlabel('Time (s)')
else:
plt.xlabel('Time (min)')
plt.show()
if __name__ == "__main__":
print("This script will extract features related to accelerometer data.")
data, filepath_confirm = getInputLoadFile()
output_path = getOutputPath()
time_frames = inputTimeFrames()
features, steps, motion = computeAllAccelerometerFeatures(data, time_frames)
data["steps"] = steps
data["motion"] = motion
saveFeaturesToFile(features, time_frames, output_path)
print("")
plot_ans = get_user_input("Do you want to plot the detected steps? (y/n): ")
if 'y' in plot_ans:
secs_ans = get_user_input("Would you like the x-axis to be in seconds or minutes? (sec/min): ")
if 'sec' in secs_ans:
x_seconds=True
else:
x_seconds=False
plotSteps(data, x_seconds)
else:
print("\tOkay, script will not produce a plot")
``` |
{
"source": "JiehangXie/PaddleSpeech",
"score": 3
} |
#### File: cc-cedict/local/parser.py
```python
import json
import sys
# usage: bin ccedict dump.json
with open(sys.argv[1], 'rt') as file:
text = file.read()
lines = text.split('\n')
dict_lines = list(lines)
def parse_line(line):
parsed = {}
if line == '':
dict_lines.remove(line)
return 0
if line.startswith('#'):
return 0
if line.startswith('%'):
return 0
line = line.rstrip('/')
line = line.split('/')
if len(line) <= 1:
return 0
english = line[1]
char_and_pinyin = line[0].split('[')
characters = char_and_pinyin[0]
characters = characters.split()
traditional = characters[0]
simplified = characters[1]
pinyin = char_and_pinyin[1]
pinyin = pinyin.rstrip()
pinyin = pinyin.rstrip("]")
parsed['traditional'] = traditional
parsed['simplified'] = simplified
parsed['pinyin'] = pinyin
parsed['english'] = english
list_of_dicts.append(parsed)
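# Illustrative CC-CEDICT entry handled by parse_line() above, in the usual
# "Traditional Simplified [pinyin] /gloss/" layout:
#     中國 中国 [Zhong1 guo2] /China/
# which is stored as {'traditional': '中國', 'simplified': '中国',
#                     'pinyin': 'Zhong1 guo2', 'english': 'China'}
# (only the first gloss of an entry is kept, since english = line[1]).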
def remove_surnames():
for x in range(len(list_of_dicts) - 1, -1, -1):
if "surname " in list_of_dicts[x]['english']:
            # guard the x + 1 lookup so the last entry cannot raise an IndexError
            if x + 1 < len(list_of_dicts) and \
                    list_of_dicts[x]['traditional'] == list_of_dicts[x + 1]['traditional']:
                list_of_dicts.pop(x)
def main():
#make each line into a dictionary
print("Parsing dictionary . . .")
for line in dict_lines:
parse_line(line)
#remove entries for surnames from the data (optional):
print("Removing Surnames . . .")
remove_surnames()
print("Saving to database (this may take a few minutes) . . .")
with open(sys.argv[2], 'wt') as fout:
for one_dict in list_of_dicts:
json_str = json.dumps(one_dict)
fout.write(json_str + "\n")
print('Done!')
list_of_dicts = []
main()
```
#### File: st1/local/espnet_json_to_manifest.py
```python
import argparse
import json
def main(args):
with open(args.json_file, 'r') as fin:
data_json = json.load(fin)
with open(args.manifest_file, 'w') as fout:
for key, value in data_json['utts'].items():
value['utt'] = key
fout.write(json.dumps(value, ensure_ascii=False))
fout.write("\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--json-file', type=str, default=None, help="espnet data json file.")
parser.add_argument(
'--manifest-file',
type=str,
default='manifest.train',
help='manifest data json line file.')
args = parser.parse_args()
main(args)
```
#### File: paddleaudio/features/window.py
```python
import math
from typing import List
from typing import Tuple
from typing import Union
import paddle
from paddle import Tensor
__all__ = [
'get_window',
]
def _cat(a: List[Tensor], data_type: str) -> Tensor:
l = [paddle.to_tensor(_a, data_type) for _a in a]
return paddle.concat(l)
def _acosh(x: Union[Tensor, float]) -> Tensor:
if isinstance(x, float):
return math.log(x + math.sqrt(x**2 - 1))
return paddle.log(x + paddle.sqrt(paddle.square(x) - 1))
def _extend(M: int, sym: bool) -> Tuple[int, bool]:
"""Extend window by 1 sample if needed for DFT-even symmetry"""
if not sym:
return M + 1, True
else:
return M, False
def _len_guards(M: int) -> bool:
"""Handle small or incorrect window lengths"""
if int(M) != M or M < 0:
raise ValueError('Window length M must be a non-negative integer')
return M <= 1
def _truncate(w: Tensor, needed: bool) -> Tensor:
"""Truncate window by 1 sample if needed for DFT-even symmetry"""
if needed:
return w[:-1]
else:
return w
def general_gaussian(M: int, p, sig, sym: bool=True,
dtype: str='float64') -> Tensor:
"""Compute a window with a generalized Gaussian shape.
This function is consistent with scipy.signal.windows.general_gaussian().
"""
if _len_guards(M):
return paddle.ones((M, ), dtype=dtype)
M, needs_trunc = _extend(M, sym)
n = paddle.arange(0, M, dtype=dtype) - (M - 1.0) / 2.0
w = paddle.exp(-0.5 * paddle.abs(n / sig)**(2 * p))
return _truncate(w, needs_trunc)
def general_hamming(M: int, alpha: float, sym: bool=True,
dtype: str='float64') -> Tensor:
"""Compute a generalized Hamming window.
This function is consistent with scipy.signal.windows.general_hamming()
"""
return general_cosine(M, [alpha, 1. - alpha], sym, dtype=dtype)
def taylor(M: int,
nbar=4,
sll=30,
norm=True,
sym: bool=True,
dtype: str='float64') -> Tensor:
"""Compute a Taylor window.
The Taylor window taper function approximates the Dolph-Chebyshev window's
constant sidelobe level for a parameterized number of near-in sidelobes.
Parameters:
M(int): window size
        nbar, sll, norm: the window-specific parameters.
sym(bool):whether to return symmetric window.
The default value is True
dtype(str): the datatype of returned tensor.
Returns:
Tensor: the window tensor
"""
if _len_guards(M):
return paddle.ones((M, ), dtype=dtype)
M, needs_trunc = _extend(M, sym)
# Original text uses a negative sidelobe level parameter and then negates
# it in the calculation of B. To keep consistent with other methods we
# assume the sidelobe level parameter to be positive.
B = 10**(sll / 20)
A = _acosh(B) / math.pi
s2 = nbar**2 / (A**2 + (nbar - 0.5)**2)
ma = paddle.arange(1, nbar, dtype=dtype)
Fm = paddle.empty((nbar - 1, ), dtype=dtype)
signs = paddle.empty_like(ma)
signs[::2] = 1
signs[1::2] = -1
m2 = ma * ma
for mi in range(len(ma)):
numer = signs[mi] * paddle.prod(1 - m2[mi] / s2 / (A**2 + (ma - 0.5)**2
))
if mi == 0:
denom = 2 * paddle.prod(1 - m2[mi] / m2[mi + 1:])
elif mi == len(ma) - 1:
denom = 2 * paddle.prod(1 - m2[mi] / m2[:mi])
else:
denom = 2 * paddle.prod(1 - m2[mi] / m2[:mi]) * paddle.prod(1 - m2[
mi] / m2[mi + 1:])
Fm[mi] = numer / denom
def W(n):
return 1 + 2 * paddle.matmul(
Fm.unsqueeze(0),
paddle.cos(2 * math.pi * ma.unsqueeze(1) * (n - M / 2. + 0.5) / M))
w = W(paddle.arange(0, M, dtype=dtype))
# normalize (Note that this is not described in the original text [1])
if norm:
scale = 1.0 / W((M - 1) / 2)
w *= scale
w = w.squeeze()
return _truncate(w, needs_trunc)
def general_cosine(M: int, a: List[float], sym: bool=True,
                   dtype: str='float64') -> Tensor:
"""Compute a generic weighted sum of cosine terms window.
This function is consistent with scipy.signal.windows.general_cosine().
"""
if _len_guards(M):
return paddle.ones((M, ), dtype=dtype)
M, needs_trunc = _extend(M, sym)
fac = paddle.linspace(-math.pi, math.pi, M, dtype=dtype)
w = paddle.zeros((M, ), dtype=dtype)
for k in range(len(a)):
w += a[k] * paddle.cos(k * fac)
return _truncate(w, needs_trunc)
def hamming(M: int, sym: bool=True, dtype: str='float64') -> Tensor:
"""Compute a Hamming window.
The Hamming window is a taper formed by using a raised cosine with
non-zero endpoints, optimized to minimize the nearest side lobe.
Parameters:
M(int): window size
sym(bool):whether to return symmetric window.
The default value is True
dtype(str): the datatype of returned tensor.
Returns:
Tensor: the window tensor
"""
return general_hamming(M, 0.54, sym, dtype=dtype)
def hann(M: int, sym: bool=True, dtype: str='float64') -> Tensor:
"""Compute a Hann window.
The Hann window is a taper formed by using a raised cosine or sine-squared
with ends that touch zero.
Parameters:
M(int): window size
sym(bool):whether to return symmetric window.
The default value is True
dtype(str): the datatype of returned tensor.
Returns:
Tensor: the window tensor
"""
return general_hamming(M, 0.5, sym, dtype=dtype)
def tukey(M: int, alpha=0.5, sym: bool=True, dtype: str='float64') -> Tensor:
"""Compute a Tukey window.
The Tukey window is also known as a tapered cosine window.
Parameters:
M(int): window size
sym(bool):whether to return symmetric window.
The default value is True
dtype(str): the datatype of returned tensor.
Returns:
Tensor: the window tensor
"""
if _len_guards(M):
return paddle.ones((M, ), dtype=dtype)
if alpha <= 0:
return paddle.ones((M, ), dtype=dtype)
elif alpha >= 1.0:
        return hann(M, sym=sym, dtype=dtype)
M, needs_trunc = _extend(M, sym)
n = paddle.arange(0, M, dtype=dtype)
width = int(alpha * (M - 1) / 2.0)
n1 = n[0:width + 1]
n2 = n[width + 1:M - width - 1]
n3 = n[M - width - 1:]
w1 = 0.5 * (1 + paddle.cos(math.pi * (-1 + 2.0 * n1 / alpha / (M - 1))))
w2 = paddle.ones(n2.shape, dtype=dtype)
w3 = 0.5 * (1 + paddle.cos(math.pi * (-2.0 / alpha + 1 + 2.0 * n3 / alpha /
(M - 1))))
w = paddle.concat([w1, w2, w3])
return _truncate(w, needs_trunc)
def kaiser(M: int, beta: float, sym: bool=True, dtype: str='float64') -> Tensor:
"""Compute a Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters:
M(int): window size.
beta(float): the window-specific parameter.
sym(bool):whether to return symmetric window.
The default value is True
Returns:
Tensor: the window tensor
"""
raise NotImplementedError()
def gaussian(M: int, std: float, sym: bool=True,
dtype: str='float64') -> Tensor:
"""Compute a Gaussian window.
    The Gaussian window has a Gaussian shape defined by the standard deviation (std).
Parameters:
M(int): window size.
std(float): the window-specific parameter.
sym(bool):whether to return symmetric window.
The default value is True
dtype(str): the datatype of returned tensor.
Returns:
Tensor: the window tensor
"""
if _len_guards(M):
return paddle.ones((M, ), dtype=dtype)
M, needs_trunc = _extend(M, sym)
n = paddle.arange(0, M, dtype=dtype) - (M - 1.0) / 2.0
sig2 = 2 * std * std
w = paddle.exp(-n**2 / sig2)
return _truncate(w, needs_trunc)
def exponential(M: int,
center=None,
tau=1.,
sym: bool=True,
dtype: str='float64') -> Tensor:
"""Compute an exponential (or Poisson) window.
Parameters:
M(int): window size.
tau(float): the window-specific parameter.
sym(bool):whether to return symmetric window.
The default value is True
dtype(str): the datatype of returned tensor.
Returns:
Tensor: the window tensor
"""
if sym and center is not None:
raise ValueError("If sym==True, center must be None.")
if _len_guards(M):
return paddle.ones((M, ), dtype=dtype)
M, needs_trunc = _extend(M, sym)
if center is None:
center = (M - 1) / 2
n = paddle.arange(0, M, dtype=dtype)
w = paddle.exp(-paddle.abs(n - center) / tau)
return _truncate(w, needs_trunc)
def triang(M: int, sym: bool=True, dtype: str='float64') -> Tensor:
"""Compute a triangular window.
Parameters:
M(int): window size.
sym(bool):whether to return symmetric window.
The default value is True
dtype(str): the datatype of returned tensor.
Returns:
Tensor: the window tensor
"""
if _len_guards(M):
return paddle.ones((M, ), dtype=dtype)
M, needs_trunc = _extend(M, sym)
n = paddle.arange(1, (M + 1) // 2 + 1, dtype=dtype)
if M % 2 == 0:
w = (2 * n - 1.0) / M
w = paddle.concat([w, w[::-1]])
else:
w = 2 * n / (M + 1.0)
w = paddle.concat([w, w[-2::-1]])
return _truncate(w, needs_trunc)
def bohman(M: int, sym: bool=True, dtype: str='float64') -> Tensor:
"""Compute a Bohman window.
The Bohman window is the autocorrelation of a cosine window.
Parameters:
M(int): window size.
sym(bool):whether to return symmetric window.
The default value is True
dtype(str): the datatype of returned tensor.
Returns:
Tensor: the window tensor
"""
if _len_guards(M):
return paddle.ones((M, ), dtype=dtype)
M, needs_trunc = _extend(M, sym)
fac = paddle.abs(paddle.linspace(-1, 1, M, dtype=dtype)[1:-1])
w = (1 - fac) * paddle.cos(math.pi * fac) + 1.0 / math.pi * paddle.sin(
math.pi * fac)
w = _cat([0, w, 0], dtype)
return _truncate(w, needs_trunc)
def blackman(M: int, sym: bool=True, dtype: str='float64') -> Tensor:
"""Compute a Blackman window.
The Blackman window is a taper formed by using the first three terms of
a summation of cosines. It was designed to have close to the minimal
leakage possible. It is close to optimal, only slightly worse than a
Kaiser window.
Parameters:
M(int): window size.
sym(bool):whether to return symmetric window.
The default value is True
dtype(str): the datatype of returned tensor.
Returns:
Tensor: the window tensor
"""
return general_cosine(M, [0.42, 0.50, 0.08], sym, dtype=dtype)
def cosine(M: int, sym: bool=True, dtype: str='float64') -> Tensor:
"""Compute a window with a simple cosine shape.
Parameters:
M(int): window size.
sym(bool):whether to return symmetric window.
The default value is True
dtype(str): the datatype of returned tensor.
Returns:
Tensor: the window tensor
"""
if _len_guards(M):
return paddle.ones((M, ), dtype=dtype)
M, needs_trunc = _extend(M, sym)
w = paddle.sin(math.pi / M * (paddle.arange(0, M, dtype=dtype) + .5))
return _truncate(w, needs_trunc)
def get_window(window: Union[str, Tuple[str, float]],
win_length: int,
fftbins: bool=True,
dtype: str='float64') -> Tensor:
"""Return a window of a given length and type.
Parameters:
window(str|(str,float)): the type of window to create.
win_length(int): the number of samples in the window.
fftbins(bool): If True, create a "periodic" window. Otherwise,
create a "symmetric" window, for use in filter design.
Returns:
The window represented as a tensor.
"""
sym = not fftbins
args = ()
if isinstance(window, tuple):
winstr = window[0]
if len(window) > 1:
args = window[1:]
elif isinstance(window, str):
if window in ['gaussian', 'exponential']:
raise ValueError("The '" + window + "' window needs one or "
"more parameters -- pass a tuple.")
else:
winstr = window
else:
raise ValueError("%s as window type is not supported." %
str(type(window)))
    try:
        winfunc = eval(winstr)
    except NameError as e:
        # eval() raises NameError (not KeyError) for an unknown window name.
        raise ValueError("Unknown window type.") from e
params = (win_length, ) + args
kwargs = {'sym': sym}
return winfunc(*params, dtype=dtype, **kwargs)
```
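A short usage sketch for the `get_window` function defined above (it assumes PaddlePaddle is installed, as the module requires): parameter-free windows are requested by name, while windows such as `gaussian` or `exponential` must be passed as a `(name, param)` tuple, as the error branch enforces.
```python
# Periodic (fftbins=True) Hann window of length 512, e.g. for STFT analysis.
hann_win = get_window("hann", 512)
# Parameterized windows are given as a (name, param) tuple, e.g. a Gaussian with std=7.0.
gauss_win = get_window(("gaussian", 7.0), 512, fftbins=False)
print(hann_win.shape, gauss_win.shape)  # [512] [512]
```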
#### File: paddlespeech/cli/log.py
```python
import functools
import logging
__all__ = [
'logger',
]
class Logger(object):
def __init__(self, name: str=None):
name = 'PaddleSpeech' if not name else name
self.logger = logging.getLogger(name)
log_config = {
'DEBUG': 10,
'INFO': 20,
'TRAIN': 21,
'EVAL': 22,
'WARNING': 30,
'ERROR': 40,
'CRITICAL': 50,
'EXCEPTION': 100,
}
for key, level in log_config.items():
logging.addLevelName(level, key)
if key == 'EXCEPTION':
self.__dict__[key.lower()] = self.logger.exception
else:
self.__dict__[key.lower()] = functools.partial(self.__call__,
level)
self.format = logging.Formatter(
fmt='[%(asctime)-15s] [%(levelname)8s] - %(message)s')
self.handler = logging.StreamHandler()
self.handler.setFormatter(self.format)
self.logger.addHandler(self.handler)
self.logger.setLevel(logging.DEBUG)
self.logger.propagate = False
    def __call__(self, log_level: int, msg: str):
self.logger.log(log_level, msg)
logger = Logger()
```
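A quick sketch of how the module-level `logger` is typically used; besides the standard levels, the custom `TRAIN` and `EVAL` levels registered in `__init__` are exposed as `logger.train` and `logger.eval`.
```python
# The module-level `logger` defined above already has a StreamHandler attached.
logger.info("loading checkpoint ...")
logger.train("epoch 1, step 100, loss 2.34")   # custom TRAIN level (21)
logger.eval("epoch 1, valid loss 2.10")        # custom EVAL level (22)
try:
    1 / 0
except ZeroDivisionError:
    logger.exception("caught an error")        # logs the message plus the traceback
```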
#### File: transformer/bin/cacu_perplexity.py
```python
import sys
import configargparse
def get_parser():
"""Get default arguments."""
parser = configargparse.ArgumentParser(
description="The parser for caculating the perplexity of transformer language model ",
config_file_parser_class=configargparse.YAMLConfigFileParser,
formatter_class=configargparse.ArgumentDefaultsHelpFormatter, )
parser.add_argument(
"--rnnlm", type=str, default=None, help="RNNLM model file to read")
parser.add_argument(
"--rnnlm-conf",
type=str,
default=None,
help="RNNLM model config file to read")
parser.add_argument(
"--vocab_path",
type=str,
default=None,
help="vocab path to for token2id")
parser.add_argument(
"--bpeprefix",
type=str,
default=None,
help="The path of bpeprefix for loading")
parser.add_argument(
"--text_path",
type=str,
default=None,
help="The path of text file for testing ")
parser.add_argument(
"--ngpu",
type=int,
default=0,
help="The number of gpu to use, 0 for using cpu instead")
parser.add_argument(
"--dtype",
choices=("float16", "float32", "float64"),
default="float32",
help="Float precision (only available in --api v2)", )
parser.add_argument(
"--output_dir",
type=str,
default=".",
help="The output directory to store the sentence PPL")
return parser
def main(args):
parser = get_parser()
args = parser.parse_args(args)
from paddlespeech.s2t.exps.lm.transformer.lm_cacu_perplexity import run_get_perplexity
run_get_perplexity(args)
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: frontend/augmentor/augmentation.py
```python
import json
import os
from collections.abc import Sequence
from inspect import signature
from pprint import pformat
import numpy as np
from paddlespeech.s2t.frontend.augmentor.base import AugmentorBase
from paddlespeech.s2t.utils.dynamic_import import dynamic_import
from paddlespeech.s2t.utils.log import Log
logger = Log(__name__).getlog()
__all__ = ["AugmentationPipeline"]
import_alias = dict(
volume="paddlespeech.s2t.frontend.augmentor.impulse_response:VolumePerturbAugmentor",
shift="paddlespeech.s2t.frontend.augmentor.shift_perturb:ShiftPerturbAugmentor",
speed="paddlespeech.s2t.frontend.augmentor.speed_perturb:SpeedPerturbAugmentor",
resample="paddlespeech.s2t.frontend.augmentor.resample:ResampleAugmentor",
bayesian_normal="paddlespeech.s2t.frontend.augmentor.online_bayesian_normalization:OnlineBayesianNormalizationAugmentor",
noise="paddlespeech.s2t.frontend.augmentor.noise_perturb:NoisePerturbAugmentor",
impulse="paddlespeech.s2t.frontend.augmentor.impulse_response:ImpulseResponseAugmentor",
specaug="paddlespeech.s2t.frontend.augmentor.spec_augment:SpecAugmentor", )
class AugmentationPipeline():
"""Build a pre-processing pipeline with various augmentation models.Such a
data augmentation pipeline is oftern leveraged to augment the training
samples to make the model invariant to certain types of perturbations in the
real world, improving model's generalization ability.
The pipeline is built according the the augmentation configuration in json
string, e.g.
.. code-block::
[ {
"type": "noise",
"params": {"min_snr_dB": 10,
"max_snr_dB": 20,
"noise_manifest_path": "datasets/manifest.noise"},
"prob": 0.0
},
{
"type": "speed",
"params": {"min_speed_rate": 0.9,
"max_speed_rate": 1.1},
"prob": 1.0
},
{
"type": "shift",
"params": {"min_shift_ms": -5,
"max_shift_ms": 5},
"prob": 1.0
},
{
"type": "volume",
"params": {"min_gain_dBFS": -10,
"max_gain_dBFS": 10},
"prob": 0.0
},
{
"type": "bayesian_normal",
"params": {"target_db": -20,
"prior_db": -20,
"prior_samples": 100},
"prob": 0.0
}
]
    This example configuration registers five augmentors in the pipeline.
    "prob" indicates the probability that the corresponding augmentor takes
    effect for a given sample; with "prob" set to zero an augmentor never
    takes effect, so here only the speed and shift perturbations are active.
Params:
preprocess_conf(str): Augmentation configuration in `json file` or `json string`.
random_seed(int): Random seed.
Raises:
        ValueError: If the augmentation json config is in an incorrect format.
"""
SPEC_TYPES = {'specaug'}
def __init__(self, preprocess_conf: str, random_seed: int=0):
self._rng = np.random.RandomState(random_seed)
self.conf = {'mode': 'sequential', 'process': []}
if preprocess_conf:
if os.path.isfile(preprocess_conf):
# json file
with open(preprocess_conf, 'r') as fin:
json_string = fin.read()
else:
# json string
json_string = preprocess_conf
process = json.loads(json_string)
self.conf['process'] += process
self._augmentors, self._rates = self._parse_pipeline_from('all')
self._audio_augmentors, self._audio_rates = self._parse_pipeline_from(
'audio')
self._spec_augmentors, self._spec_rates = self._parse_pipeline_from(
'feature')
logger.info(
f"Augmentation: {pformat(list(zip(self._augmentors, self._rates)))}")
def __call__(self, xs, uttid_list=None, **kwargs):
if not isinstance(xs, Sequence):
is_batch = False
xs = [xs]
else:
is_batch = True
if isinstance(uttid_list, str):
uttid_list = [uttid_list for _ in range(len(xs))]
if self.conf.get("mode", "sequential") == "sequential":
for idx, (func, rate) in enumerate(
zip(self._augmentors, self._rates), 0):
if self._rng.uniform(0., 1.) >= rate:
continue
# Derive only the args which the func has
try:
param = signature(func).parameters
except ValueError:
# Some function, e.g. built-in function, are failed
param = {}
_kwargs = {k: v for k, v in kwargs.items() if k in param}
try:
if uttid_list is not None and "uttid" in param:
xs = [
func(x, u, **_kwargs)
for x, u in zip(xs, uttid_list)
]
else:
xs = [func(x, **_kwargs) for x in xs]
except Exception:
logger.fatal("Catch a exception from {}th func: {}".format(
idx, func))
raise
else:
raise NotImplementedError(
"Not supporting mode={}".format(self.conf["mode"]))
if is_batch:
return xs
else:
return xs[0]
def transform_audio(self, audio_segment):
"""Run the pre-processing pipeline for data augmentation.
Note that this is an in-place transformation.
:param audio_segment: Audio segment to process.
        :type audio_segment: AudioSegment|SpeechSegment
"""
for augmentor, rate in zip(self._audio_augmentors, self._audio_rates):
if self._rng.uniform(0., 1.) < rate:
augmentor.transform_audio(audio_segment)
def transform_feature(self, spec_segment):
"""spectrogram augmentation.
Args:
spec_segment (np.ndarray): audio feature, (D, T).
"""
for augmentor, rate in zip(self._spec_augmentors, self._spec_rates):
if self._rng.uniform(0., 1.) < rate:
spec_segment = augmentor.transform_feature(spec_segment)
return spec_segment
def _parse_pipeline_from(self, aug_type='all'):
"""Parse the config json to build a augmentation pipelien."""
assert aug_type in ('audio', 'feature', 'all'), aug_type
audio_confs = []
feature_confs = []
all_confs = []
for config in self.conf['process']:
all_confs.append(config)
if config["type"] in self.SPEC_TYPES:
feature_confs.append(config)
else:
audio_confs.append(config)
if aug_type == 'audio':
aug_confs = audio_confs
elif aug_type == 'feature':
aug_confs = feature_confs
elif aug_type == 'all':
aug_confs = all_confs
else:
raise ValueError(f"Not support: {aug_type}")
augmentors = [
self._get_augmentor(config["type"], config["params"])
for config in aug_confs
]
rates = [config["prob"] for config in aug_confs]
return augmentors, rates
def _get_augmentor(self, augmentor_type, params):
"""Return an augmentation model by the type name, and pass in params."""
class_obj = dynamic_import(augmentor_type, import_alias)
assert issubclass(class_obj, AugmentorBase)
try:
obj = class_obj(self._rng, **params)
except Exception:
raise ValueError("Unknown augmentor type [%s]." % augmentor_type)
return obj
```
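A minimal construction sketch, assuming the paddlespeech augmentor modules referenced in `import_alias` are importable; the JSON string follows the format documented in the class docstring, and only augmentors with a non-zero `"prob"` ever fire.
```python
import json

# Speed and shift perturbation applied to every sample (prob 1.0).
conf = json.dumps([
    {"type": "speed", "params": {"min_speed_rate": 0.9, "max_speed_rate": 1.1}, "prob": 1.0},
    {"type": "shift", "params": {"min_shift_ms": -5, "max_shift_ms": 5}, "prob": 1.0},
])
pipeline = AugmentationPipeline(preprocess_conf=conf, random_seed=0)
# audio_segment should be an AudioSegment/SpeechSegment; transform_audio() works in place.
# pipeline.transform_audio(audio_segment)
```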
#### File: s2t/io/sampler.py
```python
import math
import numpy as np
from paddle import distributed as dist
from paddle.io import BatchSampler
from paddle.io import DistributedBatchSampler
from paddlespeech.s2t.utils.log import Log
logger = Log(__name__).getlog()
__all__ = [
"SortagradDistributedBatchSampler",
"SortagradBatchSampler",
]
def _batch_shuffle(indices, batch_size, epoch, clipped=False):
"""Put similarly-sized instances into minibatches for better efficiency
and make a batch-wise shuffle.
1. Sort the audio clips by duration.
2. Generate a random number `k`, k in [0, batch_size).
3. Randomly shift `k` instances in order to create different batches
for different epochs. Create minibatches.
4. Shuffle the minibatches.
:param indices: indexes. List of int.
:type indices: list
    :param batch_size: Batch size. This size is also used to generate
                       a random number for batch shuffle.
:type batch_size: int
:param clipped: Whether to clip the heading (small shift) and trailing
(incomplete batch) instances.
:type clipped: bool
    :return: Batch shuffled manifest.
:rtype: list
"""
rng = np.random.RandomState(epoch)
shift_len = rng.randint(0, batch_size - 1)
batch_indices = list(zip(* [iter(indices[shift_len:])] * batch_size))
rng.shuffle(batch_indices)
batch_indices = [item for batch in batch_indices for item in batch]
assert clipped is False
if not clipped:
res_len = len(indices) - shift_len - len(batch_indices)
# when res_len is 0, will return whole list, len(List[-0:]) = len(List[:])
if res_len != 0:
batch_indices.extend(indices[-res_len:])
batch_indices.extend(indices[0:shift_len])
assert len(indices) == len(
batch_indices
), f"_batch_shuffle: {len(indices)} : {len(batch_indices)} : {res_len} - {shift_len}"
return batch_indices
class SortagradDistributedBatchSampler(DistributedBatchSampler):
def __init__(self,
dataset,
batch_size,
num_replicas=None,
rank=None,
shuffle=False,
drop_last=False,
sortagrad=False,
shuffle_method="batch_shuffle"):
"""Sortagrad Sampler for multi gpus.
Args:
dataset (paddle.io.Dataset):
batch_size (int): batch size for one gpu
num_replicas (int, optional): world size or numbers of gpus. Defaults to None.
rank (int, optional): rank id. Defaults to None.
            shuffle (bool, optional): whether to shuffle the data. Defaults to False.
            drop_last (bool, optional): whether to drop the last batch when it is smaller than batch_size. Defaults to False.
            sortagrad (bool, optional): if True, sort by duration in the first epoch, then shuffle as usual in later epochs. Defaults to False.
            shuffle_method (str, optional): shuffle method, "instance_shuffle" or "batch_shuffle". Defaults to "batch_shuffle".
"""
super().__init__(dataset, batch_size, num_replicas, rank, shuffle,
drop_last)
self._sortagrad = sortagrad
self._shuffle_method = shuffle_method
def __iter__(self):
num_samples = len(self.dataset)
indices = np.arange(num_samples).tolist()
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# sort (by duration) or batch-wise shuffle the manifest
if self.shuffle:
if self.epoch == 0 and self._sortagrad:
logger.info(
f'rank: {dist.get_rank()} dataset sortagrad! epoch {self.epoch}'
)
else:
logger.info(
f'rank: {dist.get_rank()} dataset shuffle! epoch {self.epoch}'
)
if self._shuffle_method == "batch_shuffle":
                    # use `batch_size * nranks`, otherwise the loss becomes unstable (nan/inf grads),
                    # since different example lengths across ranks cause unstable loss,
                    # e.g. rank0 max length 20, rank3 max length 1000
indices = _batch_shuffle(
indices,
self.batch_size * self.nranks,
self.epoch,
clipped=False)
elif self._shuffle_method == "instance_shuffle":
np.random.RandomState(self.epoch).shuffle(indices)
else:
raise ValueError("Unknown shuffle method %s." %
self._shuffle_method)
assert len(
indices
) == self.total_size, f"batch shuffle examples error: {len(indices)} : {self.total_size}"
# slice `self.batch_size` examples by rank id
def _get_indices_by_batch_size(indices):
subsampled_indices = []
last_batch_size = self.total_size % (self.batch_size * self.nranks)
assert last_batch_size % self.nranks == 0
last_local_batch_size = last_batch_size // self.nranks
for i in range(self.local_rank * self.batch_size,
len(indices) - last_batch_size,
self.batch_size * self.nranks):
subsampled_indices.extend(indices[i:i + self.batch_size])
indices = indices[len(indices) - last_batch_size:]
subsampled_indices.extend(
indices[self.local_rank * last_local_batch_size:(
self.local_rank + 1) * last_local_batch_size])
return subsampled_indices
if self.nranks > 1:
indices = _get_indices_by_batch_size(indices)
assert len(indices) == self.num_samples
_sample_iter = iter(indices)
batch_indices = []
for idx in _sample_iter:
batch_indices.append(idx)
if len(batch_indices) == self.batch_size:
logger.debug(
f"rank: {dist.get_rank()} batch index: {batch_indices} ")
yield batch_indices
batch_indices = []
if not self.drop_last and len(batch_indices) > 0:
yield batch_indices
def __len__(self):
num_samples = self.num_samples
num_samples += int(not self.drop_last) * (self.batch_size - 1)
return num_samples // self.batch_size
class SortagradBatchSampler(BatchSampler):
def __init__(self,
dataset,
batch_size,
shuffle=False,
drop_last=False,
sortagrad=False,
shuffle_method="batch_shuffle"):
"""Sortagrad Sampler for one gpu.
Args:
dataset (paddle.io.Dataset):
batch_size (int): batch size for one gpu
            shuffle (bool, optional): whether to shuffle the data. Defaults to False.
            drop_last (bool, optional): whether to drop the last batch when it is smaller than batch_size. Defaults to False.
            sortagrad (bool, optional): if True, sort by duration in the first epoch, then shuffle as usual in later epochs. Defaults to False.
            shuffle_method (str, optional): shuffle method, "instance_shuffle" or "batch_shuffle". Defaults to "batch_shuffle".
"""
self.dataset = dataset
assert isinstance(batch_size, int) and batch_size > 0, \
"batch_size should be a positive integer"
self.batch_size = batch_size
assert isinstance(shuffle, bool), \
"shuffle should be a boolean value"
self.shuffle = shuffle
assert isinstance(drop_last, bool), \
"drop_last should be a boolean number"
self.drop_last = drop_last
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0))
self.total_size = self.num_samples
self._sortagrad = sortagrad
self._shuffle_method = shuffle_method
def __iter__(self):
num_samples = len(self.dataset)
indices = np.arange(num_samples).tolist()
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# sort (by duration) or batch-wise shuffle the manifest
if self.shuffle:
if self.epoch == 0 and self._sortagrad:
logger.info(f'dataset sortagrad! epoch {self.epoch}')
else:
logger.info(f'dataset shuffle! epoch {self.epoch}')
if self._shuffle_method == "batch_shuffle":
indices = _batch_shuffle(
indices, self.batch_size, self.epoch, clipped=False)
elif self._shuffle_method == "instance_shuffle":
np.random.RandomState(self.epoch).shuffle(indices)
else:
raise ValueError("Unknown shuffle method %s." %
self._shuffle_method)
assert len(
indices
) == self.total_size, f"batch shuffle examples error: {len(indices)} : {self.total_size}"
assert len(indices) == self.num_samples
_sample_iter = iter(indices)
batch_indices = []
for idx in _sample_iter:
batch_indices.append(idx)
if len(batch_indices) == self.batch_size:
logger.debug(
f"rank: {dist.get_rank()} batch index: {batch_indices} ")
yield batch_indices
batch_indices = []
if not self.drop_last and len(batch_indices) > 0:
yield batch_indices
self.epoch += 1
def __len__(self):
num_samples = self.num_samples
num_samples += int(not self.drop_last) * (self.batch_size - 1)
return num_samples // self.batch_size
```
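The core idea of `_batch_shuffle` (shift by a random offset, group into fixed-size batches, shuffle the batches, then re-append the clipped head and tail) can be illustrated on a plain list with no Paddle dependency. This is a standalone sketch of the same steps, not a call into the samplers above.
```python
import numpy as np

indices = list(range(10))          # stand-in for duration-sorted indices
batch_size, epoch = 3, 0
rng = np.random.RandomState(epoch)

shift_len = rng.randint(0, batch_size - 1)                      # random head offset
batches = list(zip(*[iter(indices[shift_len:])] * batch_size))  # fixed-size batches
rng.shuffle(batches)                                            # batch-wise shuffle
shuffled = [i for b in batches for i in b]
res_len = len(indices) - shift_len - len(shuffled)
if res_len != 0:
    shuffled.extend(indices[-res_len:])                         # re-append tail remainder
shuffled.extend(indices[0:shift_len])                           # re-append clipped head
print(shuffled)                                                 # a permutation of 0..9
```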
#### File: training/extensions/evaluator.py
```python
from typing import Dict
import paddle
from paddle import distributed as dist
from paddle.io import DataLoader
from paddle.nn import Layer
from . import extension
from ..reporter import DictSummary
from ..reporter import ObsScope
from ..reporter import report
from ..timer import Timer
from paddlespeech.s2t.utils.log import Log
logger = Log(__name__).getlog()
class StandardEvaluator(extension.Extension):
trigger = (1, 'epoch')
default_name = 'validation'
priority = extension.PRIORITY_WRITER
name = None
def __init__(self, model: Layer, dataloader: DataLoader):
# it is designed to hold multiple models
models = {"main": model}
self.models: Dict[str, Layer] = models
self.model = model
# dataloaders
self.dataloader = dataloader
def evaluate_core(self, batch):
# compute
self.model(batch) # you may report here
return
def evaluate_sync(self, data):
# dist sync `evaluate_core` outputs
if data is None:
return
numerator, denominator = data
if dist.get_world_size() > 1:
numerator = paddle.to_tensor(numerator)
denominator = paddle.to_tensor(denominator)
# the default operator in all_reduce function is sum.
dist.all_reduce(numerator)
dist.all_reduce(denominator)
value = numerator / denominator
value = float(value)
else:
value = numerator / denominator
        # used by `snapshot` to do k-best checkpoint saving.
report("VALID/LOSS", value)
logger.info(f"Valid: all-reduce loss {value}")
def evaluate(self):
# switch to eval mode
for model in self.models.values():
model.eval()
# to average evaluation metrics
summary = DictSummary()
for batch in self.dataloader:
observation = {}
with ObsScope(observation):
# main evaluation computation here.
with paddle.no_grad():
self.evaluate_sync(self.evaluate_core(batch))
summary.add(observation)
summary = summary.compute_mean()
# switch to train mode
for model in self.models.values():
model.train()
return summary
def __call__(self, trainer=None):
        # evaluate and report the averaged metrics to the current observation;
        # if it is used to extend a trainer, the metrics are reported to the
        # observation of the trainer, otherwise you can use your own observation
# or otherwise, you can use your own observation
with Timer("Eval Time Cost: {}"):
summary = self.evaluate()
for k, v in summary.items():
report(k, v)
```
#### File: training/extensions/__init__.py
```python
from typing import Callable
from .extension import Extension
def make_extension(trigger: Callable=None,
default_name: str=None,
priority: int=None,
finalizer: Callable=None,
initializer: Callable=None,
on_error: Callable=None):
"""Make an Extension-like object by injecting required attributes to it.
"""
if trigger is None:
trigger = Extension.trigger
if priority is None:
priority = Extension.priority
def decorator(ext):
ext.trigger = trigger
ext.default_name = default_name or ext.__name__
ext.priority = priority
ext.finalize = finalizer
ext.on_error = on_error
ext.initialize = initializer
return ext
return decorator
```
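A small sketch of `make_extension` turning a plain function into an Extension-like object; the trigger and priority values shown are arbitrary examples.
```python
@make_extension(trigger=(100, 'iteration'), priority=200)
def print_progress(trainer=None):
    # called by the trainer every 100 iterations once registered
    print("still training ...")

print(print_progress.trigger, print_progress.default_name, print_progress.priority)
# (100, 'iteration') print_progress 200
```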
#### File: s2t/training/reporter.py
```python
import contextlib
import math
from collections import defaultdict
OBSERVATIONS = None
@contextlib.contextmanager
def ObsScope(observations):
# make `observation` the target to report to.
# it is basically a dictionary that stores temporary observations
global OBSERVATIONS
old = OBSERVATIONS
OBSERVATIONS = observations
try:
yield
finally:
OBSERVATIONS = old
def get_observations():
global OBSERVATIONS
return OBSERVATIONS
def report(name, value):
    # a simple function to report a named value;
    # you can use it anywhere, it will get the default target and write to it
    # (think of it as a std.out for metrics)
observations = get_observations()
if observations is None:
return
else:
observations[name] = value
class Summary():
"""Online summarization of a sequence of scalars.
Summary computes the statistics of given scalars online.
"""
def __init__(self):
self._x = 0.0
self._x2 = 0.0
self._n = 0
def add(self, value, weight=1):
"""Adds a scalar value.
Args:
value: Scalar value to accumulate. It is either a NumPy scalar or
a zero-dimensional array (on CPU or GPU).
weight: An optional weight for the value. It is a NumPy scalar or
a zero-dimensional array (on CPU or GPU).
Default is 1 (integer).
"""
self._x += weight * value
self._x2 += weight * value * value
self._n += weight
def compute_mean(self):
"""Computes the mean."""
x, n = self._x, self._n
return x / n
def make_statistics(self):
"""Computes and returns the mean and standard deviation values.
Returns:
tuple: Mean and standard deviation values.
"""
x, n = self._x, self._n
mean = x / n
var = self._x2 / n - mean * mean
std = math.sqrt(var)
return mean, std
class DictSummary():
"""Online summarization of a sequence of dictionaries.
``DictSummary`` computes the statistics of a given set of scalars online.
It only computes the statistics for scalar values and variables of scalar
values in the dictionaries.
"""
def __init__(self):
self._summaries = defaultdict(Summary)
def add(self, d):
"""Adds a dictionary of scalars.
Args:
d (dict): Dictionary of scalars to accumulate. Only elements of
scalars, zero-dimensional arrays, and variables of
zero-dimensional arrays are accumulated. When the value
is a tuple, the second element is interpreted as a weight.
"""
summaries = self._summaries
for k, v in d.items():
w = 1
            if isinstance(v, tuple):
                w = v[1]
                v = v[0]
summaries[k].add(v, weight=w)
def compute_mean(self):
"""Creates a dictionary of mean values.
It returns a single dictionary that holds a mean value for each entry
added to the summary.
Returns:
dict: Dictionary of mean values.
"""
return {
name: summary.compute_mean()
for name, summary in self._summaries.items()
}
def make_statistics(self):
"""Creates a dictionary of statistics.
It returns a single dictionary that holds mean and standard deviation
values for every entry added to the summary. For an entry of name
``'key'``, these values are added to the dictionary by names ``'key'``
and ``'key.std'``, respectively.
Returns:
dict: Dictionary of statistics of all entries.
"""
stats = {}
for name, summary in self._summaries.items():
mean, std = summary.make_statistics()
stats[name] = mean
stats[name + '.std'] = std
return stats
```
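A short usage sketch tying `ObsScope`, `report`, and `DictSummary` together; everything used here is defined in the module above.
```python
observation = {}
with ObsScope(observation):
    report("train/loss", 0.8)
    report("train/acc", 0.7)
print(observation)  # {'train/loss': 0.8, 'train/acc': 0.7}

summary = DictSummary()
summary.add({"loss": 1.0})
summary.add({"loss": 3.0})
print(summary.compute_mean())       # {'loss': 2.0}
print(summary.make_statistics())    # {'loss': 2.0, 'loss.std': 1.0}
```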
#### File: s2t/transform/add_deltas.py
```python
import numpy as np
def delta(feat, window):
assert window > 0
delta_feat = np.zeros_like(feat)
for i in range(1, window + 1):
delta_feat[:-i] += i * feat[i:]
delta_feat[i:] += -i * feat[:-i]
delta_feat[-i:] += i * feat[-1]
delta_feat[:i] += -i * feat[0]
delta_feat /= 2 * sum(i**2 for i in range(1, window + 1))
return delta_feat
def add_deltas(x, window=2, order=2):
"""
Args:
x (np.ndarray): speech feat, (T, D).
Return:
np.ndarray: (T, (1+order)*D)
"""
feats = [x]
for _ in range(order):
feats.append(delta(feats[-1], window))
return np.concatenate(feats, axis=1)
class AddDeltas():
def __init__(self, window=2, order=2):
self.window = window
self.order = order
def __repr__(self):
return "{name}(window={window}, order={order}".format(
name=self.__class__.__name__, window=self.window, order=self.order)
def __call__(self, x):
return add_deltas(x, window=self.window, order=self.order)
```
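A quick shape check for `add_deltas` on random features; only NumPy and the functions above are needed.
```python
import numpy as np

feat = np.random.randn(100, 80)          # (T, D) speech feature
out = add_deltas(feat, window=2, order=2)
print(out.shape)                         # (100, 240): static + delta + delta-delta
```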
#### File: exps/fastspeech2/voice_cloning.py
```python
import argparse
import os
from pathlib import Path
import numpy as np
import paddle
import soundfile as sf
import yaml
from yacs.config import CfgNode
from paddlespeech.t2s.frontend.zh_frontend import Frontend
from paddlespeech.t2s.models.fastspeech2 import FastSpeech2
from paddlespeech.t2s.models.fastspeech2 import FastSpeech2Inference
from paddlespeech.t2s.models.parallel_wavegan import PWGGenerator
from paddlespeech.t2s.models.parallel_wavegan import PWGInference
from paddlespeech.t2s.modules.normalizer import ZScore
from paddlespeech.vector.exps.ge2e.audio_processor import SpeakerVerificationPreprocessor
from paddlespeech.vector.models.lstm_speaker_encoder import LSTMSpeakerEncoder
def voice_cloning(args, fastspeech2_config, pwg_config):
# speaker encoder
p = SpeakerVerificationPreprocessor(
sampling_rate=16000,
audio_norm_target_dBFS=-30,
vad_window_length=30,
vad_moving_average_width=8,
vad_max_silence_length=6,
mel_window_length=25,
mel_window_step=10,
n_mels=40,
partial_n_frames=160,
min_pad_coverage=0.75,
partial_overlap_ratio=0.5)
print("Audio Processor Done!")
speaker_encoder = LSTMSpeakerEncoder(
n_mels=40, num_layers=3, hidden_size=256, output_size=256)
speaker_encoder.set_state_dict(paddle.load(args.ge2e_params_path))
speaker_encoder.eval()
print("GE2E Done!")
with open(args.phones_dict, "r") as f:
phn_id = [line.strip().split() for line in f.readlines()]
vocab_size = len(phn_id)
print("vocab_size:", vocab_size)
odim = fastspeech2_config.n_mels
model = FastSpeech2(
idim=vocab_size, odim=odim, **fastspeech2_config["model"])
model.set_state_dict(
paddle.load(args.fastspeech2_checkpoint)["main_params"])
model.eval()
vocoder = PWGGenerator(**pwg_config["generator_params"])
vocoder.set_state_dict(paddle.load(args.pwg_checkpoint)["generator_params"])
vocoder.remove_weight_norm()
vocoder.eval()
print("model done!")
frontend = Frontend(phone_vocab_path=args.phones_dict)
print("frontend done!")
stat = np.load(args.fastspeech2_stat)
mu, std = stat
mu = paddle.to_tensor(mu)
std = paddle.to_tensor(std)
fastspeech2_normalizer = ZScore(mu, std)
stat = np.load(args.pwg_stat)
mu, std = stat
mu = paddle.to_tensor(mu)
std = paddle.to_tensor(std)
pwg_normalizer = ZScore(mu, std)
fastspeech2_inference = FastSpeech2Inference(fastspeech2_normalizer, model)
fastspeech2_inference.eval()
pwg_inference = PWGInference(pwg_normalizer, vocoder)
pwg_inference.eval()
output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
input_dir = Path(args.input_dir)
sentence = args.text
input_ids = frontend.get_input_ids(sentence, merge_sentences=True)
phone_ids = input_ids["phone_ids"][0]
for name in os.listdir(input_dir):
utt_id = name.split(".")[0]
ref_audio_path = input_dir / name
mel_sequences = p.extract_mel_partials(p.preprocess_wav(ref_audio_path))
# print("mel_sequences: ", mel_sequences.shape)
with paddle.no_grad():
spk_emb = speaker_encoder.embed_utterance(
paddle.to_tensor(mel_sequences))
# print("spk_emb shape: ", spk_emb.shape)
with paddle.no_grad():
wav = pwg_inference(
fastspeech2_inference(phone_ids, spk_emb=spk_emb))
sf.write(
str(output_dir / (utt_id + ".wav")),
wav.numpy(),
samplerate=fastspeech2_config.fs)
print(f"{utt_id} done!")
    # Randomly generate a speaker embedding with values in [0, 0.2); 256 is the dim of spk_emb
    random_spk_emb = np.random.rand(256) * 0.2
    random_spk_emb = paddle.to_tensor(random_spk_emb, dtype='float32')
utt_id = "random_spk_emb"
with paddle.no_grad():
        wav = pwg_inference(fastspeech2_inference(phone_ids, spk_emb=random_spk_emb))
sf.write(
str(output_dir / (utt_id + ".wav")),
wav.numpy(),
samplerate=fastspeech2_config.fs)
print(f"{utt_id} done!")
def main():
# parse args and config and redirect to train_sp
parser = argparse.ArgumentParser(description="")
parser.add_argument(
"--fastspeech2-config", type=str, help="fastspeech2 config file.")
parser.add_argument(
"--fastspeech2-checkpoint",
type=str,
help="fastspeech2 checkpoint to load.")
parser.add_argument(
"--fastspeech2-stat",
type=str,
help="mean and standard deviation used to normalize spectrogram when training fastspeech2."
)
parser.add_argument(
"--pwg-config", type=str, help="parallel wavegan config file.")
parser.add_argument(
"--pwg-checkpoint",
type=str,
help="parallel wavegan generator parameters to load.")
parser.add_argument(
"--pwg-stat",
type=str,
help="mean and standard deviation used to normalize spectrogram when training parallel wavegan."
)
parser.add_argument(
"--phones-dict",
type=str,
default="phone_id_map.txt",
help="phone vocabulary file.")
parser.add_argument(
"--text",
type=str,
default="每当你觉得,想要批评什么人的时候,你切要记着,这个世界上的人,并非都具备你禀有的条件。",
help="text to synthesize, a line")
parser.add_argument(
"--ge2e_params_path", type=str, help="ge2e params path.")
parser.add_argument(
"--ngpu", type=int, default=1, help="if ngpu=0, use cpu.")
parser.add_argument(
"--input-dir",
type=str,
help="input dir of *.wav, the sample rate will be resample to 16k.")
parser.add_argument("--output-dir", type=str, help="output dir.")
args = parser.parse_args()
if args.ngpu == 0:
paddle.set_device("cpu")
elif args.ngpu > 0:
paddle.set_device("gpu")
else:
print("ngpu should >= 0 !")
with open(args.fastspeech2_config) as f:
fastspeech2_config = CfgNode(yaml.safe_load(f))
with open(args.pwg_config) as f:
pwg_config = CfgNode(yaml.safe_load(f))
print("========Args========")
print(yaml.safe_dump(vars(args)))
print("========Config========")
print(fastspeech2_config)
print(pwg_config)
voice_cloning(args, fastspeech2_config, pwg_config)
if __name__ == "__main__":
main()
```
#### File: t2s/modules/pqmf.py
```python
import numpy as np
import paddle
import paddle.nn.functional as F
from paddle import nn
from scipy.signal import kaiser
def design_prototype_filter(taps=62, cutoff_ratio=0.142, beta=9.0):
"""Design prototype filter for PQMF.
This method is based on `A Kaiser window approach for the design of prototype
filters of cosine modulated filterbanks`_.
Parameters
----------
taps : int
The number of filter taps.
cutoff_ratio : float
Cut-off frequency ratio.
beta : float
Beta coefficient for kaiser window.
Returns
----------
ndarray
        Impulse response of the prototype filter (taps + 1,).
.. _`A Kaiser window approach for the design of prototype filters of cosine modulated filterbanks`:
https://ieeexplore.ieee.org/abstract/document/681427
"""
# check the arguments are valid
    assert taps % 2 == 0, "The number of taps must be an even number."
assert 0.0 < cutoff_ratio < 1.0, "Cutoff ratio must be > 0.0 and < 1.0."
# make initial filter
omega_c = np.pi * cutoff_ratio
with np.errstate(invalid="ignore"):
h_i = np.sin(omega_c * (np.arange(taps + 1) - 0.5 * taps)) / (
np.pi * (np.arange(taps + 1) - 0.5 * taps))
h_i[taps //
2] = np.cos(0) * cutoff_ratio # fix nan due to indeterminate form
# apply kaiser window
w = kaiser(taps + 1, beta)
h = h_i * w
return h
class PQMF(nn.Layer):
"""PQMF module.
This module is based on `Near-perfect-reconstruction pseudo-QMF banks`_.
.. _`Near-perfect-reconstruction pseudo-QMF banks`:
https://ieeexplore.ieee.org/document/258122
"""
def __init__(self, subbands=4, taps=62, cutoff_ratio=0.142, beta=9.0):
"""Initilize PQMF module.
The cutoff_ratio and beta parameters are optimized for #subbands = 4.
See dicussion in https://github.com/kan-bayashi/ParallelWaveGAN/issues/195.
Parameters
----------
subbands : int
The number of subbands.
taps : int
The number of filter taps.
cutoff_ratio : float
Cut-off frequency ratio.
beta : float
Beta coefficient for kaiser window.
"""
super().__init__()
h_proto = design_prototype_filter(taps, cutoff_ratio, beta)
h_analysis = np.zeros((subbands, len(h_proto)))
h_synthesis = np.zeros((subbands, len(h_proto)))
for k in range(subbands):
h_analysis[k] = (
2 * h_proto * np.cos((2 * k + 1) * (np.pi / (2 * subbands)) * (
np.arange(taps + 1) - (taps / 2)) + (-1)**k * np.pi / 4))
h_synthesis[k] = (
2 * h_proto * np.cos((2 * k + 1) * (np.pi / (2 * subbands)) * (
np.arange(taps + 1) - (taps / 2)) - (-1)**k * np.pi / 4))
# convert to tensor
self.analysis_filter = paddle.to_tensor(
h_analysis, dtype="float32").unsqueeze(1)
self.synthesis_filter = paddle.to_tensor(
h_synthesis, dtype="float32").unsqueeze(0)
# filter for downsampling & upsampling
updown_filter = paddle.zeros(
(subbands, subbands, subbands), dtype="float32")
for k in range(subbands):
updown_filter[k, k, 0] = 1.0
self.updown_filter = updown_filter
self.subbands = subbands
# keep padding info
self.pad_fn = nn.Pad1D(taps // 2, mode='constant', value=0.0)
def analysis(self, x):
"""Analysis with PQMF.
Parameters
----------
x : Tensor
Input tensor (B, 1, T).
Returns
----------
Tensor
Output tensor (B, subbands, T // subbands).
"""
x = F.conv1d(self.pad_fn(x), self.analysis_filter)
return F.conv1d(x, self.updown_filter, stride=self.subbands)
def synthesis(self, x):
"""Synthesis with PQMF.
Parameters
----------
x : Tensor
Input tensor (B, subbands, T // subbands).
Returns
----------
Tensor
Output tensor (B, 1, T).
"""
x = F.conv1d_transpose(
x, self.updown_filter * self.subbands, stride=self.subbands)
return F.conv1d(self.pad_fn(x), self.synthesis_filter)
# when converting dygraph to static graph, can not use self.pqmf.synthesis directly
def forward(self, x):
return self.synthesis(x)
```
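A round-trip sketch for the PQMF module above (assuming PaddlePaddle is installed): `analysis` splits a waveform into `subbands` channels at 1/`subbands` of the original rate, and `synthesis` maps it back to the original shape.
```python
import paddle  # requires paddlepaddle

pqmf = PQMF(subbands=4)
wav = paddle.randn([1, 1, 16000])        # (B, 1, T)
subband = pqmf.analysis(wav)             # (B, 4, T // 4)
recon = pqmf.synthesis(subband)          # (B, 1, T), near-perfect reconstruction
print(subband.shape, recon.shape)
```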
#### File: modules/transformer/embedding.py
```python
import math
import paddle
from paddle import nn
class PositionalEncoding(nn.Layer):
"""Positional encoding.
Parameters
----------
d_model : int
Embedding dimension.
dropout_rate : float
Dropout rate.
max_len : int
Maximum input length.
reverse : bool
Whether to reverse the input position.
    dtype : str
        dtype of the positional-encoding parameters
"""
def __init__(self,
d_model,
dropout_rate,
max_len=5000,
dtype="float32",
reverse=False):
"""Construct an PositionalEncoding object."""
super().__init__()
self.d_model = d_model
self.reverse = reverse
self.xscale = math.sqrt(self.d_model)
self.dropout = nn.Dropout(p=dropout_rate)
self.pe = None
self.dtype = dtype
self.extend_pe(paddle.expand(paddle.zeros([1]), (1, max_len)))
def extend_pe(self, x):
"""Reset the positional encodings."""
x_shape = paddle.shape(x)
pe = paddle.zeros([x_shape[1], self.d_model])
if self.reverse:
position = paddle.arange(
x_shape[1] - 1, -1, -1.0, dtype=self.dtype).unsqueeze(1)
else:
position = paddle.arange(
0, x_shape[1], dtype=self.dtype).unsqueeze(1)
div_term = paddle.exp(
paddle.arange(0, self.d_model, 2, dtype=self.dtype) *
-(math.log(10000.0) / self.d_model))
pe[:, 0::2] = paddle.sin(position * div_term)
pe[:, 1::2] = paddle.cos(position * div_term)
pe = pe.unsqueeze(0)
self.pe = pe
def forward(self, x: paddle.Tensor):
"""Add positional encoding.
Parameters
----------
x : paddle.Tensor
Input tensor (batch, time, `*`).
Returns
----------
paddle.Tensor
Encoded tensor (batch, time, `*`).
"""
self.extend_pe(x)
T = paddle.shape(x)[1]
x = x * self.xscale + self.pe[:, :T]
return self.dropout(x)
class ScaledPositionalEncoding(PositionalEncoding):
"""Scaled positional encoding module.
See Sec. 3.2 https://arxiv.org/abs/1809.08895
Parameters
----------
d_model : int
Embedding dimension.
dropout_rate : float
Dropout rate.
max_len : int
Maximum input length.
dtype : str
dtype of param
"""
def __init__(self, d_model, dropout_rate, max_len=5000, dtype="float32"):
"""Initialize class."""
super().__init__(
d_model=d_model,
dropout_rate=dropout_rate,
max_len=max_len,
dtype=dtype)
x = paddle.ones([1], dtype=self.dtype)
self.alpha = paddle.create_parameter(
shape=x.shape,
dtype=self.dtype,
default_initializer=nn.initializer.Assign(x))
def reset_parameters(self):
"""Reset parameters."""
self.alpha = paddle.ones([1])
def forward(self, x):
"""Add positional encoding.
Parameters
----------
x : paddle.Tensor
Input tensor (batch, time, `*`).
Returns
----------
paddle.Tensor
Encoded tensor (batch, time, `*`).
"""
self.extend_pe(x)
T = paddle.shape(x)[1]
x = x + self.alpha * self.pe[:, :T]
return self.dropout(x)
class RelPositionalEncoding(nn.Layer):
"""Relative positional encoding module (new implementation).
Details can be found in https://github.com/espnet/espnet/pull/2816.
See : Appendix B in https://arxiv.org/abs/1901.02860
Parameters
----------
d_model : int
Embedding dimension.
dropout_rate : float
Dropout rate.
max_len : int
Maximum input length.
"""
def __init__(self, d_model, dropout_rate, max_len=5000, dtype="float32"):
"""Construct an PositionalEncoding object."""
super().__init__()
self.d_model = d_model
self.xscale = math.sqrt(self.d_model)
self.dropout = nn.Dropout(p=dropout_rate)
self.pe = None
self.dtype = dtype
self.extend_pe(paddle.expand(paddle.zeros([1]), (1, max_len)))
def extend_pe(self, x):
"""Reset the positional encodings."""
if self.pe is not None:
# self.pe contains both positive and negative parts
# the length of self.pe is 2 * input_len - 1
if paddle.shape(self.pe)[1] >= paddle.shape(x)[1] * 2 - 1:
return
        # Suppose `i` is the position of the query vector and `j` is the
        # position of the key vector. We use positive relative positions when
        # keys are to the left (i>j) and negative relative positions otherwise (i<j).
x_shape = paddle.shape(x)
pe_positive = paddle.zeros([x_shape[1], self.d_model])
pe_negative = paddle.zeros([x_shape[1], self.d_model])
position = paddle.arange(0, x_shape[1], dtype=self.dtype).unsqueeze(1)
div_term = paddle.exp(
paddle.arange(0, self.d_model, 2, dtype=self.dtype) *
-(math.log(10000.0) / self.d_model))
pe_positive[:, 0::2] = paddle.sin(position * div_term)
pe_positive[:, 1::2] = paddle.cos(position * div_term)
pe_negative[:, 0::2] = paddle.sin(-1 * position * div_term)
pe_negative[:, 1::2] = paddle.cos(-1 * position * div_term)
        # Reverse the order of positive indices and concat both positive and
# negative indices. This is used to support the shifting trick
# as in https://arxiv.org/abs/1901.02860
pe_positive = paddle.flip(pe_positive, [0]).unsqueeze(0)
pe_negative = pe_negative[1:].unsqueeze(0)
pe = paddle.concat([pe_positive, pe_negative], axis=1)
self.pe = pe
def forward(self, x: paddle.Tensor):
"""Add positional encoding.
Parameters
----------
x : paddle.Tensor
Input tensor (batch, time, `*`).
Returns
----------
paddle.Tensor
Encoded tensor (batch, time, `*`).
"""
self.extend_pe(x)
x = x * self.xscale
T = paddle.shape(x)[1]
pe_size = paddle.shape(self.pe)
pos_emb = self.pe[:, pe_size[1] // 2 - T + 1:pe_size[1] // 2 + T, ]
return self.dropout(x), self.dropout(pos_emb)
```
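A usage sketch for the encodings above (assuming PaddlePaddle is installed). Note that `RelPositionalEncoding` returns both the scaled input and the relative position embeddings, while the other two classes return a single tensor.
```python
import paddle  # requires paddlepaddle

x = paddle.randn([2, 50, 256])                     # (batch, time, d_model)
pos_enc = PositionalEncoding(d_model=256, dropout_rate=0.1)
y = pos_enc(x)                                     # (2, 50, 256)

rel_enc = RelPositionalEncoding(d_model=256, dropout_rate=0.1)
y, pos_emb = rel_enc(x)                            # (2, 50, 256), (1, 2*50-1, 256)
print(y.shape, pos_emb.shape)
```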
#### File: t2s/modules/upsample.py
```python
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from paddle import nn
from paddle.nn import functional as F
from paddlespeech.t2s.modules.activation import get_activation
class Stretch2D(nn.Layer):
def __init__(self, w_scale: int, h_scale: int, mode: str="nearest"):
"""Strech an image (or image-like object) with some interpolation.
Parameters
----------
        w_scale : int
            Scale factor of the width.
        h_scale : int
            Scale factor of the height.
        mode : str, optional
            Interpolation mode; supported modes are "nearest", "bilinear",
            "trilinear", "bicubic", "linear" and "area", by default "nearest".
For more details about interpolation, see
`paddle.nn.functional.interpolate <https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/nn/functional/interpolate_en.html>`_.
"""
super().__init__()
self.w_scale = w_scale
self.h_scale = h_scale
self.mode = mode
def forward(self, x):
"""
Parameters
----------
x : Tensor
Shape (N, C, H, W)
Returns
-------
Tensor
Shape (N, C, H', W'), where ``H'=h_scale * H``, ``W'=w_scale * W``.
The stretched image.
"""
out = F.interpolate(
x, scale_factor=(self.h_scale, self.w_scale), mode=self.mode)
return out
class UpsampleNet(nn.Layer):
"""A Layer to upsample spectrogram by applying consecutive stretch and
convolutions.
Parameters
----------
upsample_scales : List[int]
        Upsampling factors for each stretch.
nonlinear_activation : Optional[str], optional
Activation after each convolution, by default None
nonlinear_activation_params : Dict[str, Any], optional
Parameters passed to construct the activation, by default {}
interpolate_mode : str, optional
        Interpolation mode of the stretch, by default "nearest"
freq_axis_kernel_size : int, optional
Convolution kernel size along the frequency axis, by default 1
use_causal_conv : bool, optional
Whether to use causal padding before convolution, by default False
If True, Causal padding is used along the time axis, i.e. padding
amount is ``receptive field - 1`` and 0 for before and after,
respectively.
If False, "same" padding is used along the time axis.
"""
def __init__(self,
upsample_scales: List[int],
nonlinear_activation: Optional[str]=None,
nonlinear_activation_params: Dict[str, Any]={},
interpolate_mode: str="nearest",
freq_axis_kernel_size: int=1,
use_causal_conv: bool=False):
super().__init__()
self.use_causal_conv = use_causal_conv
self.up_layers = nn.LayerList()
for scale in upsample_scales:
stretch = Stretch2D(scale, 1, interpolate_mode)
assert freq_axis_kernel_size % 2 == 1
freq_axis_padding = (freq_axis_kernel_size - 1) // 2
kernel_size = (freq_axis_kernel_size, scale * 2 + 1)
if use_causal_conv:
padding = (freq_axis_padding, scale * 2)
else:
padding = (freq_axis_padding, scale)
conv = nn.Conv2D(
1, 1, kernel_size, padding=padding, bias_attr=False)
self.up_layers.extend([stretch, conv])
if nonlinear_activation is not None:
# for compatibility
nonlinear_activation = nonlinear_activation.lower()
nonlinear = get_activation(nonlinear_activation,
**nonlinear_activation_params)
self.up_layers.append(nonlinear)
def forward(self, c):
"""
Parameters
----------
c : Tensor
Shape (N, F, T), spectrogram
Returns
-------
Tensor
Shape (N, F, T'), where ``T' = upsample_factor * T``, upsampled
spectrogram
"""
c = c.unsqueeze(1)
for f in self.up_layers:
if self.use_causal_conv and isinstance(f, nn.Conv2D):
                c = f(c)[:, :, :, :c.shape[-1]]
else:
c = f(c)
return c.squeeze(1)
class ConvInUpsampleNet(nn.Layer):
"""A Layer to upsample spectrogram composed of a convolution and an
UpsampleNet.
Parameters
----------
upsample_scales : List[int]
        Upsampling factors for each stretch.
nonlinear_activation : Optional[str], optional
Activation after each convolution, by default None
nonlinear_activation_params : Dict[str, Any], optional
Parameters passed to construct the activation, by default {}
interpolate_mode : str, optional
        Interpolation mode of the stretch, by default "nearest"
freq_axis_kernel_size : int, optional
Convolution kernel size along the frequency axis, by default 1
aux_channels : int, optional
Feature size of the input, by default 80
aux_context_window : int, optional
Context window of the first 1D convolution applied to the input. It
related to the kernel size of the convolution, by default 0
If use causal convolution, the kernel size is ``window + 1``, else
the kernel size is ``2 * window + 1``.
use_causal_conv : bool, optional
Whether to use causal padding before convolution, by default False
If True, Causal padding is used along the time axis, i.e. padding
amount is ``receptive field - 1`` and 0 for before and after,
respectively.
If False, "same" padding is used along the time axis.
"""
def __init__(self,
upsample_scales: List[int],
nonlinear_activation: Optional[str]=None,
nonlinear_activation_params: Dict[str, Any]={},
interpolate_mode: str="nearest",
freq_axis_kernel_size: int=1,
aux_channels: int=80,
aux_context_window: int=0,
use_causal_conv: bool=False):
super().__init__()
self.aux_context_window = aux_context_window
self.use_causal_conv = use_causal_conv and aux_context_window > 0
kernel_size = aux_context_window + 1 if use_causal_conv else 2 * aux_context_window + 1
self.conv_in = nn.Conv1D(
aux_channels,
aux_channels,
kernel_size=kernel_size,
bias_attr=False)
self.upsample = UpsampleNet(
upsample_scales=upsample_scales,
nonlinear_activation=nonlinear_activation,
nonlinear_activation_params=nonlinear_activation_params,
interpolate_mode=interpolate_mode,
freq_axis_kernel_size=freq_axis_kernel_size,
use_causal_conv=use_causal_conv)
def forward(self, c):
"""
Parameters
----------
c : Tensor
Shape (N, F, T), spectrogram
Returns
-------
Tensors
Shape (N, F, T'), where ``T' = upsample_factor * T``, upsampled
spectrogram
"""
c_ = self.conv_in(c)
c = c_[:, :, :-self.aux_context_window] if self.use_causal_conv else c_
return self.upsample(c)
```
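A shape sketch for `ConvInUpsampleNet` (assuming PaddlePaddle is installed): the time axis is stretched by the product of `upsample_scales`, which is typically chosen to match the vocoder hop size.
```python
import paddle  # requires paddlepaddle

net = ConvInUpsampleNet(upsample_scales=[4, 4, 4, 4], aux_channels=80)
mel = paddle.randn([1, 80, 50])      # (N, F, T) spectrogram
out = net(mel)
print(out.shape)                     # [1, 80, 12800]: T' = 4*4*4*4 * 50
```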
#### File: unit/tts/test_reporter.py
```python
import numpy as np
from paddlespeech.t2s.training.reporter import report
from paddlespeech.t2s.training.reporter import scope
from paddlespeech.t2s.training.reporter import Summary
def test_reporter_scope():
first = {}
second = {}
third = {}
with scope(first):
report("first_begin", 1)
with scope(second):
report("second_begin", 2)
with scope(third):
report("third_begin", 3)
report("third_end", 4)
report("seconf_end", 5)
report("first_end", 6)
assert first == {'first_begin': 1, 'first_end': 6}
    assert second == {'second_begin': 2, 'second_end': 5}
assert third == {'third_begin': 3, 'third_end': 4}
print(first)
print(second)
print(third)
def test_summary():
summary = Summary()
summary.add(1)
summary.add(2)
summary.add(3)
state = summary.make_statistics()
print(state)
np.testing.assert_allclose(
np.array(list(state)), np.array([2.0, np.std([1, 2, 3])]))
```
#### File: PaddleSpeech/utils/compute_statistics.py
```python
import argparse
import logging
from pathlib import Path
import jsonlines
import numpy as np
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from paddlespeech.t2s.datasets.data_table import DataTable
def main():
"""Run preprocessing process."""
parser = argparse.ArgumentParser(
description="Compute mean and variance of dumped raw features.")
parser.add_argument(
"--metadata", type=str, help="json file with id and file paths ")
parser.add_argument(
"--field-name",
type=str,
help="name of the field to compute statistics for.")
parser.add_argument(
"--output",
type=str,
help="path to save statistics. if not provided, "
"stats will be saved in the above root directory with name stats.npy")
    def str2bool(value):
        return value.lower() == 'true'
parser.add_argument(
"--use-relative-path",
type=str2bool,
default=False,
help="whether use relative path in metadata")
parser.add_argument(
"--verbose",
type=int,
default=1,
help="logging level. higher is more logging. (default=1)")
args = parser.parse_args()
# set logger
if args.verbose > 1:
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
)
elif args.verbose > 0:
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
)
else:
logging.basicConfig(
level=logging.WARN,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
)
logging.warning('Skip DEBUG/INFO messages')
# check directory existence
if args.output is None:
args.output = Path(
args.metadata).parent.with_name(args.field_name + "_stats.npy")
else:
args.output = Path(args.output)
args.output.parent.mkdir(parents=True, exist_ok=True)
with jsonlines.open(args.metadata, 'r') as reader:
metadata = list(reader)
if args.use_relative_path:
        # if use_relative_path was used in preprocessing, convert it to an absolute path here
metadata_dir = Path(args.metadata).parent
for item in metadata:
item["feats"] = str(metadata_dir / item["feats"])
dataset = DataTable(
metadata,
fields=[args.field_name],
converters={args.field_name: np.load}, )
logging.info(f"The number of files = {len(dataset)}.")
# calculate statistics
scaler = StandardScaler()
for datum in tqdm(dataset):
        # StandardScaler supports (*, num_features) by default
scaler.partial_fit(datum[args.field_name])
stats = np.stack([scaler.mean_, scaler.scale_], axis=0)
np.save(str(args.output), stats.astype(np.float32), allow_pickle=False)
if __name__ == "__main__":
main()
```
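The saved `*_stats.npy` stacks the per-dimension mean and scale along the first axis, matching how it is consumed elsewhere (for example by the `ZScore` normalizer in the voice-cloning script above). A small sketch of reading it back and normalizing a feature array; the path is hypothetical.
```python
import numpy as np

stats = np.load("dump/train/speech_stats.npy")   # hypothetical path
mean, scale = stats                              # each of shape (num_features,)
feat = np.random.randn(100, stats.shape[1]).astype(np.float32)
normalized = (feat - mean) / scale               # z-score per feature dimension
print(normalized.shape)
```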
#### File: utils/fst/ctc_token_fst_corrected.py
```python
import argparse
def il(n):
"""ilabel"""
return n + 1
def ol(n):
"""olabel"""
return n + 1
def s(n):
"""state"""
return n
def main(args):
with open(args.token_file) as f:
lines = f.readlines()
    # token count w/o <blank> and <eps>
phone_count = 0
disambig_count = 0
for line in lines:
sp = line.strip().split()
phone = sp[0]
if phone == '<eps>' or phone == '<blank>':
continue
if phone.startswith('#'):
disambig_count += 1
else:
phone_count += 1
# 1. add start state
# first token is <blank>:0
print('0 0 {} 0'.format(il(0)))
# 2. 0 -> i, i -> i, i -> 0
# non-blank token start from 1
for i in range(1, phone_count + 1):
# eating `token`
print('0 {} {} {}'.format(s(i), il(i), ol(i)))
# remove repeating `token`
print('{} {} {} 0'.format(s(i), s(i), il(i)))
# skip ending <blank> `token`
print('{} 0 {} 0'.format(s(i), il(0)))
# 3. i -> other phone
# non-blank token to other non-blank token
for i in range(1, phone_count + 1):
for j in range(1, phone_count + 1):
if i != j:
print('{} {} {} {}'.format(s(i), s(j), il(j), ol(j)))
# 4. add disambiguous arcs on every final state
# blank and non-blank token maybe ending with disambiguous `token`
for i in range(0, phone_count + 1):
for j in range(phone_count + 2, phone_count + disambig_count + 2):
print('{} {} {} {}'.format(s(i), s(i), 0, j))
# 5. every i is final state
# blank and non-blank `token` are final state
for i in range(0, phone_count + 1):
print(s(i))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='FST: CTC Token unfold FST transducer')
parser.add_argument(
'--token_file',
required=True,
        help='e2e model token file. line: token(char/phone/spm/disambiguous)')
args = parser.parse_args()
main(args)
```
#### File: utils/fst/ctc_token_fst.py
```python
import argparse
def main(args):
"""Token Transducer"""
# <eps> entry
print('0 1 <eps> <eps>')
    # skip beginning and ending <blank>
print('1 1 <blank> <eps>')
print('2 2 <blank> <eps>')
# <eps> exit
print('2 0 <eps> <eps>')
# linking `token` between node 1 and node 2
with open(args.token_file, 'r') as fin:
node = 3
for entry in fin:
fields = entry.strip().split(' ')
phone = fields[0]
if phone == '<eps>' or phone == '<blank>':
continue
elif '#' in phone:
# disambiguous phone
# `token` maybe ending with disambiguous symbol
print('{} {} {} {}'.format(0, 0, '<eps>', phone))
else:
# eating `token`
print('{} {} {} {}'.format(1, node, phone, phone))
# remove repeating `token`
print('{} {} {} {}'.format(node, node, phone, '<eps>'))
# leaving `token`
print('{} {} {} {}'.format(node, 2, '<eps>', '<eps>'))
node += 1
    # Final node
print('0')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='FST: CTC Token FST transducer')
parser.add_argument(
'--token_file',
required=True,
        help='e2e model token file. line: token(char/phone/spm/disambiguous)')
args = parser.parse_args()
main(args)
```
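Each line these scripts print is an arc in OpenFst-style text format, `src dst ilabel olabel`, and a line holding a single state id marks a final state. A small standalone sketch (not from the repository) of reading such output back into arcs and final states:
```python
# Illustrative parser for the text-format FST the two scripts above emit.
def parse_fst_text(lines):
    """Split OpenFst-style text lines into (arcs, final_states)."""
    arcs, finals = [], []
    for line in lines:
        fields = line.split()
        if len(fields) == 1:
            finals.append(fields[0])            # a lone state id is a final state
        else:
            src, dst, ilabel, olabel = fields[:4]
            arcs.append((src, dst, ilabel, olabel))
    return arcs, finals

arcs, finals = parse_fst_text(["0 1 <eps> <eps>", "1 1 <blank> <eps>", "0"])
print(arcs)    # [('0', '1', '<eps>', '<eps>'), ('1', '1', '<blank>', '<eps>')]
print(finals)  # ['0']
```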
#### File: utils/fst/prepare_dict.py
```python
import argparse
def main(args):
# load `unit` or `vocab` file
unit_table = set()
with open(args.unit_file, 'r') as fin:
for line in fin:
unit = line.strip()
unit_table.add(unit)
def contain_oov(units):
for unit in units:
if unit not in unit_table:
return True
return False
# load spm model
bpemode = args.bpemodel
if bpemode:
import sentencepiece as spm
sp = spm.SentencePieceProcessor()
        sp.Load(args.bpemodel)
# used to filter polyphone
lexicon_table = set()
with open(args.in_lexicon, 'r') as fin, \
open(args.out_lexicon, 'w') as fout:
for line in fin:
word = line.split()[0]
if word == 'SIL' and not bpemode: # `sil` might be a valid piece in bpemodel
continue
elif word == '<SPOKEN_NOISE>':
continue
else:
# each word only has one pronunciation for e2e system
if word in lexicon_table:
continue
if bpemode:
pieces = sp.EncodeAsPieces(word)
if contain_oov(pieces):
                        print('Ignoring word {}, which contains an OOV unit'.
                              format(''.join(word).strip('▁')))
continue
chars = ' '.join(
[p if p in unit_table else '<unk>' for p in pieces])
else:
# ignore words with OOV
if contain_oov(word):
                        print('Ignoring word {}, which contains an OOV unit'.
                              format(word))
continue
# Optional, append ▁ in front of english word
# we assume the model unit of our e2e system is char now.
if word.encode('utf8').isalpha() and '▁' in unit_table:
word = '▁' + word
chars = ' '.join(word) # word is a char list
fout.write('{} {}\n'.format(word, chars))
lexicon_table.add(word)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='FST: prepare e2e(char/spm) dict')
parser.add_argument(
'--unit_file',
required=True,
        help='e2e model unit file(lang_char.txt/vocab.txt). line: char/spm_pieces'
)
parser.add_argument(
'--in_lexicon',
required=True,
help='raw lexicon file. line: word ph0 ... phn')
parser.add_argument(
'--out_lexicon',
required=True,
help='output lexicon file. line: word char0 ... charn')
parser.add_argument('--bpemodel', default=None, help='bpemodel')
args = parser.parse_args()
print(args)
main(args)
``` |
{
"source": "JiehangXie/PASSL",
"score": 2
} |
#### File: passl/solver/lr_scheduler.py
```python
import math
import paddle
import numpy as np
from paddle.optimizer.lr import MultiStepDecay, LRScheduler
from paddle.optimizer.lr import CosineAnnealingDecay
from paddle.optimizer.lr import LinearWarmup
from .builder import LRSCHEDULERS, build_lr_scheduler, build_lr_scheduler_simclr
from .byol_lr_scheduler import ByolLRScheduler
LRSCHEDULERS.register(LinearWarmup)
LRSCHEDULERS.register(MultiStepDecay)
LRSCHEDULERS.register(CosineAnnealingDecay)
LRSCHEDULERS.register(ByolLRScheduler)
class Cosine(LRScheduler):
"""
Cosine learning rate decay
lr = 0.05 * (math.cos(epoch * (math.pi / epochs)) + 1)
Args:
lr(float): initial learning rate
step_each_epoch(int): steps each epoch
epochs(int): total training epochs
"""
def __init__(self,
learning_rate,
T_max,
warmup_steps,
eta_min=0,
last_epoch=1,
verbose=False):
super(Cosine, self).__init__(learning_rate,
last_epoch=last_epoch,
verbose=verbose)
self.T_max = T_max
self.warmup_steps = warmup_steps
self.eta_min = eta_min
self.last_epoch = last_epoch
def get_lr(self):
if self.last_epoch == 0:
return self.base_lr
elif (self.last_epoch - 1 - self.T_max) % (2 * self.T_max) == 0:
return self.last_lr + (self.base_lr - self.eta_min) * (1 - math.cos(
math.pi / self.T_max)) / 2
return self.eta_min + 0.5 * (
self.base_lr - self.eta_min) * (
1 + np.cos(np.pi * self.last_epoch / (self.T_max - self.warmup_steps)))
class CosineWarmup(LinearWarmup):
"""
Cosine learning rate decay with warmup
[0, warmup_epoch): linear warmup
[warmup_epoch, epochs): cosine decay
Args:
lr(float): initial learning rate
step_each_epoch(int): steps each epoch
epochs(int): total training epochs
warmup_epoch(int): epoch num of warmup
"""
def __init__(self,
learning_rate,
warmup_steps,
start_lr,
end_lr,
T_max,
eta_min=0,
last_epoch=-1,
verbose=False):
#start_lr = 0.0
lr_sch = Cosine(learning_rate,
T_max,
warmup_steps,
eta_min=eta_min,
last_epoch=last_epoch,
verbose=verbose)
super(CosineWarmup, self).__init__(
learning_rate=lr_sch,
warmup_steps=warmup_steps,
start_lr=start_lr,
last_epoch=last_epoch,
end_lr=end_lr)
self.update_specified = False
@LRSCHEDULERS.register()
class Cosinesimclr(LRScheduler):
def __init__(self,
learning_rate,
T_max,
last_epoch=-1,
verbose=False):
self.T_max = T_max
super(Cosinesimclr, self).__init__(learning_rate, last_epoch,
verbose)
def get_lr(self):
return self.base_lr* (1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2
@LRSCHEDULERS.register()
class simclrCosineWarmup(LinearWarmup):
    def __init__(self, lr, warmup_steps, T_max, current_iter, last_epoch=-1, warmup_epoch=10, **kwargs):
        start_lr = 0.0
        lr_sch = Cosinesimclr(lr, T_max, last_epoch=-1)
super(simclrCosineWarmup, self).__init__(
learning_rate=lr_sch,
warmup_steps=warmup_steps,
start_lr=start_lr,
last_epoch=last_epoch,
end_lr=lr)
self.update_specified = False
LRSCHEDULERS.register(Cosine)
LRSCHEDULERS.register(CosineWarmup)
``` |
{
"source": "JIeHb/roulette-tg-bot",
"score": 3
} |
#### File: roulette-tg-bot/modules/utils.py
```python
import json
from modules.db_manager import unauth_users, auth_users
def is_fefu_email(email):
    email = email.strip()
    # must contain exactly one FEFU domain: @dvfu.ru or @students.dvfu.ru
    if email.count('@dvfu.ru') + email.count('@students.dvfu.ru') == 1:
        email = email.split('@')[0]
        # the local part must contain exactly one separator ('.' or '_')
        if email.count('.') + email.count('_') == 1:
            # and otherwise consist only of lowercase latin letters
            for i in range(len(email)):
                if (ord(email[i]) < 97 or ord(email[i]) > 122) and email[i] != '.' and email[i] != '_':
                    return False
            return True
    return False
def is_full_name(name):
data = name.lower().title().split()
if len(data) != 3:
return False
for i in data:
if not i.isalpha():
return False
return data
with open('group_list.json', encoding='utf-8') as f:
groups = json.load(f)['data']
def check_group(group):
return group in groups
``` |
{
"source": "JieHong-Liu/ig_photo_downloader",
"score": 3
} |
#### File: JieHong-Liu/ig_photo_downloader/ig.py
```python
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
import time
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
# Helper that checks whether new_pic already appears in img_list, so the same picture is not downloaded twice.
def check(img_list, new_pic):
for i in img_list:
if i == new_pic:
return 0
return 1
def ig_download():
driver = webdriver.Chrome()
id = input('Enter the id you want to download: ')
# if you don't have account you can input '0'
user_id = input('Enter your account: ')
user_password = input('Enter your password: ')
    annoymus = 0  # initialize the anonymous (no-login) flag
    # without logging in, only public accounts can be downloaded
if user_id == '0' and user_password == '0':
annoymus = 1
# open the url
if(not annoymus):
# if you want to login, goto the login page.
driver.get(
'https://www.instagram.com/accounts/login/?source=private_profile')
try:
# login part
WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.NAME, "username"))
)
login_user = driver.find_element_by_name('username')
login_user.send_keys(user_id)
login_password = driver.find_element_by_name('password')
            login_password.send_keys(user_password)
WebDriverWait(driver, 10).until(
EC.presence_of_element_located((
By.XPATH, '//*[@id="loginForm"]/div/div[3]/button')
))
login = driver.find_element_by_xpath(
'//*[@id="loginForm"]/div/div[3]/button')
login.click()
# sometimes the page will ask me to save the password
WebDriverWait(driver, 10).until(
EC.presence_of_element_located(
(By.XPATH, "/html/body/div[1]/section/main/div/div/div/section/div/button"))
)
save = driver.find_element_by_xpath(
"/html/body/div[1]/section/main/div/div/div/section/div/button")
save.click()
            # dismiss the notifications pop-up
WebDriverWait(driver, 10).until(
EC.presence_of_element_located(
(By.XPATH, "/html/body/div[4]/div/div/div/div[3]/button[2]"))
)
later = driver.find_element_by_xpath(
"/html/body/div[4]/div/div/div/div[3]/button[2]")
later.click()
finally:
print("finished login")
driver.get("https://www.instagram.com/"+id+"/")
driver.maximize_window()
    # parse the loaded profile page
time.sleep(1) # wait it to ready
soup = BeautifulSoup(driver.page_source, 'lxml')
numbers = soup.find_all('span', class_='g47SY')
# posts
print("This id has " + str(numbers[0].text) + " posts. ")
# followers
print("This id has " + str(numbers[1].text) + " followers. ")
# following
print("This id is following " + str(numbers[2].text) + " people")
time.sleep(1)
img_list = [] # list of saving image
soup = BeautifulSoup(driver.page_source, 'lxml')
time.sleep(1) # wait it to ready
div_list = []
div_list = soup.find_all('div', class_='KL4Bh')
if (not len(div_list)):
print('This ID is private! ')
driver.close()
else:
print('The download is going to start : ')
i = 1
# make sure that the scrolling is enough
for j in range(0, int(int(numbers[0].text) / 10)):
soup = BeautifulSoup(driver.page_source, 'lxml')
time.sleep(1) # wait it to ready
div_list = soup.find_all('div', class_='KL4Bh')
for div in div_list:
# check if there is any pic is the same.
if check(img_list, div.img.get('src')):
# print the post which is downloaded
print(div.img.get('src'))
with open(id+'['+str(i)+'].jpg', 'wb') as file:
download = requests.get(div.img.get('src'))
file.write(download.content)
# save to image list
img_list.append(div.img.get('src'))
i = i + 1
driver.execute_script(
'window.scrollTo(0, document.body.scrollHeight);')
time.sleep(1)
print("Download finished!")
ig_download()
``` |
{
"source": "jiehuan/m2cgen",
"score": 2
} |
#### File: tests/assemblers/test_linear_lightning.py
```python
from lightning.classification import AdaGradClassifier
from lightning.regression import AdaGradRegressor
from m2cgen import assemblers, ast
from tests import utils
def test_regression():
estimator = AdaGradRegressor(random_state=1)
utils.get_regression_model_trainer()(estimator)
assembler = assemblers.SklearnLinearModelAssembler(estimator)
actual = assembler.assemble()
feature_weight_mul = [
ast.BinNumExpr(
ast.FeatureRef(0),
ast.NumVal(-0.08558826944690746),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(1),
ast.NumVal(0.0803724696787377),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(2),
ast.NumVal(-0.03516743076774846),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(3),
ast.NumVal(0.26469178947134087),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(4),
ast.NumVal(0.15651985221012488),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(5),
ast.NumVal(1.5186399078028587),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(6),
ast.NumVal(0.10089874009193693),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(7),
ast.NumVal(-0.011426237067026246),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(8),
ast.NumVal(0.0861987777487293),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(9),
ast.NumVal(-0.0057791506839322574),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(10),
ast.NumVal(0.3357752757550913),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(11),
ast.NumVal(0.020189965076849486),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(12),
ast.NumVal(-0.7390647599317609),
ast.BinNumOpType.MUL),
]
expected = assemblers.utils.apply_op_to_expressions(
ast.BinNumOpType.ADD,
ast.NumVal(0.0),
*feature_weight_mul)
assert utils.cmp_exprs(actual, expected)
def test_binary_class():
estimator = AdaGradClassifier(random_state=1)
utils.get_binary_classification_model_trainer()(estimator)
assembler = assemblers.SklearnLinearModelAssembler(estimator)
actual = assembler.assemble()
feature_weight_mul = [
ast.BinNumExpr(
ast.FeatureRef(0),
ast.NumVal(0.16218889967390476),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(1),
ast.NumVal(0.10012761963766906),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(2),
ast.NumVal(0.6289276652681673),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(3),
ast.NumVal(0.17618420156072845),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(4),
ast.NumVal(0.0010492096607182045),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(5),
ast.NumVal(-0.0029135563693806913),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(6),
ast.NumVal(-0.005923882409142498),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(7),
ast.NumVal(-0.0023293599172479755),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(8),
ast.NumVal(0.0020808828960210517),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(9),
ast.NumVal(0.0009846430705550103),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(10),
ast.NumVal(0.0010399810925427265),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(11),
ast.NumVal(0.011203056917272093),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(12),
ast.NumVal(-0.007271351370867731),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(13),
ast.NumVal(-0.26333437096804224),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(14),
ast.NumVal(1.8533543368532444e-05),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(15),
ast.NumVal(-0.0008266341686278445),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(16),
ast.NumVal(-0.0011090316301215724),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(17),
ast.NumVal(-0.0001910857095336291),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(18),
ast.NumVal(0.00010735116208006556),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(19),
ast.NumVal(-4.076097659514017e-05),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(20),
ast.NumVal(0.15300712110146406),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(21),
ast.NumVal(0.06316277258339074),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(22),
ast.NumVal(0.495291178977687),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(23),
ast.NumVal(-0.29589136204657845),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(24),
ast.NumVal(0.000771932729567487),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(25),
ast.NumVal(-0.011877978242492428),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(26),
ast.NumVal(-0.01678004536869617),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(27),
ast.NumVal(-0.004070431062579625),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(28),
ast.NumVal(0.001158641497209262),
ast.BinNumOpType.MUL),
ast.BinNumExpr(
ast.FeatureRef(29),
ast.NumVal(0.00010737287732588742),
ast.BinNumOpType.MUL),
]
expected = assemblers.utils.apply_op_to_expressions(
ast.BinNumOpType.ADD,
ast.NumVal(0.0),
*feature_weight_mul)
assert utils.cmp_exprs(actual, expected)
def test_multi_class():
estimator = AdaGradClassifier(random_state=1)
utils.get_classification_model_trainer()(estimator)
assembler = assemblers.SklearnLinearModelAssembler(estimator)
actual = assembler.assemble()
expected = ast.VectorVal([
ast.BinNumExpr(
ast.BinNumExpr(
ast.BinNumExpr(
ast.BinNumExpr(
ast.NumVal(0.0),
ast.BinNumExpr(
ast.FeatureRef(0),
ast.NumVal(0.09686334892116512),
ast.BinNumOpType.MUL),
ast.BinNumOpType.ADD),
ast.BinNumExpr(
ast.FeatureRef(1),
ast.NumVal(0.32572202133211947),
ast.BinNumOpType.MUL),
ast.BinNumOpType.ADD),
ast.BinNumExpr(
ast.FeatureRef(2),
ast.NumVal(-0.48444233646554424),
ast.BinNumOpType.MUL),
ast.BinNumOpType.ADD),
ast.BinNumExpr(
ast.FeatureRef(3),
ast.NumVal(-0.219719145605816),
ast.BinNumOpType.MUL),
ast.BinNumOpType.ADD),
ast.BinNumExpr(
ast.BinNumExpr(
ast.BinNumExpr(
ast.BinNumExpr(
ast.NumVal(0.0),
ast.BinNumExpr(
ast.FeatureRef(0),
ast.NumVal(-0.1089136473832088),
ast.BinNumOpType.MUL),
ast.BinNumOpType.ADD),
ast.BinNumExpr(
ast.FeatureRef(1),
ast.NumVal(-0.16956003333433572),
ast.BinNumOpType.MUL),
ast.BinNumOpType.ADD),
ast.BinNumExpr(
ast.FeatureRef(2),
ast.NumVal(0.0365531256007199),
ast.BinNumOpType.MUL),
ast.BinNumOpType.ADD),
ast.BinNumExpr(
ast.FeatureRef(3),
ast.NumVal(-0.01016100116780896),
ast.BinNumOpType.MUL),
ast.BinNumOpType.ADD),
ast.BinNumExpr(
ast.BinNumExpr(
ast.BinNumExpr(
ast.BinNumExpr(
ast.NumVal(0.0),
ast.BinNumExpr(
ast.FeatureRef(0),
ast.NumVal(-0.16690339219780817),
ast.BinNumOpType.MUL),
ast.BinNumOpType.ADD),
ast.BinNumExpr(
ast.FeatureRef(1),
ast.NumVal(-0.19466284646233858),
ast.BinNumOpType.MUL),
ast.BinNumOpType.ADD),
ast.BinNumExpr(
ast.FeatureRef(2),
ast.NumVal(0.2953585236360389),
ast.BinNumOpType.MUL),
ast.BinNumOpType.ADD),
ast.BinNumExpr(
ast.FeatureRef(3),
ast.NumVal(0.21288203082531384),
ast.BinNumOpType.MUL),
ast.BinNumOpType.ADD)])
assert utils.cmp_exprs(actual, expected)
```
#### File: tests/assemblers/test_meta.py
```python
import pytest
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import RANSACRegressor
from sklearn.tree import DecisionTreeRegressor
from m2cgen import ast
from m2cgen.assemblers import RANSACModelAssembler
from tests.utils import cmp_exprs
def test_ransac_custom_base_estimator():
base_estimator = DecisionTreeRegressor(random_state=1, max_depth=1)
estimator = RANSACRegressor(base_estimator=base_estimator, random_state=1)
estimator.fit([[1], [2]], [1, 2])
assembler = RANSACModelAssembler(estimator)
actual = assembler.assemble()
expected = ast.IfExpr(
ast.CompExpr(
ast.FeatureRef(0),
ast.NumVal(1.5),
ast.CompOpType.LTE),
ast.NumVal(1.0),
ast.NumVal(2.0))
assert cmp_exprs(actual, expected)
def test_ransac_unknown_base_estimator():
base_estimator = DummyRegressor()
estimator = RANSACRegressor(base_estimator=base_estimator, random_state=1)
estimator.fit([[1], [2], [3]], [1, 2, 3])
assembler = RANSACModelAssembler(estimator)
with pytest.raises(NotImplementedError, match="Model 'sklearn_DummyRegressor' is not supported"):
assembler.assemble()
```
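These tests exercise m2cgen's internal assembler/AST layer directly; the public API wraps the same pipeline behind one call per target language. A short sketch of typical end-to-end usage (the estimator choice here is illustrative, not taken from the test suite):
```python
# Minimal sketch: turn a fitted sklearn model into pure-Python scoring code.
from sklearn.datasets import load_diabetes
from sklearn.tree import DecisionTreeRegressor
import m2cgen as m2c

X, y = load_diabetes(return_X_y=True)
model = DecisionTreeRegressor(max_depth=2, random_state=1).fit(X, y)
code = m2c.export_to_python(model)   # source text of a standalone score() function
print(code.splitlines()[0])
```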
#### File: e2e/executors/python.py
```python
from m2cgen.assemblers import get_assembler_cls
from m2cgen.interpreters import PythonInterpreter
from tests.e2e.executors.base import BaseExecutor
from tests.utils import write_content_to_file
class PythonExecutor(BaseExecutor):
def __init__(self, model):
self.model_name = "score"
self.model = model
self.interpreter = PythonInterpreter()
assembler_cls = get_assembler_cls(model)
self.model_ast = assembler_cls(model).assemble()
self.script_path = None
def predict(self, X):
scope = {}
exec(compile(self.script_path.read_bytes(), self.script_path, mode='exec'), scope)
# Use .tolist() since we want to use raw list of floats.
return scope['score'](X.tolist())
def prepare(self):
model_code = self.interpreter.interpret(self.model_ast)
self.script_path = self._resource_tmp_dir / f"{self.model_name}.py"
write_content_to_file(model_code, self.script_path)
```
#### File: m2cgen/tests/test_cli.py
```python
import io
import sys
from unittest import mock
from _pytest import capture
from m2cgen import __version__, cli
from tests.utils import verify_python_model_is_expected
def _get_mock_args(
indent=4,
function_name=None,
namespace=None,
module_name=None,
package_name=None,
class_name=None,
infile=None,
language=None
):
return mock.MagicMock(
indent=indent,
function_name=function_name,
namespace=namespace,
module_name=module_name,
package_name=package_name,
class_name=class_name,
infile=infile,
language=language,
recursion_limit=cli.MAX_RECURSION_DEPTH)
def test_file_as_input(tmp_path):
f = tmp_path / "hello.txt"
f.write_text("123")
input_args = ["-l", "python", str(f)]
args = cli.parse_args(input_args)
assert args.language == "python"
assert isinstance(args.infile, io.BufferedReader)
assert args.infile.name == str(f)
def test_stdin_as_input(request):
input_args = ["--language", "python"]
args = cli.parse_args(input_args)
assert args.language == "python"
# Since pytest by default captures stdin, but sometimes we need to disable
# it (primarily for using (i)pdb), we have 2 different strategies to verify
# that stdin was returned as infile.
capturemanager = request.config.pluginmanager.getplugin("capturemanager")
if capturemanager.is_globally_capturing():
assert isinstance(args.infile, capture.DontReadFromInput)
else:
assert args.infile.name == "<stdin>"
@mock.patch.object(sys, "exit")
def test_language_is_required(mocked_exit):
mocked_stderr = io.StringIO()
with mock.patch.object(sys, "stderr", new=mocked_stderr):
cli.parse_args([])
assert "the following arguments are required: --language" in mocked_stderr.getvalue()
mocked_exit.assert_called_with(2)
def test_generate_code(pickled_model):
mock_args = _get_mock_args(infile=pickled_model, language="python")
generated_code = cli.generate_code(mock_args)
verify_python_model_is_expected(
generated_code,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
expected_output=-44.40540274041321)
def test_function_name(pickled_model):
mock_args = _get_mock_args(infile=pickled_model, language="python", function_name="predict")
generated_code = cli.generate_code(mock_args).strip()
assert generated_code.startswith("def predict")
def test_function_name_csharp_default(pickled_model):
mock_args = _get_mock_args(infile=pickled_model, language="c_sharp")
generated_code = cli.generate_code(mock_args).strip()
assert 'public static double Score' in generated_code
def test_class_name(pickled_model):
mock_args = _get_mock_args(infile=pickled_model, language="java", class_name="TestClassName")
generated_code = cli.generate_code(mock_args).strip()
assert generated_code.startswith("public class TestClassName")
def test_package_name(pickled_model):
mock_args = _get_mock_args(infile=pickled_model, language="java", package_name="foo.bar.baz")
generated_code = cli.generate_code(mock_args).strip()
assert generated_code.startswith("package foo.bar.baz;")
def test_module_name(pickled_model):
mock_args = _get_mock_args(infile=pickled_model, language="visual_basic", module_name="TestModule")
generated_code = cli.generate_code(mock_args).strip()
assert generated_code.startswith("Module TestModule")
def test_namespace(pickled_model):
mock_args = _get_mock_args(infile=pickled_model, language="c_sharp", namespace="Tests.ML")
generated_code = cli.generate_code(mock_args).strip()
assert "namespace Tests.ML {" in generated_code
def test_indent(pickled_model):
mock_args = _get_mock_args(infile=pickled_model, indent=0, language="c_sharp")
generated_code = cli.generate_code(mock_args).strip()
assert generated_code.startswith("""
namespace ML {
public static class Model {
public static double Score(double[] input) {
return (
""".strip())
@mock.patch.object(sys, "exit")
def test_version(mocked_exit):
mocked_stdout = io.StringIO()
with mock.patch.object(sys, "stdout", new=mocked_stdout):
cli.parse_args(["-v"])
assert mocked_stdout.getvalue().strip() == f"m2cgen {__version__}"
def test_unsupported_args_are_ignored(pickled_model):
mock_args = _get_mock_args(
infile=pickled_model, language="python", class_name="TestClassName", package_name="foo.bar.baz")
generated_code = cli.generate_code(mock_args)
verify_python_model_is_expected(
generated_code,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
expected_output=-44.40540274041321)
``` |
{
"source": "JiehuaYang/DLCA",
"score": 2
} |
#### File: JiehuaYang/DLCA/train.py
```python
import argparse
import os
import time
import numpy as np
import data
from importlib import import_module
import shutil
from utils.log_utils import *
import sys
import torch
from torch.nn import DataParallel
from torch.backends import cudnn
from torch.utils.data import DataLoader
from torch import optim
from torch.autograd import Variable
parser = argparse.ArgumentParser(description='ca detection')
parser.add_argument('--model', '-m', metavar='MODEL', default='model.network',
help='model')
parser.add_argument('-j', '--workers', default=32, type=int, metavar='N',
help='number of data loading workers (default: 32)')
parser.add_argument('--epochs', default=500, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=12, type=int,
metavar='N', help='mini-batch size (default: 16)')
parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--save-freq', default='1', type=int, metavar='S',
help='save frequency')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--input', default='', type=str, metavar='SAVE',
help='directory to save train images (default: none)')
parser.add_argument('--output', default='', type=str, metavar='SAVE',
help='directory to save checkpoint (default: none)')
parser.add_argument('--test', default=0, type=int, metavar='TEST',
help='1 do test evaluation, 0 not')
parser.add_argument('--gpu', default='2, 3', type=str, metavar='N',
help='use gpu')
def main():
global args
args = parser.parse_args()
start_epoch = args.start_epoch
data_dir = args.input
save_dir = args.output
train_name = []
for name in os.listdir(data_dir):
if name.endswith("nii.gz"):
name = name.split(".")[-3]
train_name.append(name)
torch.manual_seed(0)
model = import_module(args.model)
config, net, loss, get_pbb = model.get_model()
if args.resume:
checkpoint = torch.load(args.resume)
if start_epoch == 0:
start_epoch = checkpoint['epoch'] + 1
net.load_state_dict(checkpoint['state_dict'])
else:
if start_epoch == 0:
start_epoch = 1
if not os.path.exists(save_dir):
os.makedirs(save_dir)
logfile = os.path.join(save_dir, 'log')
if args.test != 1:
sys.stdout = Logger(logfile)
pyfiles = [f for f in os.listdir('./') if f.endswith('.py')]
for f in pyfiles:
shutil.copy(f, os.path.join(save_dir, f))
net = net.cuda()
loss = loss.cuda()
cudnn.benchmark = True
net = DataParallel(net)
dataset = data.TrainDetector(
data_dir,
train_name,
config)
train_loader = DataLoader(
dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers,
pin_memory=True)
optimizer = torch.optim.SGD(
net.parameters(),
args.lr,
momentum=0.9,
weight_decay=args.weight_decay)
def get_lr(epoch):
if epoch <= 80:#args.epochs * 0.8:
lr = args.lr
elif epoch <= 120:#args.epochs * 0.9:
lr = 0.1 * args.lr
else:
lr = 0.01 * args.lr
return lr
loss_total_l, loss_class_l, loss_regress_l, tpr_l, tnr_l = [], [], [], [], []
for epoch in range(start_epoch, args.epochs + 1):
print("epoch",epoch)
loss_total,loss_class,loss_regress,tpr,tnr = train(train_loader, net, loss, epoch, optimizer, get_lr, args.save_freq, save_dir)
loss_total_l.append(loss_total)
loss_class_l.append(loss_class)
loss_regress_l.append(loss_regress)
tpr_l.append(tpr)
tnr_l.append(tnr)
plot(save_dir + 'train_curves.png', loss_total_l, loss_class_l, loss_regress_l, tpr_l, tnr_l)
np.savez(save_dir + 'train_curves.npz',
loss_total = np.array(loss_total_l),
loss_class = np.array(loss_class_l),
loss_regress = np.array(loss_regress_l),
tpr = np.array(tpr_l),
tnr = np.array(tnr_l))
def train(data_loader, net, loss, epoch, optimizer, get_lr, save_freq, save_dir):
start_time = time.time()
net.train()
lr = get_lr(epoch)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
metrics = []
for i, (data, target, coord) in enumerate(data_loader):
        data = Variable(data.cuda(non_blocking=True))
        target = Variable(target.cuda(non_blocking=True))
        coord = Variable(coord.cuda(non_blocking=True))
output = net(data, coord)
loss_output = loss(output, target)
optimizer.zero_grad()
loss_output[0].backward()
optimizer.step()
loss_output[0] = loss_output[0].item()
print("loss:\033[1;35m{}\033[0m, class:{}, reg:{},".format(loss_output[0],loss_output[1],loss_output[2]))
metrics.append(loss_output)
if epoch % save_freq == 0:
state_dict = net.module.state_dict()
for key in state_dict.keys():
state_dict[key] = state_dict[key].cpu()
torch.save({
'epoch': epoch,
'save_dir': save_dir,
'state_dict': state_dict,
'args': args},
os.path.join(save_dir, '%03d.ckpt' % epoch))
end_time = time.time()
metrics = np.asarray(metrics, np.float32)
tpr = 100.0 * np.sum(metrics[:, 6]) / np.sum(metrics[:, 7])
tnr = 100.0 * np.sum(metrics[:, 8]) / np.sum(metrics[:, 9])
loss_total = np.mean(metrics[:, 0])
loss_class = np.mean(metrics[:, 1])
loss_regress = [np.mean(metrics[:, 2]), np.mean(metrics[:, 3]), np.mean(metrics[:, 4]), np.mean(metrics[:, 5])]
print("metrics",metrics[:, 6])
print('Epoch %03d (lr %.5f)' % (epoch, lr))
print('Train: tpr %3.2f, tnr %3.2f, total pos %d, total neg %d, time %3.2f' % (
100.0 * np.sum(metrics[:, 6]) / np.sum(metrics[:, 7]),
100.0 * np.sum(metrics[:, 8]) / np.sum(metrics[:, 9]),
np.sum(metrics[:, 7]),
np.sum(metrics[:, 9]),
end_time - start_time))
print('loss %2.4f, classify loss %2.4f, regress loss %2.4f, %2.4f, %2.4f, %2.4f' % (
np.mean(metrics[:, 0]),
np.mean(metrics[:, 1]),
np.mean(metrics[:, 2]),
np.mean(metrics[:, 3]),
np.mean(metrics[:, 4]),
np.mean(metrics[:, 5])))
return loss_total, loss_class, loss_regress, tpr, tnr
if __name__ == '__main__':
main()
``` |
{
"source": "JieJayCao/ET-BERT",
"score": 3
} |
#### File: ET-BERT/datasets/dataset_cleanning.py
```python
from ml_classifier import unlabel_data
def deal_label():
lower_label = [23,24,35,44,76,94,95]
lower_label.extend([22,28,52,62,67,102,104])
lower_label.sort()
return lower_label
def deal_finetuning(excluding_label):
dataset_path = "I:\\datasets\\cstnet-tls1.3\\"
save_dataset_path = dataset_path
with open(dataset_path+"train_dataset.tsv",'r') as f:
train_data = f.read().split('\n')[1:]
with open(dataset_path+"valid_dataset.tsv",'r') as f:
valid_data = f.read().split('\n')[1:]
with open(dataset_path+"test_dataset.tsv",'r') as f:
test_data = f.read().split('\n')[1:]
for label_number in excluding_label:
train_pop_index = []
valid_pop_index = []
test_pop_index = []
for index in range(len(train_data)):
if str(label_number)+'\t' in train_data[index]:
train_pop_index.append(index)
for counter,index in enumerate(train_pop_index):
index = index - counter
train_data.pop(index)
for index in range(len(valid_data)):
if str(label_number)+'\t' in valid_data[index]:
valid_pop_index.append(index)
for counter,index in enumerate(valid_pop_index):
index = index - counter
valid_data.pop(index)
for index in range(len(test_data)):
if str(label_number)+'\t' in test_data[index]:
test_pop_index.append(index)
for counter,index in enumerate(test_pop_index):
index = index - counter
test_data.pop(index)
label_number = 120
count = 0
while label_number > 105:
for index in range(len(train_data)):
data = train_data[index]
if str(label_number)+'\t' in data:
new_data = data.replace(str(label_number)+'\t',str(excluding_label[count])+'\t')
train_data[index] = new_data
for index in range(len(valid_data)):
if str(label_number)+'\t' in valid_data[index]:
new_data = valid_data[index].replace(str(label_number)+'\t',str(excluding_label[count])+'\t')
valid_data[index] = new_data
for index in range(len(test_data)):
if str(label_number)+'\t' in test_data[index]:
new_data = test_data[index].replace(str(label_number)+'\t',str(excluding_label[count])+'\t')
test_data[index] = new_data
label_number -= 1
count += 1
with open(save_dataset_path+"train_dataset.tsv",'w') as f:
f.write("label\ttext_a\n")
for data in train_data:
f.write(data+'\n')
with open(save_dataset_path+"valid_dataset.tsv",'w') as f:
f.write("label\ttext_a\n")
for data in valid_data:
f.write(data+'\n')
with open(save_dataset_path+"test_dataset.tsv",'w') as f:
f.write("label\ttext_a\n")
for data in test_data:
f.write(data+'\n')
deal_result = input("please delete the last blank line in %s and input '1'"%(save_dataset_path+"test_dataset.tsv"))
if deal_result == '1':
unlabel_data(save_dataset_path+"test_dataset.tsv")
return 0
def deal_test(excluding_label):
dataset_path = "I:\\unused_IP_samples\\encrypted_dataset\\"
test_data_path = dataset_path + "test_dataset.tsv"
with open(test_data_path,'r') as f:
test_data = f.read().split('\n')[1:]
for label_number in excluding_label:
test_pop_index = []
for index in range(len(test_data)):
if str(label_number)+'\t' in test_data[index]:
test_pop_index.append(index)
for counter,index in enumerate(test_pop_index):
index = index - counter
test_data.pop(index)
label_number = 120
count = 0
while label_number > 105:
for index in range(len(test_data)):
if str(label_number) + '\t' in test_data[index]:
new_data = test_data[index].replace(str(label_number)+'\t', str(excluding_label[count])+'\t')
test_data[index] = new_data
label_number -= 1
count += 1
with open(dataset_path + "new_106\\"+"test_dataset.tsv",'w') as f:
f.write("label\ttext_a\n")
for data in test_data:
f.write(data+'\n')
deal_result = input("please delete the last blank line in %s and input '1'" % (dataset_path + "new_106\\test_dataset.tsv"))
if deal_result == '1':
unlabel_data(dataset_path + "new_106\\test_dataset.tsv")
return 0
if __name__ == '__main__':
    excluding_label = deal_label()
    deal_finetuning(excluding_label)
    #deal_test(excluding_label)
``` |
{
"source": "JieJhih/onos",
"score": 2
} |
#### File: tools/build/onos-upload-artifacts.py
```python
from subprocess import call
import hashlib
import os
import shutil
import sys
import tempfile
import time
import xml.etree.ElementTree
import requests
SONATYPE_USER=os.environ.get("SONATYPE_USER")
SONATYPE_PASSWORD=os.environ.get("SONATYPE_PASSWORD")
SONATYPE_PROFILE=os.environ.get("SONATYPE_PROFILE")
CREATE_REPO_REQUEST_TEMPLATE = '''\
<promoteRequest>
<data>
<description>%(description)</description>
</data>
</promoteRequest>
'''
CLOSE_REPO_REQUEST_TEMPLATE = '''\
<promoteRequest>
<data>
<description>%(description)</description>
<stagedRepositoryId>%(repo_id)</stagedRepositoryId>
</data>
</promoteRequest>
'''
CLOSE_RETRY_ATTEMPTS = 12 * 2
def hashlib_compute(hash, input_file, output_file):
    with open(input_file, 'rb') as f:
        for block in iter(lambda: f.read(100000), b''):
            hash.update(block)
    digest_string = hash.hexdigest()
    with open(output_file, "w") as output:
        output.write(digest_string + "\n")
def generate_metadata_files(input_file, dest):
# create a temporary directory to hold the metadata files
global tempdir
base_metadata_filename = tempdir + "/" + os.path.basename(dest)
files = []
# generate the signature file
signature_filename = base_metadata_filename + ".asc"
call(["gpg", "--armor", "--detach-sig", "--output", signature_filename, input_file])
files.append(signature_filename)
# generate the md5 checksum file
md5_filename = base_metadata_filename + ".md5"
md5 = hashlib.md5()
hashlib_compute(md5, input_file, md5_filename)
files.append(md5_filename)
# generate the SHA checksum file
sha1_filename = base_metadata_filename + ".sha1"
sha1 = hashlib.sha1()
hashlib_compute(sha1, input_file, sha1_filename)
files.append(sha1_filename)
# generate the base artifact
base_artifact_filename = base_metadata_filename
shutil.copyfile(input_file, base_artifact_filename)
files.append(base_artifact_filename)
return files
def create_staging_repo(description):
if destination_repo_url is None:
return None
create_request = CREATE_REPO_REQUEST_TEMPLATE.replace("%(description)", description)
url = "https://" + destination_repo_url + "/service/local/staging/profiles" + "/" + SONATYPE_PROFILE + "/start"
headers = {'Content-Type': 'application/xml'}
r = requests.post(url, create_request, headers=headers, auth=(SONATYPE_USER, SONATYPE_PASSWORD))
root = xml.etree.ElementTree.fromstring(r.text)
repo_id = root.find("data").find("stagedRepositoryId").text
return repo_id
def close_staging_repo(description, repo_id):
if destination_repo_url is None:
return
close_request = CLOSE_REPO_REQUEST_TEMPLATE.replace("%(description)", description).replace("%(repo_id)", repo_id)
url = "https://" + destination_repo_url + "/service/local/staging/profiles" + "/" + SONATYPE_PROFILE + "/finish"
headers = {'Content-Type': 'application/xml'}
r = requests.post(url, close_request, headers=headers, auth=(SONATYPE_USER, SONATYPE_PASSWORD))
def wait_for_staging_repo(description, repo_id):
if destination_repo_url is None:
return
base_url = "https://" + destination_repo_url + "/service/local/staging/profiles" + "/" + SONATYPE_PROFILE
close_request = CLOSE_REPO_REQUEST_TEMPLATE.replace("%(description)", description).replace("%(repo_id)", repo_id)
url = base_url + "/finish"
headers = {'Content-Type': 'application/xml'}
repo_query_url = "https://oss.sonatype.org/service/local/staging/repository/" + repo_id
attempt = 1
print ("waiting for repo to close...")
while True:
r = requests.get(repo_query_url, close_request, headers=headers, auth=(SONATYPE_USER, SONATYPE_PASSWORD))
root = xml.etree.ElementTree.fromstring(r.text)
transitioning = root.find("transitioning").text
if transitioning != "true":
break
if attempt == CLOSE_RETRY_ATTEMPTS:
print ("Unable to close repo")
sys.exit(1)
attempt = attempt + 1
time.sleep(5)
print ("Repo closed successfully")
def stage_file(file, repo_id, dest):
filename_in_repo = os.path.dirname(dest) + "/" + os.path.basename(file)
if destination_repo_url is not None:
# deploy to Nexus repo
upload_base = "https://" + destination_repo_url + "/service/local/staging/deployByRepositoryId"
url = upload_base + "/" + repo_id + "/" + filename_in_repo
headers = {'Content-Type': 'application/xml'}
with open(file, 'rb') as f:
r = requests.post(url, data=f.read(), headers=headers, auth=(SONATYPE_USER, SONATYPE_PASSWORD))
if r.status_code != 201:
print (r.status_code)
print (r.text)
sys.exit(1)
else:
# deploy to local repo
file_in_local_repo = os.path.expanduser(local_maven_repo + "/" + filename_in_repo)
dir_in_local_repo = os.path.dirname(file_in_local_repo)
if not os.path.isdir(dir_in_local_repo):
os.makedirs(dir_in_local_repo)
        shutil.copyfile(file, file_in_local_repo)
def stage_files(files, dest):
for file in files:
stage_file(file=file, repo_id=repo_id, dest=dest)
def upload_file(src, dest):
print ("publishing: " + dest.replace("org/onosproject", ""))
files = generate_metadata_files(src, dest)
stage_files(files, dest)
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('USAGE: upload-maven-artifacts catalog-file-name [nexus root url]')
        sys.exit(1)
input_list_file = sys.argv[1]
local_maven_repo = None
destination_repo_url = None
if len(sys.argv) == 3:
destination_repo_url = sys.argv[2]
else:
local_maven_repo = os.environ.get("MAVEN_REPO")
if local_maven_repo is None:
local_maven_repo = "~/.m2/repository"
if destination_repo_url is not None:
        if SONATYPE_USER is None:
            print("Environment variable SONATYPE_USER must be set")
            sys.exit(1)
        if SONATYPE_PASSWORD is None:
            print("Environment variable SONATYPE_PASSWORD must be set")
            sys.exit(1)
        if SONATYPE_PROFILE is None:
            print("Environment variable SONATYPE_PROFILE must be set")
            sys.exit(1)
print ("Uploading to remote repo: " + destination_repo_url)
else:
print ("Installing in local repo: " + local_maven_repo)
list_file = open(input_list_file, "r")
lines = list_file.readlines()
list_file.close()
tempdir = tempfile.mkdtemp(prefix="upload-maven-artifacts-")
description = "test repo"
repo_id = create_staging_repo(description)
for line in lines:
s = line.split()
src = s[0]
dest = s[1]
upload_file(src, dest)
close_staging_repo(repo_id=repo_id, description=description)
wait_for_staging_repo(repo_id=repo_id, description=description)
shutil.rmtree(tempdir)
``` |
{
"source": "jiejiang/courier",
"score": 2
} |
#### File: courier/issue_order/context_processors.py
```python
from models import Route
def routes(request):
return {
'routes': Route.objects.order_by('name')
}
```
#### File: courier/issue_order/models.py
```python
from __future__ import unicode_literals
import os, json, re
from collections import OrderedDict
from math import ceil
from django.contrib.postgres.fields import JSONField
from django.db.models.signals import post_save, pre_delete, post_delete
from django.dispatch import receiver
from django.forms.models import model_to_dict
from django.http.request import QueryDict
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.db.models import Sum, Count
COURIER_SYSTEMS = (
('jixun', _(u"吉讯CC线")),
('postal', _(u"邮政BC线")),
('yunda', _(u"韵达CC线")),
('heshan', _(u"鹤山CC线")),
('xian', _(u"西安CC线")),
)
COURIER_SYSTEMS_MAP = {key: value for key, value in COURIER_SYSTEMS}
class PickupOption:
SELF_SERVE = 'self_serve'
PICKUP_REQUIRED = 'pickup_required'
COMPANY_PICKUP = 'company_pickup'
DPD_PICKUP = 'dpd_pickup'
class CourierBatch(models.Model):
STATUS = (
(0, _(u"等待")),
(1, _(u"成功")),
(2, _(u"失败")),
(3, _(u"删除")),
)
user = models.ForeignKey("auth.User", related_name="courier_batches", on_delete=models.CASCADE)
uuid = models.CharField(_(u"UUID"), max_length=64, null=True, blank=True, unique=True, db_index=True)
percentage = models.CharField(_("Percentage"), null=True, blank=True, max_length=16)
status = models.CharField(_("Status"), null=True, blank=True, max_length=32, db_index=True)
creation_time = models.DateTimeField(_("Creation Time"), auto_now_add=True, null=True, blank=True)
system = models.CharField(_("System Name"), max_length=32, db_index=True, null=False, blank=False,
choices=COURIER_SYSTEMS)
rate = models.DecimalField(_("Rate per Package"), null=True, blank=True, max_digits=10, decimal_places=2)
credit = models.DecimalField(_("Credit"), null=True, blank=True, max_digits=10, decimal_places=2)
state = models.IntegerField(_("State"), null=False, blank=False, db_index=True, default=2)
message = models.TextField(_("Message"), null=True, blank=True)
num_order = models.IntegerField(_("Number of Orders"), null=True, blank=True)
@property
def system_name(self):
return COURIER_SYSTEMS_MAP[self.system] if self.system in COURIER_SYSTEMS_MAP else "N/A"
class Profile(models.Model):
user = models.OneToOneField("auth.User")
credit = models.DecimalField(default=0, max_digits=10, decimal_places=2)
locked_credit = models.DecimalField(default=0, max_digits=10, decimal_places=2) #deprecated
#credit order helper context
PICKUP_CHOICES = (
(PickupOption.SELF_SERVE, _(u'自送曼城仓库')),
(PickupOption.PICKUP_REQUIRED, _(u'上门取件')),
)
pickup_option = models.CharField(choices=PICKUP_CHOICES, default=PickupOption.SELF_SERVE, max_length=64)
pickup_address = models.ForeignKey("Address", on_delete=models.PROTECT, related_name='+', null=True)
@property
def system_number(self):
return CourierRate.objects.filter(user=self.user).count()
@property
def valid_order_number(self):
obj = CourierBatch.objects.filter(user=self.user, state=CourierBatch.STATUS[1][0])\
.aggregate(models.Sum('num_order'))
return obj['num_order__sum'] if 'num_order__sum' in obj and obj['num_order__sum'] is not None else 0
class CourierRate(models.Model):
user = models.ForeignKey("auth.User", on_delete=models.CASCADE)
system = models.CharField(_("System Name"), max_length=32, db_index=True, null=False, blank=False,
choices=COURIER_SYSTEMS)
rate = models.DecimalField(_("Rate per Package"), null=False, blank=False, max_digits=10, decimal_places=2)
class Meta:
unique_together = ('user', 'system')
class GroupPrice(models.Model):
group = models.ForeignKey("auth.Group", on_delete=models.CASCADE, related_name="prices")
system = models.CharField(_(u"线路"), max_length=32, db_index=True, null=False, blank=False,
choices=COURIER_SYSTEMS)
price_4_pieces_high = models.DecimalField(_(u"4罐包含1/2段"), null=True, blank=True, max_digits=10,
decimal_places=2)
price_4_pieces_low = models.DecimalField(_(u"4罐只包含3/4段"), null=True, blank=True, max_digits=10,
decimal_places=2)
price_6_pieces_high = models.DecimalField(_(u"6罐单价介于22与13磅之间"), null=True, blank=True, max_digits=10,
decimal_places=2)
price_6_pieces_low = models.DecimalField(_(u"6罐单价低于13磅(含)"), null=True, blank=True, max_digits=10,
decimal_places=2)
@property
def details(self):
return COURIER_SYSTEMS_MAP[self.system] + ": " + \
" / ".join(map(lambda x: str(x),
(self.price_4_pieces_high, self.price_4_pieces_low,
self.price_6_pieces_high, self.price_6_pieces_low)))
class Meta:
unique_together = ('group', 'system')
class Address(models.Model):
GB = 'GB'
CN = 'CN'
COUNTRY_CHOICES = (
(GB, u'UK'),
(CN, u'中国'),
)
user = models.ForeignKey("auth.User", on_delete=models.CASCADE)
is_template = models.BooleanField(default=False)
country = models.CharField(max_length=32, choices=COUNTRY_CHOICES)
post_code = models.CharField(max_length=32)
contact_name = models.CharField(max_length=64)
address_line_1 = models.CharField(max_length=64)
address_line_2 = models.CharField(max_length=64, blank=True, null=True)
province = models.CharField(max_length=32, blank=True, null=True)
city = models.CharField(max_length=32)
district = models.CharField(max_length=32, blank=True, null=True)
contact_number = models.CharField(max_length=32)
id_number = models.CharField(max_length=32, blank=True, null=True)
alias = models.CharField(max_length=32, blank=True, null=True)
def __str__(self):
text = ''
if self.country == self.GB:
text = u", ".join(
(self.contact_name, self.address_line_1, self.city, self.post_code,
self.get_country_display(), 'Tel:%s' % self.contact_number))
elif self.country == self.CN:
text = " ".join(
(self.contact_name,
"".join((self.get_country_display(), self.province, self.city,
self.district, self.address_line_1)),
u'邮编:%s' % self.post_code, u'电话:%s' % self.contact_number,
u'证件:%s' % self.id_number))
if self.alias:
text = (u'%s (%s)' % (self.alias, text))
return text.encode('utf-8')
@classmethod
def get_sender_address_templates(cls, user):
return cls.objects.filter(user=user, is_template=True, country=Address.GB)
@classmethod
def get_receiver_address_template(cls, user):
return cls.objects.filter(user=user, is_template=True, country=Address.CN)
class PricingRule(models.Model):
name = models.CharField(max_length=32)
config = JSONField()
def __unicode__(self):
return self.name
@property
def default_price(self):
return float(self.config['default_price'])
def normalize_input(self, data, user):
package = {}
if isinstance(data, basestring):
package = json.loads(data)
def get_value(obj, field, convert):
value = obj.get(field, '')
return convert(value) if value is not None and value != '' else 0
for field in ('length', 'width', 'height'):
package[field] = get_value(package, field, int)
package['gross_weight'] = get_value(package, 'gross_weight', float)
for i in xrange(len(package.get('item_set', []))):
if package['item_set'][i].get('count', ''):
package['item_set'][i]['count'] = int(package['item_set'][i]['count'])
elif isinstance(data, Package):
package = model_to_dict(data)
package['item_set'] = [model_to_dict(item) for item in data.item_set.all()]
else:
raise NotImplementedError('input not supported yet')
#common fields
for field in ('sender_address', 'receiver_address'):
if package.get(field, ''):
package[field] = Address.objects.filter(pk=int(package.get(field)), user=user).first()
return package
def make_package_price(self, data, user):
if not data:
raise Exception, "Empty input"
package = self.normalize_input(data, user)
if self.config.get('type') == 'flat':
return self.default_price
elif self.config.get('type') == 'weight_based':
levels = int(self.config.get('levels'))
volumetric_weight = Package.calculate_volumetric_weight(package['length'], package['width'], package['height'])
weight = volumetric_weight if volumetric_weight > package['gross_weight'] else package['gross_weight']
price = None
for i in range(levels):
if weight <= float(self.config.get('level_%d' % i)):
price = float(self.config.get('level_%d_price' % i))
break
if price is None:
                price = float(self.config.get('extra_price_per_kg')) \
                    * ceil(weight - float(self.config.get('level_%d' % (levels-1)))) \
                    + float(self.config.get('level_%d_price' % (levels-1)))
return price
else:
raise NotImplementedError('type not supported')
class Route(models.Model):
system = models.CharField(max_length=32, db_index=True, null=False, blank=False, choices=COURIER_SYSTEMS)
code = models.CharField(max_length=64, db_index=True, null=False, blank=False)
name = models.CharField(max_length=64)
slug = models.SlugField(max_length=64, db_index=True, null=False, blank=False, unique=True)
max_items_per_package = models.PositiveIntegerField(blank=True, null=True)
max_weight_per_package = models.DecimalField(blank=True, null=True, max_digits=4, decimal_places=1)
pricing_rule = models.ForeignKey(PricingRule, on_delete=models.SET_NULL, null=True, blank=True)
is_online = models.BooleanField(default=True, blank=False)
def __unicode__(self):
return self.name
class Meta:
unique_together = ('system', 'code',)
class CourierOrder(models.Model):
class STATUS:
CREATED = 'created'
PAID = 'paid'
SUBMITTED = 'submitted'
GENERATED = 'generated'
FAILED = 'failed'
CANCELLED = 'cancelled'
STATUS_CHOICES = (
(STATUS.CREATED, u'已下单'),
(STATUS.PAID, u'已支付'),
(STATUS.SUBMITTED, u'已提交'),
(STATUS.GENERATED, u'已生成'),
(STATUS.FAILED, u'系统错误'),
(STATUS.CANCELLED, u'已取消'),
)
user = models.ForeignKey("auth.User", on_delete=models.CASCADE)
route = models.ForeignKey(Route, on_delete=models.PROTECT, null=False, blank=False)
status = models.CharField(max_length=16, db_index=True, null=False, blank=False, choices=STATUS_CHOICES,
default=STATUS.CREATED)
last_update = models.DateTimeField(auto_now_add=True)
price = models.DecimalField(null=True, blank=True, max_digits=8, decimal_places=2)
def calculate_price(self):
# get pricing rule, TODO choose user specific pricing rule
if not self.route.pricing_rule:
raise Exception, "no route pricing available"
pricing_rule = self.route.pricing_rule
self.price = sum([package.calculate_price(pricing_rule) for package in self.package_set.all()])
self.save()
return self.price
class Package(models.Model):
order = models.ForeignKey(CourierOrder, on_delete=models.CASCADE)
sender_address = models.ForeignKey(Address, on_delete=models.PROTECT, related_name='+', null=True)
receiver_address = models.ForeignKey(Address, on_delete=models.PROTECT, related_name='+', null=True)
gross_weight = models.DecimalField(max_digits=4, decimal_places=1)
length = models.PositiveIntegerField()
width = models.PositiveIntegerField()
height = models.PositiveIntegerField()
price = models.DecimalField(null=True, blank=True, max_digits=8, decimal_places=2)
@classmethod
def calculate_volumetric_weight(cls, length, width, height):
return length * width * height / 5000.
def calculate_price(self, pricing_rule):
self.price = pricing_rule.make_package_price(self, self.order.user)
self.save()
return self.price
@property
def effective_gross_weight(self):
volumetric_weight = self.calculate_volumetric_weight(self.length, self.width, self.height)
return volumetric_weight if volumetric_weight > self.gross_weight else self.gross_weight
@receiver(post_delete, sender=Package)
def __on_delete_package(**kwargs):
package = kwargs.get('instance')
if package.sender_address:
package.sender_address.delete()
if package.receiver_address:
package.receiver_address.delete()
class Item(models.Model):
package = models.ForeignKey(Package, on_delete=models.CASCADE)
name = models.CharField(max_length=128)
count = models.PositiveIntegerField(default=1)
__CN_ADDRESSES = None
def get_cn_addresses(province_name=None, city_name=None):
global __CN_ADDRESSES
if __CN_ADDRESSES is None:
__cn_address_json = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "order_api",
"json", "cities.json")
with open(__cn_address_json) as f:
data = json.load(f)
__CN_ADDRESSES = OrderedDict()
for province in data:
province_data = OrderedDict()
for city in province['contains']:
city_data = []
for district in city['contains']:
city_data.append(district['name'])
province_data[city['name']] = city_data
__CN_ADDRESSES[province['name']] = province_data
if province_name is not None:
if city_name is not None:
return __CN_ADDRESSES.get(province_name, {}).get(city_name, [])
else:
return __CN_ADDRESSES.get(province_name, {}).keys()
else:
return __CN_ADDRESSES.keys()
def get_courier_cart_stats(user):
orders = CourierOrder.objects.filter(user=user, status=CourierOrder.STATUS.CREATED)
stats = orders.aggregate(total_order_price=Sum('price'))
if stats['total_order_price'] is None:
stats['total_order_price'] = 0
stats['total_package_count'] = 0
stats['total_gross_weight'] = 0
for order in orders:
for package in order.package_set.all():
stats['total_package_count'] += 1
stats['total_gross_weight'] += package.effective_gross_weight
return stats
def calculate_cost_with_pickup(pickup_type, pickup_address, cart_stats):
ret = {
'total_price': None,
'pickup_price': None,
'pickup_option': PickupOption.PICKUP_REQUIRED,
}
if pickup_type == PickupOption.SELF_SERVE:
ret['total_price'] = cart_stats['total_order_price']
ret['pickup_price'] = 0
ret['pickup_option'] = PickupOption.SELF_SERVE
else:
        if pickup_address is None or pickup_address.country != Address.GB or not pickup_address.post_code:  # leave total_price as None to disable checkout
pass
else:
if re.match(r'^M\d', pickup_address.post_code, flags=re.I):
ret['pickup_price'] = 3 if cart_stats['total_package_count'] < 5 else 0
ret['pickup_option'] = PickupOption.COMPANY_PICKUP
else:
ret['pickup_price'] = 10
if cart_stats['total_gross_weight'] > 20:
ret['pickup_price'] += ceil(cart_stats['total_gross_weight'] - 20) * 0.4
ret['pickup_option'] = PickupOption.DPD_PICKUP
ret['total_price'] = ret['pickup_price'] + cart_stats['total_order_price']
return ret
```
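The pickup pricing in `calculate_cost_with_pickup` boils down to a simple rule: `M<digit>` (Manchester-area) postcodes get a pickup fee of 3 that is waived from five packages upward, while other UK postcodes pay a DPD fee of 10 plus 0.4 per started kg above 20 kg. A standalone restatement of that arithmetic (illustrative sketch, not repository code):
```python
# Illustrative re-statement of the pickup-fee rule from calculate_cost_with_pickup.
import re
from math import ceil

def pickup_fee(post_code, package_count, gross_weight_kg):
    if re.match(r'^M\d', post_code, flags=re.I):
        return 3 if package_count < 5 else 0          # company pickup around Manchester
    fee = 10                                          # DPD pickup elsewhere in the UK
    if gross_weight_kg > 20:
        fee += ceil(gross_weight_kg - 20) * 0.4       # surcharge per started kg over 20
    return fee

print(pickup_fee("M14 5TP", 3, 12))    # 3
print(pickup_fee("SW1A 1AA", 2, 26))   # 12.4  (10 + 6 * 0.4)
```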
#### File: courier/issue_order/products.py
```python
import redis, time, sys, json, ast
from mezzanine.conf import settings
from courier_systems import query_products
r = redis.StrictRedis(host='localhost', port=6379, db=0)
__value_timeout = 60 * 10
__lock_timeout = 10
__refresh_wait = 2
__max_retry = 10
def __obtain_lock(lock_key):
cur_time = int(time.time())
my_timeout = cur_time + __lock_timeout + 1
if r.setnx(lock_key, my_timeout):
return True
else:
last_timeout = int(r.get(lock_key))
if last_timeout >= cur_time:
return False
new_timeout = int(r.getset(lock_key, my_timeout))
if new_timeout == last_timeout:
return True
return False
def __release_lock(lock_key):
r.delete(lock_key)
def query_product_info(system, code):
system_config = settings.COURIER_SYSTEMS[system]
params = {
'url_base': system_config['url_base'],
'user_name': system_config['user_name'],
        'password': system_config['password'],
'code': code,
}
return query_products(**params)
def __get_product_info(system, code):
value_key, lock_key = 'courier:%s:%s:products:value' % (system, code), 'courier:%s:%s:products:lock' % (system, code)
value = r.get(value_key)
if value is None:
if __obtain_lock(lock_key):
try:
value = query_product_info(system, code)
r.setex(value_key, __value_timeout, json.dumps(value))
except Exception, inst:
import traceback
traceback.print_exc(sys.stderr)
print >> sys.stderr, "Failed to obtain access token: %s" % str(inst)
finally:
__release_lock(lock_key)
else:
value = json.loads(value)
return value
def get_product_info(route):
product_info = None
retry = 0
while product_info is None:
product_info = __get_product_info(route.system, route.code)
if product_info is None:
retry += 1
if retry > __max_retry:
raise Exception, "Maximum number to retry service"
time.sleep(__refresh_wait)
return product_info if product_info is not None else {}
```
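`__get_product_info` follows a cache-aside pattern: read the value key, and on a miss let exactly one caller (the holder of the SETNX/GETSET lock) refresh it with a ten-minute TTL while everyone else retries. A condensed sketch of the same pattern for an arbitrary loader (key names and the loader are illustrative, not repository code):
```python
# Condensed cache-aside sketch mirroring the Redis lock/TTL pattern above.
import json, time
import redis

r = redis.StrictRedis(host='localhost', port=6379, db=0)

def cached(key, loader, ttl=600, wait=2, max_retry=10):
    for _ in range(max_retry):
        value = r.get(key)
        if value is not None:
            return json.loads(value)
        # simple lock; the original adds GETSET-based stale-lock recovery
        if r.setnx(key + ':lock', int(time.time()) + 10):
            try:
                fresh = loader()
                r.setex(key, ttl, json.dumps(fresh))
                return fresh
            finally:
                r.delete(key + ':lock')
        time.sleep(wait)                 # another worker is refreshing the value
    raise RuntimeError("cache refresh timed out")

# example (hypothetical loader): cached('courier:products', lambda: {"SKU1": 9.99})
```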
#### File: courier/issue_order/validators.py
```python
import os
from django.utils.translation import ugettext as _
from django.core.exceptions import ValidationError
def validate_file_extension(value):
ext = os.path.splitext(value.name)[1] # [0] returns path+filename
valid_extensions = ['.xlsx', '.xls', '.xlt']
if not ext.lower() in valid_extensions:
raise ValidationError(_(u'文件后缀名不支持'))
```
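A minimal sketch of how this validator would typically be attached to an upload field; the form and field names below are illustrative, not taken from the repo:
```python
from django import forms
from .validators import validate_file_extension  # assumed relative import

class OrderSheetUploadForm(forms.Form):
    order_sheet = forms.FileField(validators=[validate_file_extension])
```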
#### File: courier/order_api/admin.py
```python
from django.contrib import admin
from django.contrib.admin.options import InlineModelAdmin
from django.contrib.admin.utils import flatten_fieldsets
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserChangeForm
from mezzanine.core.admin import TabularDynamicInlineAdmin
from order_api.models import Product, Request, Profile, Package, PackageItem, Route
def api_profile(self):
if not hasattr(self, '_api_profile'):
self._api_profile = Profile.objects.create(user=self)
self._api_profile.save()
return self._api_profile
User.api_profile = property(lambda self: api_profile(self))
class ProductAdmin(admin.ModelAdmin):
list_display = ('name', 'barcode', 'internal_name', 'unit_price', 'is_enabled')
class PackageAdmin(TabularDynamicInlineAdmin):
model = Package
ordering = ('id',)
fields = ('tracking_no', 'cost', 'sender_name', 'sender_phone_number', 'sender_address', 'receiver_name',
'receiver_phone_number', 'receiver_address', 'receiver_city', 'receiver_post_code', 'receiver_id_number',
'items_detail')
def get_readonly_fields(self, request, obj=None):
return self.fields
def has_add_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class RequestAdmin(admin.ModelAdmin):
inlines = (PackageAdmin, )
list_display = ('request_no', 'creation_date', 'owner', 'status', 'system', 'test_mode', 'total_cost')
readonly_fields = ['total_cost', 'status']
def get_readonly_fields(self, request, obj=None):
return list(self.readonly_fields) + \
[field.name for field in obj._meta.fields] + \
[field.name for field in obj._meta.many_to_many]
def has_add_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class RouteAdmin(admin.ModelAdmin):
list_display = ('name', 'code', 'is_enabled')
admin.site.register(Product, ProductAdmin)
admin.site.register(Request, RequestAdmin)
admin.site.register(Route, RouteAdmin)
```
#### File: courier/order_api/serializers.py
```python
from rest_framework import serializers
import six
from order_api.models import Product, Request, Package, PackageItem, Route
class ProductSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="product-detail", lookup_field='barcode')
name = serializers.ReadOnlyField()
barcode = serializers.ReadOnlyField()
class Meta:
model = Product
fields = ('url', 'name', 'barcode')
class RouteSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name="route-detail", lookup_field='code')
code = serializers.ReadOnlyField()
name = serializers.ReadOnlyField()
class Meta:
model = Route
fields = ('url', 'code', 'name')
class PackageItemSerializer(serializers.ModelSerializer):
name = serializers.ChoiceField(choices=Product.objects.values_list('name', flat=True),
source="product.name", required=False)
barcode = serializers.ChoiceField(choices=Product.objects.values_list('barcode', flat=True),
source="product.barcode", required=False)
count = serializers.IntegerField()
class Meta:
model = PackageItem
fields = ('name', 'barcode', 'count')
class TrackingNoHyperlinkedIdentityField(serializers.HyperlinkedIdentityField):
def __init__(self, *args, **kwargs):
return super(TrackingNoHyperlinkedIdentityField, self).__init__(*args, **kwargs)
def to_representation(self, value):
if not value.tracking_no:
return None
return super(TrackingNoHyperlinkedIdentityField, self).to_representation(value)
class RequestHyperlinkedIdentityField(serializers.HyperlinkedIdentityField):
def __init__(self, *args, **kwargs):
return super(RequestHyperlinkedIdentityField, self).__init__(*args, **kwargs)
def to_representation(self, value):
        if value.status_code != Request.StatusCode.SUCCEEDED:
return None
return super(RequestHyperlinkedIdentityField, self).to_representation(value)
class PackageSerializer(serializers.ModelSerializer):
package_no = serializers.CharField(source="external_package_no", required=False)
url = TrackingNoHyperlinkedIdentityField(view_name="package-detail", lookup_field='slug')
tracking_no = serializers.ReadOnlyField()
waybill = TrackingNoHyperlinkedIdentityField(view_name="waybill-detail", lookup_field='slug')
request = serializers.HyperlinkedIdentityField(view_name="request-detail", lookup_field="request_no")
items = PackageItemSerializer(many=True, source="packageitem_set")
cost = serializers.ReadOnlyField()
class Meta:
model = Package
fields = ('url', 'package_no', 'tracking_no', 'waybill', 'request', 'sender_name', 'sender_phone_number', 'sender_address',
'receiver_name', 'receiver_phone_number', 'receiver_address', 'receiver_city', 'receiver_post_code',
'receiver_id_number', 'weight', 'length', 'width', 'height', 'items', 'cost')
class RequestSerializer(serializers.HyperlinkedModelSerializer):
route_code_choices = Route.objects.values_list('code', flat=True)
order_no = serializers.CharField(source="external_order_no", required=False)
url = serializers.HyperlinkedIdentityField(view_name="request-detail", lookup_field='request_no')
route_code = serializers.ChoiceField(required=False, choices=route_code_choices, source='route.code')
test_mode = serializers.ReadOnlyField()
request_no = serializers.ReadOnlyField()
waybills = RequestHyperlinkedIdentityField(view_name="request-waybill-detail", lookup_field='request_no')
creation_date = serializers.DateTimeField(read_only=True)
status = serializers.ReadOnlyField()
error_msg = serializers.ReadOnlyField()
packages = PackageSerializer(many=True)
total_cost = serializers.ReadOnlyField()
class Meta:
model = Request
fields = ('url', 'order_no', 'route_code', 'test_mode', 'request_no', 'waybills', 'creation_date', 'status', 'error_msg',
'packages', 'total_cost')
lookup_field = "request_no"
class WaybillSerializer(serializers.ModelSerializer):
tracking_no = serializers.ReadOnlyField()
waybill = TrackingNoHyperlinkedIdentityField(view_name="waybill-detail", lookup_field='slug')
package = TrackingNoHyperlinkedIdentityField(view_name="package-detail", lookup_field='slug')
class Meta:
model = Package
fields = ('tracking_no', 'waybill', 'package')
```
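For orientation, the request body that `RequestSerializer` is shaped to accept nests packages and their items. Which fields are actually mandatory depends on the model definitions, which are not shown, so treat this purely as an illustrative sketch:
```python
# Illustrative payload only: values are placeholders; route_code must be an
# existing Route.code and barcode an existing Product.barcode.
payload = {
    "order_no": "EXT-0001",
    "route_code": "<an existing Route.code>",
    "packages": [
        {
            "package_no": "PKG-0001",
            "sender_name": "...", "sender_phone_number": "...", "sender_address": "...",
            "receiver_name": "...", "receiver_phone_number": "...", "receiver_address": "...",
            "receiver_city": "...", "receiver_post_code": "...", "receiver_id_number": "...",
            "weight": 5.0, "length": 30, "width": 20, "height": 20,
            "items": [{"barcode": "<an existing Product.barcode>", "count": 4}],
        }
    ],
}
```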
#### File: courier/sample_ticket/forms.py
```python
import os
from django.utils.translation import ugettext as _
from django.core.exceptions import ValidationError
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Fieldset, ButtonHolder, Div, Field, HTML
def validate_file_extension(value):
ext = os.path.splitext(value.name)[1]
valid_extensions = ['.xlsx', '.xls', '.xlt']
if not ext.lower() in valid_extensions:
raise ValidationError(_(u'文件后缀名不支持'))
class SampleTicketForm(forms.Form):
files = forms.FileField(label=_(u"上传文件"), widget=forms.ClearableFileInput(attrs={'multiple': True}),
validators=[validate_file_extension,])
is_jixun = forms.BooleanField(label=_(u"吉讯线小票"), required=False)
def __init__(self, *args, **kwargs):
super(SampleTicketForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-4'
self.helper.field_class = 'col-lg-8'
self.helper.layout = Layout(
'files',
'is_jixun',
ButtonHolder(
Submit('submit', _(u"上传"), css_class='btn btn-success btn-lg btn-block'),
)
)
``` |
{
"source": "jiejiang/inventory",
"score": 2
} |
#### File: app/admin/views.py
```python
__author__ = 'jie'
import os, zipfile
import pandas as pd
from markupsafe import Markup
from flask import redirect, url_for, flash, current_app
from flask_admin.contrib import sqla
from flask_admin.menu import MenuLink
from flask_admin.model.form import InlineFormAdmin
from sqlalchemy import func, desc, or_
from flask_user import current_user
from flask_admin.actions import action
from wtforms.validators import ValidationError
from .. import db
from . import admin
from ..models import Order, Job, ProductInfo, ProductCountInfo, Retraction, Route
from ..util import time_format
class LoginRequiredModelView(sqla.ModelView):
def is_accessible(self):
return current_user.is_authenticated
def inaccessible_callback(self, name, **kwargs):
return redirect(url_for("user.login"))
class JobAdmin(LoginRequiredModelView):
can_delete = False
can_edit = False
can_create = False
def _show_status(view, context, model, name):
return model.status_string
def _show_item_count(view, context, model, name):
        # cannot simply return model.orders.count() here because order numbers may be re-used
job_file = os.path.join(current_app.config['DOWNLOAD_FOLDER'], model.uuid, model.uuid + '.zip')
if not os.path.exists(job_file):
return "%d (orders)" % model.orders.count()
try:
with zipfile.ZipFile(job_file) as z:
customs_df = pd.read_excel(z.open(u"江门申报单.xlsx"), converters={
u"企业运单编号": lambda x: str(x),
u"收件人省市区代码": lambda x: str(x),
u"收件人电话": lambda x: str(x),
u"收件人证件号码": lambda x: str(x),
u"发货人省市区代码": lambda x: str(x),
u"发货人电话": lambda x: str(x),
u"商品备案号": lambda x: str(x),
u"发货人电话": lambda x: str(x),
u'计量单位': lambda x: str(x),
})
return len(customs_df.index)
except Exception, inst:
return "%d (orders)" % model.orders.count()
column_formatters = {
'status': _show_status,
'creation_time': lambda v, c, m, p: time_format(m.creation_time),
'completion_time': lambda v, c, m, p: time_format(m.completion_time),
'item_count': _show_item_count,
}
class SuccessJobAdmin(JobAdmin):
list_template = "admin/job/list.html"
column_searchable_list = ('uuid', 'creation_time', 'issuer')
column_exclude_list = ('percentage', 'message')
column_default_sort = ('completion_time', True)
column_list = ('uuid', 'status', 'completion_time', 'item_count', 'version', 'issuer')
def get_query(self):
return self.session.query(self.model).filter(self.model.status == Job.Status.COMPLETED)
def get_count_query(self):
return self.session.query(func.count('*')).filter(self.model.status == Job.Status.COMPLETED)
class FailedJobAdmin(JobAdmin):
column_default_sort = ('creation_time', True)
column_list = ('uuid', 'status', 'creation_time', 'percentage', 'message', 'issuer')
def get_query(self):
return self.session.query(self.model).filter(or_(self.model.status == Job.Status.FAILED, self.model.status == Job.Status.DELETED))
def get_count_query(self):
return self.session.query(func.count('*')).filter(or_(self.model.status == Job.Status.FAILED, self.model.status == Job.Status.DELETED))
class OrderAdmin(LoginRequiredModelView):
def _show_type(view, context, model, name):
return model.type_name
column_formatters = {
'type': _show_type,
'upload_time': lambda v, c, m, p: time_format(m.upload_time),
'used_time': lambda v, c, m, p: time_format(m.used_time),
}
column_searchable_list = ('order_number', 'receiver_id_number', 'receiver_name')
column_exclude_list = ('job',)
form_excluded_columns = ('job',)
column_details_exclude_list = ('job',)
class UsedOrderAdmin(OrderAdmin):
list_template = "admin/order/list.html"
can_create = False
can_delete = False
can_edit = False
can_view_details = True
column_default_sort = ('used_time', True)
def get_query(self):
return self.session.query(self.model).filter(self.model.used == True)
def get_count_query(self):
return self.session.query(func.count('*')).filter(self.model.used == True)
@action('reuse', u"弃用单号", u"您确定需要弃用这些单号?")
def action_approve(self, ids):
try:
order_numbers = []
query = Order.query.filter(Order.id.in_(ids)).order_by(desc(Order.used_time))
for order in query.all():
order_numbers.append(order.order_number)
if order.retraction_id:
raise Exception, u"无法弃用已提取单号: %s" % order.order_number
order.discard()
db.session.commit()
flash(u"如下单号已经弃用:[%s]" % ", ".join(order_numbers))
except Exception, inst:
flash(u"无法弃用单号:[%s]。错误如下:\n%s" % (", ".join(order_numbers), str(inst)), "error")
class UnusedOrderAdmin(OrderAdmin):
can_create = False
can_edit = False
column_default_sort = ('order_number', False)
def get_query(self):
return self.session.query(self.model).filter(self.model.used==False, self.model.type == Order.Type.XIAN)
def get_count_query(self):
return self.session.query(func.count('*')).filter(self.model.used == False, self.model.type == Order.Type.XIAN)
class UnretractedOrderAdmin(OrderAdmin):
can_create = False
can_edit = False
can_delete = False
can_view_details = True
column_default_sort = ('used_time', True)
def get_query(self):
return self.session.query(self.model).filter(self.model.used==True, self.model.retraction_id == None)
def get_count_query(self):
return self.session.query(func.count('*')).filter(self.model.used==True, self.model.retraction_id == None)
class RetractedOrderAdmin(OrderAdmin):
can_create = False
can_edit = False
can_delete = False
can_view_details = True
column_default_sort = ('used_time', True)
def get_query(self):
        return self.session.query(self.model).filter(self.model.used==True, self.model.retraction_id != None)
def get_count_query(self):
        return self.session.query(func.count('*')).filter(self.model.used==True, self.model.retraction_id != None)
class ProductCountInfoInlineModelForm(InlineFormAdmin):
column_labels = dict(count=u"箱件数", gross_weight_per_box=u"每箱毛重(KG)")
class ProductInfoAdmin(LoginRequiredModelView):
#inline_models = [(ProductCountInfo, dict(form_columns=['count']))]
#inline_models = (ProductCountInfoInlineModelForm(ProductCountInfo),)
column_list = (
'name', 'net_weight', 'gross_weight', 'unit_price', 'unit_per_item', 'tax_code', 'full_name', 'report_name',
'waybill_name', 'routes', 'deprecated')
column_labels = dict(name=u"商品名称", net_weight=u"每件净重(KG)", count_infos=u"箱件数 / 毛重 -- 已作废",
price_per_kg=u"每千克价格(KG) -- 已作废", full_name=u"全称(设置后无法修改)", deprecated=u"弃用",
unit_price=u"单价", gross_weight=u"每件毛重(KG)", tax_code=u"商品税号", billing_unit=u"计费单位",
billing_unit_code=u"计费单位代码", unit_per_item=u"单个物品申报数量", specification=u"规格/型号",
bc_product_code=u"BC商品编码", bc_specification=u"BC商品规格型号",
bc_second_quantity=u"BC第二数量", bc_measurement_unit=u"BC计量单位",
bc_second_measurement_unit=u"BC第二计量单位", report_name=u"报单中显示名称",
ticket_name=u"小票名称", ticket_price=u"小票单价", waybill_name=u"面单中显示名称", routes=u'线路',
dutiable_as_any_4_pieces=u"4罐包含一个及以上该产品则报税",
non_dutiable_as_all_6_pieces=u"6罐全是该产品则不报税")
can_view_details = True
column_default_sort = ('name', False)
column_searchable_list = ('name', 'full_name', 'report_name')
form_excluded_columns = ('price_per_kg', 'count_infos')
def _show_count_infos(view, context, model, name):
return Markup(model.count_info_string)
column_formatters = {
'count_infos': _show_count_infos,
}
def on_model_change(self, form, model, is_created):
model.name = "".join(model.name.strip().split())
if not is_created:
            if form.full_name.object_data != form.full_name.data:
model.full_name = form.full_name.object_data
else:
model.full_name = "".join(model.full_name.strip().split())
def ticket_name_max_length(form, field):
if field.data and len(field.data) > 30:
raise ValidationError(u"小票名称最长30字符")
form_args = dict(
ticket_name=dict(validators=[ticket_name_max_length])
)
class RetractionAdmin(LoginRequiredModelView):
list_template = "admin/retraction/list.html"
can_create = False
can_edit = False
can_delete = False
column_searchable_list = ('uuid', 'timestamp')
column_default_sort = ('timestamp', True)
column_formatters = {
'timestamp': lambda v, c, m, p: time_format(m.timestamp),
}
class RouteAdmin(LoginRequiredModelView):
column_labels = dict(name=u"名称", code=u"编码", products=u'包含产品')
column_list = ('name', 'code', 'products')
admin.add_view(SuccessJobAdmin(Job, db.session, endpoint="admin.success_jobs", name=u"生成订单"))
admin.add_view(RetractionAdmin(Retraction, db.session, endpoint="admin.success_retraction", name=u"提取订单"))
admin.add_view(RouteAdmin(Route, db.session, endpoint="admin.route", name=u"线路管理"))
admin.add_view(ProductInfoAdmin(ProductInfo, db.session, endpoint="admin.product_info", name=u"商品信息"))
admin.add_view(UnusedOrderAdmin(Order, db.session, endpoint="admin.unused_standard_order", name=u"未使用订单"))
admin.add_view(UsedOrderAdmin(Order, db.session, endpoint="admin.used_order", name=u"已生成订单"))
admin.add_view(UnretractedOrderAdmin(Order, db.session, endpoint="admin.unretracted_order", name=u"未提取订单"))
admin.add_view(RetractedOrderAdmin(Order, db.session, endpoint="admin.retracted_order", name=u"已提取订单"))
admin.add_view(FailedJobAdmin(Job, db.session, endpoint="admin.failed_jobs", name=u"错误记录"))
admin.add_link(MenuLink(name=u"回到主界面", endpoint="front_end.index"))
```
#### File: app/main/jobs.py
```python
__author__ = 'jie'
import time
import sys
import datetime
import os
import shutil
import zipfile
from flask_rq import job
from .. import app, db
from ..models import Job
from postorder import xls_to_orders
batch_order_queue = app.config['BATCH_ORDER_QUEUE']
@job(batch_order_queue)
def batch_order(job_id, input_file, workdir, test_mode=False):
with app.app_context():
job = Job.query.filter(Job.uuid == job_id).first()
outfile = os.path.join(workdir, job_id + '.zip')
outdir = os.path.join(workdir, 'output')
tmpdir = os.path.join(workdir, 'tmpdir')
curdir = os.getcwd()
if not os.path.exists(outdir):
os.makedirs(outdir)
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
try:
if not job:
raise Exception, "Failed to find job: %d" % job_id
job.status = Job.Status.PROCESSING
db.session.commit()
def percent_callback(percent):
percent = int(percent)
if percent > job.percentage:
job.percentage = percent
# db.session.commit()
xls_to_orders(input_file, outdir, tmpdir, percent_callback, job, test_mode)
outfile = os.path.abspath(outfile)
os.chdir(outdir)
zf = zipfile.ZipFile(
outfile, "w", compression=zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk("."):
if root <> ".":
zf.write(root)
for filename in files:
filepath = os.path.join(root, filename)
zf.write(filepath, arcname=filepath.decode('utf8'))
zf.close()
job.completion_time = datetime.datetime.utcnow()
job.status = Job.Status.COMPLETED
db.session.commit()
except Exception, inst:
db.session.rollback()
import traceback
            traceback.print_exc(file=sys.stderr)
job.message = "Format error: %s" % inst.message.encode('utf8')
job.status = Job.Status.FAILED
db.session.commit()
finally:
os.chdir(curdir)
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
if os.path.exists(outdir):
shutil.rmtree(outdir)
```
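Because `batch_order` is wrapped with flask-rq's `@job` decorator, the web side only has to enqueue it. A minimal sketch of the producing call, assuming the decorator exposes `.delay()` as in plain RQ; note that a `Job` row with the matching uuid must already exist and be committed, since `batch_order` looks it up and fails otherwise:
```python
# Hypothetical enqueueing helper (names are illustrative):
def enqueue_batch(job_uuid, uploaded_xlsx_path, workdir):
    batch_order.delay(job_uuid, uploaded_xlsx_path, workdir, test_mode=False)
```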
#### File: app/main/postorder.py
```python
import cStringIO, json
import re, math, random
import sys
import os
import shutil
import zipfile
import datetime
from cStringIO import StringIO
from optparse import OptionParser
from jinja2 import Environment, FileSystemLoader
import pandas as pd
import barcode
from barcode.writer import ImageWriter
from weasyprint import HTML, CSS
from wand.image import Image
from wand.color import Color
from PyPDF2 import PdfFileMerger, PdfFileReader, PdfFileWriter
from sqlalchemy import desc, asc, Index, UniqueConstraint, and_
from openpyxl import load_workbook
from openpyxl.utils.dataframe import dataframe_to_rows
from flask import current_app
from pdf2image import convert_from_path, convert_from_bytes
from faker import Faker
from unidecode import unidecode
from ..models import City, Order, ProductInfo
from .. import db
from ..util import time_to_filename
Code128 = barcode.get_barcode_class('code128')
PROVINCE_INFO_MAP = {
u"湖南": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"广西": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"海南": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"江西": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"福建": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"湖北": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"江苏": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"上海": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"浙江": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"河北": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"安徽": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"河南": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"山东": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"山西": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"陕西": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"贵州": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"云南": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"重庆": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"四川": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"北京": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"天津": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"辽宁": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"黑龙江": {'ticket_initial': 9, 'package_type': u"快递包裹"},
u"广东": {'ticket_initial': 1, 'package_type': u"快递包裹"},
u"内蒙古": {'ticket_initial': 1, 'package_type': u"快递包裹"},
u"甘肃": {'ticket_initial': 1, 'package_type': u"快递包裹"},
u"青海": {'ticket_initial': 1, 'package_type': u"快递包裹"},
u"宁夏": {'ticket_initial': 1, 'package_type': u"快递包裹"},
u"吉林": {'ticket_initial': 1, 'package_type': u"快递包裹"},
u"西藏": {'ticket_initial': 1, 'package_type': u"快递包裹"},
u"新疆": {'ticket_initial': 1, 'package_type': u"快递包裹"},
}
PROVINCE_NAMES = [City.normalize_province(name) for name in PROVINCE_INFO_MAP.keys()]
ADDRESS_LIMIT_FIXTURES = [u'维吾尔自治区', u'壮族自治区', u'回族自治区', u'古自治区', u'自治区', u'特别行政区', u'省', u'市',
u'盟', u'县', u'区']
ITEM_NAME_RE = re.compile(
ur"^.*?((([123一二三])|([4四]))段|(\d+)g)$", flags=re.U | re.I)
ITEM_NAME_MAP_INFO = {
u"爱他美1段": {
"net_weight": 0.9,
"gross_weights": {
4: 4.35,
6: 7,
},
"price_per_kg": 89.06,
"full_name": u"爱他美奶粉1段900g",
},
u"爱他美2段": {
"net_weight": 0.9,
"gross_weights": {
4: 4.35,
6: 7,
},
"price_per_kg": 89.06,
"full_name": u"爱他美奶粉2段900g",
},
u"爱他美3段": {
"net_weight": 0.9,
"gross_weights": {
4: 4.35,
6: 7,
},
"price_per_kg": 89.06,
"full_name": u"爱他美奶粉3段900g",
},
u"爱他美4段": {
"net_weight": 0.8,
"gross_weights": {
4: 3.90,
6: 6,
},
"price_per_kg": 86.50,
"full_name": u"爱他美奶粉4段800g",
},
u"牛栏1段": {
"net_weight": 0.9,
"gross_weights": {
4: 4.35,
6: 7,
},
"price_per_kg": 89.06,
"full_name": u"牛栏奶粉1段900g",
},
u"牛栏2段": {
"net_weight": 0.9,
"gross_weights": {
4: 4.35,
6: 7,
},
"price_per_kg": 86.50,
"full_name": u"牛栏奶粉2段900g",
},
u"牛栏3段": {
"net_weight": 0.9,
"gross_weights": {
4: 4.35,
6: 7,
},
"price_per_kg": 86.50,
"full_name": u"牛栏奶粉3段900g",
},
u"牛栏4段": {
"net_weight": 0.8,
"gross_weights": {
4: 3.90,
6: 6,
},
"price_per_kg": 94.30,
"full_name": u"牛栏奶粉4段800g",
},
}
TAX_CODE_MAP = {
'01010700': (u'奶粉', u'罐'),
}
def calculate_item_info(n_row, item_name, item_count):
item_name = "".join(item_name.strip().split()).decode("utf8")
if not item_name in ITEM_NAME_MAP_INFO:
raise Exception, u"第%d行包含未注册商品名称: %s" % (n_row + 1, item_name)
info = ITEM_NAME_MAP_INFO[item_name]
if not item_count in info["gross_weights"]:
raise Exception, u"第%d行商品[%s]包含未注册数量:%d" % (
n_row + 1, item_name, item_count)
return info["net_weight"] * item_count * info["price_per_kg"], info["net_weight"] * item_count, \
info["gross_weights"][item_count], info["price_per_kg"], \
info["full_name"] if "full_name" in info else item_name
def calculate_item_info_from_db(n_row, item_name, item_count):
item_name = "".join(item_name.strip().split()).decode("utf8")
search_result = ProductInfo.find_product_and_weight(item_name, item_count)
if not search_result:
raise Exception, u"第%d行包含未注册商品名称和箱件数 %s[%d件]" % (
n_row + 1, item_name, item_count)
product_info, gross_weight_per_box = search_result
return product_info.net_weight * item_count * product_info.price_per_kg, product_info.net_weight * item_count, \
gross_weight_per_box, product_info.price_per_kg, \
product_info.full_name if product_info.full_name else item_name
def calculate_item_info_from_db_without_product_info(n_row, item_name, item_count):
item_name = "".join(item_name.strip().split()).decode("utf8")
product_info = ProductInfo.query.filter(and_(ProductInfo.name==item_name, ProductInfo.deprecated==False)).first()
if not product_info:
raise Exception, u"第%d行包含未注册商品: %s" % (n_row + 1, item_name)
# from sqlalchemy import inspect
# for c in inspect(product_info).mapper.column_attrs:
# print c.key, getattr(product_info, c.key)
if product_info.unit_price is None or product_info.gross_weight is None or product_info.unit_per_item is None \
or product_info.tax_code is None or product_info.billing_unit is None \
or product_info.billing_unit_code is None or product_info.specification is None \
or not product_info.full_name:
raise Exception, u"第%d行商品 [%s] 注册信息不完整" % (n_row + 1, item_name)
waybill_name = product_info.waybill_name if product_info.waybill_name else product_info.full_name
return product_info.unit_price * product_info.unit_per_item * item_count, product_info.net_weight * item_count, \
product_info.gross_weight * item_count, product_info.unit_price, \
product_info.full_name, \
product_info.net_weight, product_info.tax_code, product_info.billing_unit, product_info.billing_unit_code, \
product_info.unit_per_item, product_info.specification, waybill_name
class NoTextImageWriter(ImageWriter):
def __init__(self):
super(NoTextImageWriter, self).__init__()
def _paint_text(self, xpos, ypos):
pass
def random_date():
now = datetime.datetime.now()
start = now + datetime.timedelta(days=-21)
end = now + datetime.timedelta(days=-1)
delta = end - start
int_delta = (delta.days * 24 * 60 * 60) + delta.seconds
random_second = random.randrange(int_delta)
candidate = start + datetime.timedelta(seconds=random_second)
if 8 <= candidate.hour <= 18:
pass
else:
candidate = candidate.replace(hour=random.randrange(8, 18))
return candidate
TICKET_SELECTIONS = [
{
'template': 'tickets/tesco.html',
'images': ['static/img/tickets/TESCO-1.jpg', 'static/img/tickets/TESCO-2.jpg'],
'serial_number_len': 13
},
{
'template': 'tickets/asda.html',
'images': ['static/img/tickets/ASDA-1.jpg',],
'serial_number_len': 13
},
{
'template': 'tickets/morrisons.html',
'images': ['static/img/tickets/Morrisons-1.jpg', 'static/img/tickets/Morrisons-2.jpg', ],
'serial_number_len': 13
},
{
'template': 'tickets/quality_safe.html',
'images': ['static/img/tickets/QualitySafe-1.jpg', 'static/img/tickets/QualitySafe-2.jpg', ],
'serial_number_len': 8
},
]
def generate_tickets_from_mapping_file(input_xlsx, mapping_xlsx, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if os.path.isdir(input_xlsx):
input_xlsxs = map(lambda x:os.path.join(input_xlsx, x),
filter(lambda x:x.lower().endswith('.xlsx'), os.listdir(input_xlsx)))
else:
input_xlsxs = [input_xlsx]
input_dfs = [pd.read_excel(input_xlsx, skiprows=[0, ], converters={
u'分运单号': lambda x: str(x),
u'物品名称': lambda x: str(x),
}) for input_xlsx in input_xlsxs]
input_df = pd.concat(input_dfs)
print >> sys.stderr, "%d total tracking numbers" % len(input_df[u'分运单号'].drop_duplicates().index)
mapping_df = pd.read_excel(mapping_xlsx, converters={
u'物品名称': lambda x: str(x),
u'小票名称': lambda x: str(x),
u'小票价格': lambda x: float(x),
})
duplicates = mapping_df.groupby(u'物品名称').filter(lambda x: len(x) > 1).drop_duplicates(subset=u'物品名称')
if len(duplicates) > 0:
print >> sys.stderr, duplicates
raise Exception, "duplicates"
combined_df = pd.merge(input_df, mapping_df, on=u'物品名称', how='left')
unregisted_products = set()
for column in (u"小票名称", u"小票价格"):
null_valued = pd.isnull(combined_df[column])
if null_valued.any():
product_name_null_valued = combined_df[null_valued][u'物品名称'].drop_duplicates() \
.map(lambda x: str(x)).tolist()
unregisted_products |= set(product_name_null_valued)
if len(unregisted_products) > 0:
with open(os.path.join(output_dir, 'products.txt'), 'w') as f:
for product in sorted(unregisted_products):
print >> f, product
raise Exception, "contains unregisted product"
for input_xlsx, input_df in zip(input_xlsxs, input_dfs):
print >> sys.stderr, "processing: ", input_xlsx
print >> sys.stderr, "%d tracking numbers" % len(input_df[u'分运单号'].drop_duplicates().index)
combined_df = pd.merge(input_df, mapping_df, on=u'物品名称', how='left')
ticket_info = {
'groups': combined_df[[u'分运单号', u"小票名称", u'件数', u"小票价格"]].groupby(u'分运单号'),
'item_column': u"小票名称",
'count_column': u'件数',
'price_column': u"小票价格",
}
ticket_dir = os.path.join(output_dir, os.path.splitext(os.path.basename(input_xlsx))[0])
if not os.path.exists(ticket_dir):
os.makedirs(ticket_dir)
generate_tickets(ticket_info, ticket_dir, suffix='.jpg')
if os.path.exists(ticket_dir):
shutil.make_archive(ticket_dir, 'zip', ticket_dir)
shutil.rmtree(ticket_dir)
def generate_tickets(ticket_info, ticket_dir, suffix='.jpg'):
item_column = ticket_info['item_column']
count_column = ticket_info['count_column']
price_column = ticket_info['price_column']
faker = Faker()
def format_number(x):
if isinstance(x, float) or isinstance(x, int):
return "%.2f" % x
return map(lambda x:"%.2f" % x, x)
def random_context(selections):
supermarket = random.choice(selections)
supermarket['image'] = random.choice(supermarket['images'])
return supermarket
generated_count = 0
for name, group in ticket_info['groups']:
filename = os.path.join(ticket_dir, name + suffix)
env = Environment(loader=FileSystemLoader('templates'))
total = 0
total_items = 0
for price, count in zip(group[price_column], group[count_column]):
total += price * count
total_items += count
paid = int(math.ceil(total / 10.0)) * 10
change = paid - total
context = random_context(TICKET_SELECTIONS)
context.update({
'breakdown': zip(group[item_column], group[count_column], format_number(group[price_column])),
'total': format_number(total),
'paid': format_number(paid),
'change': format_number(change),
'points': int(total * 100),
'timestamp': random_date(),
'serial_number': faker.ean(length=context['serial_number_len']),
'total_items': total_items,
})
template = env.get_template(context['template'])
output_from_parsed_template = template.render(context)
png_data = HTML(string=output_from_parsed_template, base_url='.').write_png(
stylesheets=["static/css/ticket_style.css"], resolution=150)
im = Image(blob=png_data)
im.trim(Color('white'))
im.format = 'jpeg'
im.trim(Color('black'))
im.save(filename=filename)
generated_count += 1
print >> sys.stderr, "%d tickets generated" % generated_count
def generate_pdf(ticket_number, filename, context, tmpdir):
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
bot_image = os.path.join(tmpdir, 'bot_barcode_trim.png')
top_image = os.path.join(tmpdir, 'top_barcode_trim.png')
if os.path.exists(bot_image):
os.remove(bot_image)
if os.path.exists(top_image):
os.remove(top_image)
Code128(ticket_number, writer=NoTextImageWriter()).save(os.path.join(tmpdir, 'top_barcode'), options={
'module_height': 5,
'text_distance': 0.5,
'quiet_zone': 1,
'dpi': 1200,
'font_size': 20,
})
im = Image(filename=os.path.join(tmpdir, 'top_barcode.png'))
im.trim()
im.save(filename=top_image)
Code128(ticket_number, writer=NoTextImageWriter()).save(os.path.join(tmpdir, 'bot_barcode'), options={
'module_height': 5,
'text_distance': 0.5,
'quiet_zone': 1,
'dpi': 1200,
'font_size': 20,
})
im = Image(filename=os.path.join(tmpdir, 'bot_barcode.png'))
im.trim()
im.save(filename=bot_image)
if not os.path.exists(top_image) or not os.path.exists(bot_image):
raise Exception, "Image failed to create"
env = Environment(loader=FileSystemLoader('templates'))
template = env.get_template('barcode_fast_track.html')
context['time'] = datetime.datetime.now()
context['bot_image'] = bot_image
context['top_image'] = top_image
output_from_parsed_template = template.render(context)
#
# with codecs.open(filename + ".html", "wb", encoding='utf8') as fh:
# fh.write(output_from_parsed_template)
HTML(string=output_from_parsed_template, base_url='.').write_pdf(
filename, stylesheets=["static/css/style.css"])
def fetch_ticket_number(n_row, receiver_city):
city_name = "".join(receiver_city.strip().split())
cities = City.find_province_path(city_name)
if not cities:
raise Exception, "Cannot find province: %s at row %d" % (
city_name, n_row)
if not cities[0].name in PROVINCE_INFO_MAP:
raise Exception, "Post to province %s is not supported: %s at row %d" % (
city_name, n_row)
info = PROVINCE_INFO_MAP[cities[0].name]
order = Order.pick_unused()
if order is None:
raise Exception, u"订单号不足"
province_name, municipal_name, address_header = City.normalize_province_path(
cities)
return info['package_type'], order, province_name, municipal_name, address_header
def process_row(n_row, in_row, barcode_dir, tmpdir, job=None, ticket_number_generator=None):
p_data = []
sender_name = in_row[u'发件人名字']
sender_phone = in_row[u'发件人电话号码']
sender_address = in_row[u'发件人地址']
receiver_name = in_row[u'收件人名字(中文)']
receiver_mobile = in_row[u'收件人手机号(11位数)']
receiver_address = in_row[u'收件人地址(无需包括省份和城市)']
receiver_city = in_row[u'收件人城市(中文)']
receiver_post_code = in_row[u'收件人邮编']
n_package = in_row.get(u'物品种类数量', None)
if not n_package:
n_package = in_row.get(u'包裹数量', None)
package_weight = in_row[u'包裹重量(公斤)']
length = in_row[u'长(厘米)']
width = in_row[u'宽(厘米)']
height = in_row[u'高(厘米)']
id_number = in_row[u'身份证号(EMS需要)']
for check_field in (sender_name, sender_phone, sender_address, receiver_name, receiver_mobile, receiver_address,
receiver_city, receiver_post_code, id_number):
if pd.isnull(check_field) or not isinstance(check_field, basestring) or not check_field.strip():
raise Exception, u"第%d行数据不完整,请更正" % n_row
if pd.isnull(n_package) or not isinstance(n_package, int) or n_package < 1:
raise Exception, u"第%d行 物品种类数量 或者 包裹数量 异常" % n_row
sender_name = "".join(sender_name.split())
sender_address = "".join(sender_address.split())
sender_phone = "".join(sender_phone.split())
receiver_name = "".join(receiver_name.split())
receiver_mobile = "".join(receiver_mobile.split())
receiver_address = "".join(receiver_address.split())
receiver_city = "".join(receiver_city.split())
receiver_post_code = "".join(receiver_post_code.split())
id_number = "".join(id_number.split())
package_type, order, receiver_province, receiver_municipal, receiver_address_header = \
fetch_ticket_number(n_row, receiver_city)
receiver_city = receiver_municipal
receiver_address = receiver_address_header + receiver_address
pc_text = receiver_province + receiver_municipal
receiver_province_city_font_size = "3" if len(
pc_text) <= 10 else "2.5" if len(pc_text) <= 15 else "2"
if not ticket_number_generator:
order.used = True
order.used_time = datetime.datetime.utcnow()
order.sender_address = ", ".join(
(sender_name, sender_address, sender_phone))
order.receiver_address = ", ".join(
(receiver_address, receiver_city, receiver_post_code))
order.receiver_mobile = receiver_mobile
order.receiver_id_number = id_number
order.receiver_name = receiver_name
if job:
order.job = job
job.version = "v3"
ticket_number = order.order_number
else:
ticket_number = ticket_number_generator.next()
full_address = "".join(filter(
lambda x: x.strip(), (receiver_province, receiver_city, receiver_address)))
p_data_list = []
item_names = []
total_price = 0
total_item_count = 0
total_net_weight = 0
total_gross_weight = 0
for i in xrange(n_package):
suffix = "" if i == 0 else ".%d" % i
item_name = in_row[u'申报物品%d(英文)' % (i + 1)]
item_count = in_row[u'数量%s' % suffix]
unit_price = in_row[u'物品单价(英镑)%s' % suffix]
if item_name is None or pd.isnull(item_name):
raise Exception, u"第%d行第%d个商品名称为空" % (n_row, i + 1)
item_name = str(item_name).strip()
sub_total_price, net_weight, gross_weight, unit_price, item_full_name, net_weight_per_item, tax_code, \
billing_unit, billing_unit_code, unit_per_item, specification, waybill_name \
= calculate_item_info_from_db_without_product_info(n_row, item_name, item_count)
item_names.append(u"%s\u2736%d" % (waybill_name, item_count))
total_price += sub_total_price
total_item_count += item_count
total_net_weight += net_weight
total_gross_weight += gross_weight
p_data_list.append([
ticket_number, sender_name, sender_address, sender_phone, receiver_name, receiver_mobile, receiver_city if receiver_city else receiver_province,
receiver_post_code, full_address, item_full_name, item_count, sub_total_price, gross_weight, item_full_name,
net_weight, unit_price, u"CNY", id_number,
City.denormalize_province(receiver_province),
City.denormalize_municipality(receiver_city if receiver_city else receiver_province)
])
# for p in p_data_list:
# p[10] = total_item_count
# p[11] = total_price
# p[12] = total_gross_weight
# p_data.append(p)
p_data = p_data_list
total_price = "%.2f" % total_price
if total_price.endswith(".00") and len(total_price) > 3:
total_price = total_price[:-3]
item_names = ", ".join(item_names)
generate_pdf(ticket_number, os.path.join(
barcode_dir, '%s.pdf' % ticket_number), locals(), tmpdir)
return ticket_number, pd.DataFrame(p_data, columns=[
u'快件单号', u'发件人', u'发件人地址', u'电话号码', u'收件人', u'电话号码.1', u'城市',
u'邮编', u'收件人地址', u'内件名称', u'数量', u'总价(元)', u'毛重(KG)', u'物品名称',
u'数量.1', u'单价', u'币别', u'备注', 'province', 'city'
])
def normalize_columns(in_df):
in_df.columns = map(lambda x: "".join(x.strip().split()), in_df.columns)
def xls_to_orders(input, output, tmpdir, percent_callback=None, job=None, test_mode=False):
if percent_callback:
percent_callback(0)
in_df = pd.read_excel(input, converters={
u'发件人电话号码': lambda x: str(x),
u'收件人邮编': lambda x: str(x),
u'收件人手机号\n(11位数)': lambda x: str(x),
u'身份证号\n(EMS需要)': lambda x: str(x),
u'收件人手机号(11位数)': lambda x: str(x),
u'身份证号(EMS需要)': lambda x: str(x),
u'包裹数量': lambda x: int(x),
u'物品种类数量': lambda x: int(x),
})
if 'MAX_ORDER_PER_BATCH' in current_app.config \
and len(in_df.index) > current_app.config['MAX_ORDER_PER_BATCH']:
raise Exception, u"该批次个数(%d)超过最大订单数: %d" % \
(len(in_df.index), current_app.config['MAX_ORDER_PER_BATCH'])
normalize_columns(in_df)
package_columns = [u"报关单号", u'总运单号', u'袋号', u'快件单号', u'发件人', u'发件人地址',
u'电话号码', u'收件人', u'电话号码.1', u'城市', u'邮编', u'收件人地址', u'内件名称',
u'数量', u'总价(元)', u'毛重(KG)', u'税号', u'物品名称', u'品牌', u'数量.1',
u'单位', u'单价', u'币别', u'备注', 'province', 'city']
package_df = pd.DataFrame([], columns=package_columns)
package_data = [package_df]
barcode_dir = os.path.join(output, "barcode")
if not os.path.exists(barcode_dir):
os.makedirs(barcode_dir)
ticket_numbers = []
ticket_number_set = set()
test_ticket_number_generator = None
if test_mode:
def ticket_number_generator():
start_number = 1
while True:
yield "TEST%s" % str(start_number).zfill(8)
start_number += 1
test_ticket_number_generator = ticket_number_generator()
if job:
job.version = "test_mode"
for index, in_row in in_df.iterrows():
ticket_number, p_data = process_row(
index, in_row, barcode_dir, tmpdir, job, test_ticket_number_generator)
if ticket_number in ticket_number_set:
raise Exception, u"同批次单号%s重复,请联系客服!" % ticket_number
ticket_number_set.add(ticket_number)
ticket_numbers.append(ticket_number)
package_data.append(p_data)
if percent_callback:
percent_callback(int(index * 100.0 / len(in_df.index)))
waybills = []
total_page_number = 0
merger = PdfFileMerger()
for ticket_number in ticket_numbers:
pdf_file = os.path.join(barcode_dir, "%s.pdf" % ticket_number)
if not os.path.exists(pdf_file):
raise Exception, "Failed to generate pdf: %s" % ticket_number
pdf_file_reader = PdfFileReader(file(pdf_file, 'rb'))
page_number = pdf_file_reader.getNumPages()
waybills.append({
'tracking_no': ticket_number,
'start_page': total_page_number,
'end_page' : total_page_number + page_number,
})
total_page_number += page_number
merger.append(pdf_file_reader)
merger.write(os.path.join(output, u"面单.pdf".encode('utf8')))
with open(os.path.join(output, "waybills.json"), 'w') as outfile:
json.dump(waybills, outfile)
shutil.rmtree(barcode_dir)
package_final_df = pd.concat(package_data, ignore_index=True)
package_final_df[u'税号'] = '01010700'
package_final_df[u'单位'] = u'千克'
package_final_df.index += 1
package_final_df.to_excel(os.path.join(output, u"机场报关单.xlsx".encode('utf8')),
columns=package_columns, index_label="NO")
if percent_callback:
percent_callback(100)
def read_order_numbers(inxlsx):
columns = [u'提取单号', u'分运单号', u'快件单号', u'物流运单编号']
df = pd.read_excel(inxlsx, converters={
key: lambda x: str(x) for key in columns
})
column = None
for key in columns:
if key in df:
column = key
if not column:
raise Exception, u"输入Excel格式错误"
order_numbers = df[column].unique()
if len(order_numbers) <= 0:
raise Exception, u"输入[%s]列为空" % column
return order_numbers
def generate_customs_df(route_config, version, package_df):
route_name = route_config['name']
if version <> "v3":
raise Exception, "Version not supported for generate_customs_df: %s" % version
package_df["Sequence"] = range(1, len(package_df.index) + 1)
customs_columns = [u'分运单号', u'申报类型', u'物品名称', u'英文物品名称', u'商品编码', u'净重(KG)', u'毛重(KG)',
u'规格/型号', u'产销城市', u'币制', u'申报数量', u'申报总价', u'申报计量单位', u'收件人', u'收件人城市',
u'收件人地址', u'收件人电话', u'发件人国家', u'发件人', u'英文发件人', u'发件人城市', u'英文发件人城市',
u'英文经停城市', u'发件人地址', u'英文发件人地址', u'发件人电话', u'收发件人证件类型', u'收发件人证件号',
u'包装种类', u'是否含木质包装', u'是否为旧物品', u'是否为低温运输', u'生产国别', u'贸易国别']
customs_df = pd.DataFrame([], columns=customs_columns)
for column, p_column in ((u'分运单号', u'快件单号'),
(u'物品名称', u'内件名称'),
(u'数量', u'数量'),
(u'毛重(KG)', u'毛重(KG)'),
(u'收件人', u'收件人'),
(u'收发件人证件号', u'备注'),
(u'收件人城市', u'city'),
(u'收件人地址', u'收件人地址'),
(u'收件人电话', u'电话号码.1'),
(u'发件人', u'发件人'),
(u'英文发件人', u'发件人'),
(u'发件人地址', u'发件人地址'),
(u'英文发件人地址', u'发件人地址'),
(u'发件人电话', u'电话号码'),
('Sequence', 'Sequence')):
customs_df[column] = package_df[p_column]
#fill in bc product info
product_info_df = pd.read_sql_query(ProductInfo.query.filter(ProductInfo.full_name.in_(
tuple(set(customs_df[u'物品名称'].map(lambda x: str(x)).tolist())))).statement, db.session.bind)
columns_to_delete = product_info_df.columns
product_info_df.rename(columns={'full_name': u'物品名称'}, inplace=True)
customs_df = pd.merge(customs_df, product_info_df, on=u'物品名称')
product_info_columns = [(u"申报单价", "unit_price"),
(u"商品编码", "tax_code"),
(u"规格/型号", "specification"),
(u"申报计量单位", "billing_unit_code")]
# check if any is empty
for column, _column in product_info_columns \
+ [(u"单个物品申报数量", "unit_per_item"),
(u"小票名称", "ticket_name"),
(u"小票价格", "ticket_price")]:
null_valued = pd.isnull(customs_df[_column])
if null_valued.any():
product_name_null_valued = customs_df[null_valued][u'物品名称'].drop_duplicates() \
.map(lambda x: str(x)).tolist()
raise Exception, u"如下商品的注册信息未包含必须字段[%s]: %s" % \
(column, ", ".join(product_name_null_valued))
ticket_info = {
'groups': customs_df[[u'分运单号', "ticket_name", u'数量', "ticket_price"]].groupby(u'分运单号'),
'item_column': 'ticket_name',
'count_column': u'数量',
'price_column': 'ticket_price',
}
for column, p_column in product_info_columns:
customs_df[column] = customs_df[p_column]
def customs_column_filter(row):
row[u"物品名称"] = row[u"物品名称"] if pd.isnull(row["report_name"]) else row["report_name"]
row[u"英文物品名称"] = row["ticket_name"]
row[u"净重(KG)"] = row[u"数量"] * row["net_weight"]
row[u'申报数量'] = row[u'数量'] * row["unit_per_item"]
row[u'申报总价'] = row[u'申报数量'] * row[u"申报单价"]
row[u'英文发件人'] = unidecode(row[u'发件人'])
row[u'英文发件人地址'] = unidecode(row[u'发件人地址'])
return row
customs_df = customs_df.apply(customs_column_filter, axis=1)
for column in columns_to_delete:
if column in customs_df:
del customs_df[column]
customs_df.sort_values(by=["Sequence"], inplace=True)
#fixed items
customs_df[u"申报类型"] = "B"
customs_df[u"产销城市"] = u"曼彻斯特"
customs_df[u"币制"] = "142"
customs_df[u"发件人国家"] = "303"
customs_df[u"发件人城市"] = "曼彻斯特"
customs_df[u"英文发件人城市"] = "Manchester"
customs_df[u"收发件人证件类型"] = "1"
customs_df[u"包装种类"] = "2"
customs_df[u"是否含木质包装"] = "0"
customs_df[u"是否为旧物品"] = "0"
customs_df[u"是否为低温运输"] = "0"
customs_df[u"生产国别"] = "303"
customs_df[u"贸易国别"] = "303"
#sort
customs_df.sort_values(by=["Sequence"], inplace=True)
del customs_df["Sequence"]
del package_df["Sequence"]
del customs_df[u"申报单价"]
del customs_df[u"数量"]
return customs_df, ticket_info
def generate_summary_wb(customs_df):
def summary_each_tax_code(group):
tax_code_column = group[u'商品编码'].unique()
assert(len(tax_code_column) == 1)
tax_code = tax_code_column[0]
name, unit = TAX_CODE_MAP.get(tax_code, ('', ''))
return pd.Series({
u'序号': '',
u'商品编号': tax_code,
u'物品名称': name,
u'件数(纸箱)': len(group[u'分运单号'].unique()),
u'重量': group[u'毛重(KG)'].sum(),
u'数量': group[u'数量'].sum(),
u'单位': unit,
u'币制': 'RMB',
u'价值': '',
u'备注': '',
})
columns = (u'序号', u'商品编号', u'物品名称', u'件数(纸箱)', u'重量', u'数量', u'单位', u'币制', u'价值', u'备注')
summary_df = customs_df.groupby(u'商品编码').apply(summary_each_tax_code)
summary_df = summary_df.reindex(columns=columns)
summary_df[u'序号'] = range(1, len(summary_df.index)+1)
summary_df = summary_df.append(summary_df.sum(numeric_only=True), ignore_index=True)
for key, value in ((u'序号', ''), (u'商品编号', u'合计')):
summary_df.iloc[-1, summary_df.columns.get_loc(key)] = value
wb = load_workbook(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'summary_header.xlsx'))
ws = wb[u'汇总清单']
row_count = 0
for r in dataframe_to_rows(summary_df, index=False, header=False):
row_count += 1
ws.append(r)
return wb
def map_full_name_to_report_name(data_df, column_name):
if not column_name in data_df.columns:
raise Exception, "%s not in header" % column_name
product_info_df = pd.read_sql_query(ProductInfo.query.filter(ProductInfo.full_name.in_(
tuple(set(data_df[column_name].map(lambda x: str(x)).tolist())))).statement, db.session.bind)
columns_to_delete = product_info_df.columns
product_info_df.rename(columns={'full_name': column_name}, inplace=True)
data_df = pd.merge(data_df, product_info_df, on=column_name)
data_df[column_name] = data_df.apply(lambda row:row['report_name'] if row['report_name'] else row[column_name],
axis=1)
for column in columns_to_delete:
if column in data_df:
del data_df[column]
return data_df
def remap_customs_df(customs_final_df):
wb = load_workbook(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'cc_header.xlsx'))
ws = wb[u"申报单格式"]
row_count = 0
for r in dataframe_to_rows(customs_final_df, index=False, header=False):
row_count += 1
ws.append(r)
# merge cell for this one
# base_index = 7
# last_value = 0
# last_row_num = None
# columns = (1, 2, 4, 15, 16, 17, 18, 19, 20, 22, 23, 24, 26)
#for row_num in range(base_index, base_index + row_count):
# rd = ws.row_dimensions[row_num]
# rd.height = 50
# is_last_row = (row_num == base_index + row_count - 1)
#
# package_index = int(ws.cell(row=row_num, column=1).value)
# assert (package_index > 0)
# if last_value <= 0:
# last_value = package_index
# last_row_num = row_num
# else:
# if is_last_row or last_value != package_index:
# if row_num > last_row_num + 1 or (is_last_row and row_num > last_row_num and last_value == package_index):
# start_row = last_row_num
# end_row = row_num if is_last_row else row_num - 1
# for _row_num in range(start_row, end_row):
# for column in columns:
# first_value = ws.cell(row=_row_num, column=column).value
# second_value = ws.cell(row=end_row, column=column).value
# assert ((isinstance(first_value, float) and isinstance(second_value, float) and
# math.isnan(first_value) and math.isnan(second_value))
# or (first_value == second_value))
# for column in columns:
# ws.merge_cells(start_row=start_row, start_column=column,
# end_row=end_row, end_column=column)
# last_value = package_index
# last_row_num = row_num
return wb
def retract_from_order_numbers(download_folder, order_numbers, output, route_config, retraction=None):
route_name = route_config['name']
route_code = route_config['code']
if not output is None:
waybill_dir = os.path.join(output, u"面单")
ticket_dir = os.path.join(output, u"小票")
if not os.path.exists(waybill_dir):
os.makedirs(waybill_dir)
if not os.path.exists(ticket_dir):
os.makedirs(ticket_dir)
# find all jobs and job to order number map
receiver_sig_to_order_numbers = {}
all_order_numbers = set()
uuid_to_order_numbers = {}
job_versions = {}
for i, order_number in enumerate(order_numbers):
order_number = str(order_number).strip()
if order_number in all_order_numbers:
continue
else:
all_order_numbers.add(order_number)
order = Order.find_by_order_number(order_number)
if not order:
raise Exception, u"第%d行包含未上载订单号: %s" % (i + 1, order_number)
if not order.used:
raise Exception, u"第%d行包含未使用订单号: %s" % (i + 1, order_number)
if order.discarded_time:
raise Exception, u"第%d行包含弃用订单号: %s" % (i + 1, order_number)
        if retraction is not None and retraction.is_redo:  # redo of a previous retraction
if order.retraction is None:
raise Exception, u"第%d行订单号还未被提取: %s" % (i + 1, order_number)
else: #normal
if retraction is not None and order.retraction is not None:
raise Exception, u"第%d行订单号已被提取: %s, 提取信息为: Uuid [%s], 时间 [%s]" % \
(i + 1, order_number, order.retraction.uuid,
time_to_filename(order.retraction.timestamp))
receiver_sig = order.receiver_id_number
if not receiver_sig in receiver_sig_to_order_numbers:
receiver_sig_to_order_numbers[receiver_sig] = []
if len(receiver_sig_to_order_numbers[receiver_sig]) >= route_config['max_order_number_per_receiver']:
raise Exception, u"单个收件人超过最大订单数(%d): 第%d行订单(%s)与[ %s ]包含相同证件号码(%s), 收件人: %s" % \
(route_config['max_order_number_per_receiver'], i + 1, order_number,
" / ".join(["第%d行订单(%s)" % (x + 1, y)
for x, y in receiver_sig_to_order_numbers[receiver_sig]]),
receiver_sig, order.receiver_name)
receiver_sig_to_order_numbers[receiver_sig].append((i, order_number))
uuid = str(order.job.uuid)
if not uuid in uuid_to_order_numbers:
uuid_to_order_numbers[uuid] = set()
uuid_to_order_numbers[uuid].add(order_number)
job_versions[uuid] = order.job.version if order.job.version else "v1"
if retraction and not retraction.is_redo:
order.retraction = retraction
version_to_dfs = {}
for uuid, order_number_set in uuid_to_order_numbers.items():
version = job_versions[uuid]
if not version in version_to_dfs:
version_to_dfs[version] = {'package_dfs': [], 'customs_dfs' : []}
package_dfs = version_to_dfs[version]['package_dfs']
job_file = os.path.join(download_folder, uuid, uuid + '.zip')
if not os.path.exists(job_file):
raise Exception, u"历史数据丢失:%s" % uuid
with zipfile.ZipFile(job_file) as z:
if version == "v3":
package_df = pd.read_excel(z.open(u"机场报关单.xlsx"), index_col=0, converters={
u'快件单号': lambda x: str(x),
u'电话号码': lambda x: str(x),
u'电话号码.1': lambda x: str(x),
u'邮编': lambda x: str(x),
u'税号': lambda x: str(x),
u'备注': lambda x: str(x),
})
sub_package_df = package_df[
package_df[u"快件单号"].isin(order_number_set)]
#output waybill
if output:
waybills = json.load(z.open('waybills.json'))
pdf_data = StringIO(z.open(u"面单.pdf").read())
pdf_data.seek(0)
pdf = PdfFileReader(pdf_data)
page_count = pdf.getNumPages()
for waybill in waybills:
if waybill['tracking_no'] in order_number_set:
if waybill['end_page'] > page_count or waybill['start_page'] >= page_count:
raise Exception, "Waybill page length %d-%d larger than pdf length %d" % \
(waybill['start_page'], waybill['end_page'], page_count)
out_pdf = PdfFileWriter()
for i in xrange(waybill['start_page'], waybill['end_page']):
out_pdf.addPage(pdf.getPage(i))
pdf_content = StringIO()
out_pdf.write(pdf_content)
pdf_content.seek(0)
images = convert_from_bytes(pdf_content.read(), dpi=50)
if images:
images[0].save(os.path.join(waybill_dir, waybill['tracking_no'] + '.jpg'))
else:
raise Exception, "No jpg waybill generated for %s" % waybill['tracking_no']
else:
raise Exception, "Version not supported %s" % version
package_dfs.append(sub_package_df)
for version, data in version_to_dfs.iteritems():
package_dfs = data['package_dfs']
def validate_route(package_df):
if version == "v3":
product_col = u"内件名称"
order_number_col = u'快件单号'
count_col = u'数量'
else:
raise Exception, "Version not supported: %s" % version
products_exclude = route_config['products_exclude'] if 'products_exclude' in route_config else []
for product_exclude in products_exclude:
product_exclude = product_exclude.strip()
if product_exclude:
excluded = package_df[product_col].str.contains(product_exclude)
if excluded.any():
order_numbers_excluded = package_df[excluded][order_number_col].map(lambda x:str(x)).tolist()
raise Exception, u"如下订单号包含违禁产品[%s]: %s" % \
(product_exclude, ", ".join(order_numbers_excluded))
package_final_df = pd.concat(package_dfs, ignore_index=True)
package_final_df.index += 1
validate_route(package_final_df)
if output:
if version == "v3":
customs_final_df, ticket_info = generate_customs_df(route_config, version, package_final_df)
generate_tickets(ticket_info, ticket_dir)
wb = remap_customs_df(customs_final_df)
wb.save(os.path.join(output, u"西安申报单.xlsx".encode('utf8')))
del package_final_df["province"]
del package_final_df["city"]
package_final_df.to_excel(os.path.join(
output, u"机场报关单.xlsx".encode('utf8')), index_label="NO")
#summary_wb = generate_summary_wb(customs_final_df)
#summary_wb.save(os.path.join(output, u"提单号+物品汇总清单.xlsx".encode('utf8')))
if os.path.exists(waybill_dir):
shutil.make_archive(waybill_dir, 'zip', waybill_dir)
shutil.rmtree(waybill_dir)
if os.path.exists(ticket_dir):
shutil.make_archive(ticket_dir, 'zip', ticket_dir)
shutil.rmtree(ticket_dir)
else:
raise Exception, "Version not supported too %s" % version
return package_final_df
def is_dutiable(package_df, product_col, pieces):
dutiable = False
is_dutiable_category = False
if pieces == 4:
dutiable = True if package_df['dutiable_as_any_4_pieces'].any() else False
is_dutiable_category = package_df[product_col].str.contains(u'奶粉', regex=False).all(skipna=False)
elif pieces == 6:
# do use None value
dutiable = False if package_df['non_dutiable_as_all_6_pieces'].all(skipna=False) \
and len(package_df[product_col].unique()) == 1 else True
is_dutiable_category = True
return dutiable, is_dutiable_category
def load_order_info(download_folder, order, route_config):
version = order.job.version if order.job.version else "v1"
if version <> "v3":
raise Exception, "Error: Version"
route_code = route_config['code']
product_col = u"内件名称"
uuid = str(order.job.uuid)
job_file = os.path.join(download_folder, uuid, uuid + '.zip')
if not os.path.exists(job_file):
raise Exception, "Error: Missing File"
with zipfile.ZipFile(job_file) as z:
package_df = pd.read_excel(z.open(u"机场报关单.xlsx"), index_col=0, converters={
u'快件单号': lambda x: str(x),
u'电话号码': lambda x: str(x),
u'电话号码.1': lambda x: str(x),
u'邮编': lambda x: str(x),
u'税号': lambda x: str(x),
u'备注': lambda x: str(x),
})
sub_package_df = package_df[package_df[u"快件单号"] == order.order_number]
if len(sub_package_df.index) <= 0:
raise Exception, "Error: Empty Record"
products_exclude = route_config['products_exclude'] if 'products_exclude' in route_config else []
if products_exclude:
for product_exclude in products_exclude:
product_exclude = product_exclude.strip()
if product_exclude:
excluded = sub_package_df[product_col].str.contains(product_exclude)
if excluded.any():
raise Exception, "Error: Product Excluded"
pieces = sub_package_df[u"数量"].sum()
product_info_df = pd.read_sql_query(ProductInfo.query.filter(ProductInfo.full_name.in_(
tuple(set(sub_package_df[u'内件名称'].map(lambda x: str(x)).tolist())))).statement, db.session.bind)
product_info_df.rename(columns={'full_name': u'内件名称'}, inplace=True)
sub_package_df = pd.merge(sub_package_df, product_info_df, on=u'内件名称')
dutiable, is_dutiable_category = is_dutiable(sub_package_df, u'内件名称', pieces)
return sub_package_df, pieces, dutiable, is_dutiable_category
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-i", "--input", dest="input",
metavar="FILE", help="input file")
parser.add_option("-o", "--output", dest="output",
metavar="DIR", help="output dir")
parser.add_option("-t", "--tmpdir", dest="tmpdir",
metavar="DIR", help="tmpdir dir")
(options, args) = parser.parse_args()
if not options.input or not options.output or not options.tmpdir:
parser.print_help(sys.stderr)
exit(1)
if not os.path.exists(options.output):
os.makedirs(options.output)
try:
try:
xls_to_orders(options.input, options.output, options.tmpdir)
db.session.commit()
except Exception, inst:
db.session.rollback()
raise inst
except Exception, inst:
import traceback
        traceback.print_exc(file=sys.stderr)
print >> sys.stderr, inst.message.encode('utf-8')
```
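Besides the `OptionParser` entry point at the bottom, `xls_to_orders` is also driven by `jobs.batch_order` above. A hedged sketch of calling it programmatically inside the Flask app context; the import paths mirror the `app/main/postorder.py` layout shown here and the file names are placeholders:
```python
from app import app, db                       # assumed top-level package, as in "from .. import app, db"
from app.main.postorder import xls_to_orders  # assumed module path

def run_locally(input_xlsx, output_dir, tmp_dir):
    with app.app_context():
        def on_progress(percent):
            print("progress: %d%%" % percent)
        # test_mode=True issues TEST tracking numbers instead of consuming real orders
        xls_to_orders(input_xlsx, output_dir, tmp_dir,
                      percent_callback=on_progress, test_mode=True)
        db.session.commit()
```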
#### File: migrations/versions/0d7ed6e97606_.py
```python
revision = '0d7ed6e97606'
down_revision = '<KEY>'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('product_info', sa.Column('billing_unit', sa.String(length=32), nullable=True))
op.add_column('product_info', sa.Column('billing_unit_code', sa.String(length=32), nullable=True))
op.add_column('product_info', sa.Column('gross_weight', sa.Float(), nullable=True))
op.add_column('product_info', sa.Column('tax_code', sa.String(length=64), nullable=True))
op.add_column('product_info', sa.Column('unit_per_item', sa.Float(), nullable=True))
op.add_column('product_info', sa.Column('unit_price', sa.Float(), nullable=True))
op.alter_column('product_info', 'price_per_kg',
existing_type=postgresql.DOUBLE_PRECISION(precision=53),
nullable=True)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('product_info', 'price_per_kg',
existing_type=postgresql.DOUBLE_PRECISION(precision=53),
nullable=False)
op.drop_column('product_info', 'unit_price')
op.drop_column('product_info', 'unit_per_item')
op.drop_column('product_info', 'tax_code')
op.drop_column('product_info', 'gross_weight')
op.drop_column('product_info', 'billing_unit_code')
op.drop_column('product_info', 'billing_unit')
### end Alembic commands ###
``` |
{
"source": "jiejieji/Todolist",
"score": 2
} |
#### File: flutterapi/myapp/views.py
```python
from django.shortcuts import render
from django.http import JsonResponse
from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework import status
from .serializers import TodolistSerializer
from .models import Todolist
#GET Data
@api_view(['GET'])
def all_todolist(request):
    alltodolist = Todolist.objects.all()  # fetch all records from the Todolist model
serializer = TodolistSerializer(alltodolist,many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
# POST Data (save data to database)
@api_view(['POST'])
def post_todolist(request):
if request.method == 'POST':
serializer = TodolistSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data,status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)  # validation errors are a bad request, not a missing resource
@api_view(['PUT'])
def update_todolist(request,TID):
# localhost:8000/api/update-todolist/13
todo = Todolist.objects.get(id=TID)
if request.method == 'PUT':
data = {}
serializer = TodolistSerializer(todo,data=request.data)
if serializer.is_valid():
serializer.save()
data['status'] = 'update'
return Response(data=data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)  # validation errors are a bad request, not a missing resource
@api_view(['DELETE'])
def delete_todolist(request,TID):
todo = Todolist.objects.get(id=TID)
if request.method == 'DELETE':
delete = todo.delete()
data = {}
if delete:
data['status'] = 'delete'
statuscode = status.HTTP_200_OK
else:
data['status'] = 'Failed'
statuscode = status.HTTP_400_BAD_REQUEST
return Response(data=data, status=statuscode)
data = [
{
"title":"เบอร์เกอร์หมู",
"subtitle":"เป็นอาหารชนิดหนึ่งที่ถือว่าอยู่ในประเภทเดียวกับแซนด์วิช",
"image_url":"https://raw.githubusercontent.com/jiejieji/BasicAPI/main/hamber.jpg",
"detail":"แฮมเบอร์เกอร์ (อังกฤษ: hamburger) หรือเรียกสั้น ๆ ว่า เบอร์เกอร์ (burger) เป็นอาหารชนิดหนึ่งที่ถือว่าอยู่ในประเภทเดียวกับแซนด์วิช ประกอบด้วยเนื้อสัตว์ปรุงแล้วที่มีลักษณะเป็นแผ่นสอดไส้อยู่ตรงกลาง อาทิ เนื้อวัว เนื้อหมู เนื้อปลาทอด หรือเป็นเนื้อสัตว์หลายประเภทผสมกัน ประกบบนล่างด้วยขนมปังแผ่นกลม /n/nมีการสอดไส้ด้วยผักชนิดต่างๆ เช่น มะเขือเทศ ผักกาดหอม หอมหัวใหญ่ ชีสและเครื่องปรุงรสอื่น เช่น มัสตาร์ด มายองเนส ซอสมะเขือเทศ เป็นต้น แฮมเบอร์เกอร์เป็นอาหารที่ได้รับความนิยมและแพร่หลายไปทั่วโลก"
},
{
"title":"เบอร์เกอร์ไก่",
"subtitle":"เป็นอาหารชนิดหนึ่งที่ถือว่าอยู่ในประเภทเดียวกับแซนด์วิช",
"image_url":"https://raw.githubusercontent.com/jiejieji/BasicAPI/main/chicken-burger.jpg",
"detail":"แฮมเบอร์เกอร์ (อังกฤษ: hamburger) หรือเรียกสั้น ๆ ว่า เบอร์เกอร์ (burger) เป็นอาหารชนิดหนึ่งที่ถือว่าอยู่ในประเภทเดียวกับแซนด์วิช ประกอบด้วยเนื้อสัตว์ปรุงแล้วที่มีลักษณะเป็นแผ่นสอดไส้อยู่ตรงกลาง อาทิ เนื้อวัว เนื้อหมู เนื้อปลาทอด หรือเป็นเนื้อสัตว์หลายประเภทผสมกัน ประกบบนล่างด้วยขนมปังแผ่นกลม /n/nมีการสอดไส้ด้วยผักชนิดต่างๆ เช่น มะเขือเทศ ผักกาดหอม หอมหัวใหญ่ ชีสและเครื่องปรุงรสอื่น เช่น มัสตาร์ด มายองเนส ซอสมะเขือเทศ เป็นต้น แฮมเบอร์เกอร์เป็นอาหารที่ได้รับความนิยมและแพร่หลายไปทั่วโลก"
},
{
"title":"ชีสเบอร์เกอร์",
"subtitle":"เป็นอาหารชนิดหนึ่งที่ถือว่าอยู่ในประเภทเดียวกับแซนด์วิช",
"image_url":"https://raw.githubusercontent.com/jiejieji/BasicAPI/main/burger.jpg",
"detail":"แฮมเบอร์เกอร์ (อังกฤษ: hamburger) หรือเรียกสั้น ๆ ว่า เบอร์เกอร์ (burger) เป็นอาหารชนิดหนึ่งที่ถือว่าอยู่ในประเภทเดียวกับแซนด์วิช ประกอบด้วยเนื้อสัตว์ปรุงแล้วที่มีลักษณะเป็นแผ่นสอดไส้อยู่ตรงกลาง อาทิ เนื้อวัว เนื้อหมู เนื้อปลาทอด หรือเป็นเนื้อสัตว์หลายประเภทผสมกัน ประกบบนล่างด้วยขนมปังแผ่นกลม /n/nมีการสอดไส้ด้วยผักชนิดต่างๆ เช่น มะเขือเทศ ผักกาดหอม หอมหัวใหญ่ ชีสและเครื่องปรุงรสอื่น เช่น มัสตาร์ด มายองเนส ซอสมะเขือเทศ เป็นต้น แฮมเบอร์เกอร์เป็นอาหารที่ได้รับความนิยมและแพร่หลายไปทั่วโลก"
}
]
def Home(request):
return JsonResponse(data=data,safe=False,json_dumps_params={'ensure_ascii': False})
``` |
{
"source": "jiejiekuawoshuai/jieji-8rKxeQws",
"score": 3
} |
#### File: app/api/decorators.py
```python
from functools import wraps
from flask import g
from .errors import forbidden
# permission_required decorator
def permission_required(permission):
    # A parameterized decorator factory: decorators targeting different permissions can reuse this implementation with only minimal changes (just pass a different argument).
def decorator(f):
        # The @wraps decorator from functools keeps the decorated function's special attributes (e.g. __name__) intact.
        # Without it the function name would be replaced, breaking the Flask endpoint (whose default value is the function name).
@wraps(f)
def decorated_function(*args, **kwargs):
            # *args captures any positional arguments as a tuple
            # **kwargs captures keyword arguments as a dict
            # The wrapper inherits the original function's parameters, so it effectively prepends this permission check to the original function.
if not g.current_user.can(permission):
                # current_user is taken from the server-side context (g); permission is bound to the
                # concrete permission we actually need to verify.
return forbidden('Insufficient permissions')
return f(*args, **kwargs)
            # Check passed: forward the arguments to the original function (f is the decorated function).
return decorated_function
return decorator
``` |
{
"source": "jiejieTop/pyqt5",
"score": 3
} |
#### File: pyqt5/wit/wit.py
```python
import time
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QMainWindow
from PyQt5 import QtCore, QtGui, QtWidgets
from Ui_wit import Ui_MainWindow
from Tencent import WISG
from Tencent_send import TencentSend
from PyQt5.QtCore import QTimer
class MainWindow(QMainWindow, Ui_MainWindow):
"""
Class documentation goes here.
"""
def __init__(self, parent=None):
"""
Constructor
@param parent reference to the parent widget
@type QWidget
"""
self.data = {
'pn':'null',
'pg':'null',
'bn':0,
'time':0,
'lv':0,
'speed':0,
'led':0,
'beep':0,
'sm':0,
'call':0,
'dn':'null'
}
super(MainWindow, self).__init__(parent)
self.setupUi(self)
@pyqtSlot()
def on_pushButton_clicked(self):
if(self.data['led'] == 0):
self.data['led'] = 1
else:
self.data['led'] = 0
tencent_send.send_data(self.data)
pass
@pyqtSlot()
def on_pushButton_2_clicked(self):
if(self.data['beep'] == 0):
self.data['beep'] = 1
else:
self.data['beep'] = 0
tencent_send.send_data(self.data)
pass
@pyqtSlot()
def on_pushButton_9_clicked(self):
self.data['pn'] = self.LineEdit.text()
self.data['pg'] = self.LineEdit_2.text()
self.data['dn'] = self.LineEdit_4.text()
self.data['bn'] = int(self.LineEdit_3.text())
self.data['lv'] = int(self.LineEdit_5.text())
self.data['speed'] = int(self.LineEdit_6.text())
self.data['time'] = int(self.LineEdit_7.text())
self.data['call'] = int(self.LineEdit_8.text())
self.data['led'] = int(self.LineEdit_9.text())
self.data['beep'] =int( self.LineEdit_10.text())
print(self.data)
tencent_send.send_data(self.data)
self.update_data(self.data)
print("更新信息")
pass
@pyqtSlot()
def on_pushButton_10_clicked(self):
self.LineEdit.setText('')
self.LineEdit_2.setText('')
self.LineEdit_3.setText('')
self.LineEdit_4.setText('')
self.LineEdit_5.setText('')
self.LineEdit_6.setText('')
self.LineEdit_7.setText('')
self.LineEdit_8.setText('')
self.LineEdit_9.setText('')
self.LineEdit_10.setText('')
#self.show_tencent_data()
print("清除信息")
pass
def show_tencent_data(self, show_data):
print("-----------")
print(show_data)
print("-----------")
self.update_data(show_data)
pass
def update_data(self, updata):
self.data = updata
self.LineEdit.setText(str(self.data['pn']))
self.LineEdit_2.setText(str(self.data['pg']))
self.LineEdit_3.setText(str(self.data['bn']))
self.LineEdit_4.setText(str(self.data['dn']))
self.LineEdit_5.setText(str(self.data['lv']))
self.LineEdit_6.setText(str(self.data['speed']))
self.LineEdit_7.setText(str(self.data['time']))
self.LineEdit_8.setText(str(self.data['call']))
self.LineEdit_9.setText(str(self.data['led']))
self.LineEdit_10.setText(str(self.data['beep']))
if(self.data['call']):
ui.graphicsView.setVisible(True)
else:
ui.graphicsView.setVisible(False)
pass
def timer_init(self):
        self.timer = QTimer(self)  # create a timer
        self.timer.timeout.connect(self.operate)  # call operate() when the timer fires
        self.timer.start(5000)  # set the interval (ms) and start the timer
pass
def operate(self):
data = tencent.extract_data()
self.show_tencent_data(data)
pass
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
ui = MainWindow()
    # hide the call indicator by default
ui.graphicsView.setVisible(False)
ui.setWindowIcon(QtGui.QIcon('./images/IOTOS.ico'))
ui.setWindowTitle("智慧医疗终端")
    # set the window background image
ui.setStyleSheet("#MainWindow{border-image:url(./images/python.jpg);}")
tencent = WISG()
tencent_send = TencentSend()
ui.timer_init()
ui.show()
sys.exit(app.exec_())
``` |
{
"source": "jiejohn/robotframework",
"score": 2
} |
#### File: robot/model/control.py
```python
import warnings
from robot.utils import setter, py3to2
from .body import Body, BodyItem
from .keyword import Keywords
from .tags import Tags
@py3to2
@Body.register
class For(BodyItem):
type = BodyItem.FOR_TYPE
body_class = Body
repr_args = ('variables', 'flavor', 'values')
__slots__ = ['variables', 'flavor', 'values']
def __init__(self, variables=(), flavor='IN', values=(), parent=None):
self.variables = variables
self.flavor = flavor
self.values = values
self.parent = parent
self.body = None
@setter
def body(self, body):
return self.body_class(self, body)
@property
def keywords(self):
"""Deprecated since Robot Framework 4.0. Use :attr:`body` instead."""
return Keywords(self, self.body)
@keywords.setter
def keywords(self, keywords):
Keywords.raise_deprecation_error()
@property
def source(self):
return self.parent.source if self.parent is not None else None
def visit(self, visitor):
visitor.visit_for(self)
def __str__(self):
variables = ' '.join(self.variables)
values = ' '.join(self.values)
return u'FOR %s %s %s' % (variables, self.flavor, values)
# TODO: Remove deprecated Keyword related properties in RF 4.1/5.0.
@property
def name(self):
"""Deprecated since Robot Framework 4.0."""
warnings.warn("'For.name' is deprecated since Robot Framework 4.0. "
"Access 'variables', 'flavor' or 'values' directly or "
"use 'str()' to get a string representation.", UserWarning)
return '%s %s [ %s ]' % (' | '.join(self.variables), self.flavor,
' | '.join(self.values))
@property
def doc(self):
"""Deprecated since Robot Framework 4.0."""
warnings.warn("'For.doc' is deprecated since Robot Framework 4.0.", UserWarning)
return ''
@property
def args(self):
"""Deprecated since Robot Framework 4.0."""
warnings.warn("'For.args' is deprecated since Robot Framework 4.0.", UserWarning)
return ()
@property
def assign(self):
"""Deprecated since Robot Framework 4.0."""
warnings.warn("'For.assign' is deprecated since Robot Framework 4.0.", UserWarning)
return ()
@property
def tags(self):
"""Deprecated since Robot Framework 4.0."""
warnings.warn("'For.tags' is deprecated since Robot Framework 4.0.", UserWarning)
return Tags()
@property
def timeout(self):
"""Deprecated since Robot Framework 4.0."""
warnings.warn("'For.timeout' is deprecated since Robot Framework 4.0.", UserWarning)
return None
@py3to2
@Body.register
class If(BodyItem):
body_class = Body
inactive = object()
repr_args = ('condition',)
__slots__ = ['condition', '_orelse']
def __init__(self, condition=None, parent=None):
self.condition = condition
self.parent = parent
self.body = None
self._orelse = None
@setter
def body(self, body):
return self.body_class(self, body)
@property # Cannot use @setter because it would create orelses recursively.
def orelse(self):
if self._orelse is None and self:
self._orelse = type(self)(condition=self.inactive, parent=self)
return self._orelse
@orelse.setter
def orelse(self, orelse):
if orelse is None:
self._orelse = None
elif not isinstance(orelse, type(self)):
raise TypeError("Only %s objects accepted, got %s."
% (type(self).__name__, type(orelse).__name__))
else:
orelse.parent = self
self._orelse = orelse
@property
def source(self):
return self.parent.source if self.parent is not None else None
@property
def type(self):
if self.condition is self.inactive:
return None
if not isinstance(self.parent, If):
return self.IF_TYPE
if self.condition:
return self.ELSE_IF_TYPE
return self.ELSE_TYPE
def visit(self, visitor):
if self:
visitor.visit_if(self)
def __str__(self):
if not self:
return u'None'
if not isinstance(self.parent, If):
return u'IF %s' % self.condition
if self.condition:
return u'ELSE IF %s' % self.condition
return u'ELSE'
def __repr__(self):
return BodyItem.__repr__(self) if self else 'If(condition=INACTIVE)'
def __bool__(self):
return self.condition is not self.inactive
# TODO: Remove deprecated Keyword related properties in RF 4.1/5.0.
@property
def name(self):
"""Deprecated since Robot Framework 4.0."""
warnings.warn("'If.name' is deprecated since Robot Framework 4.0. "
"Access 'condition' directly or use 'str()' to get "
"a string representation.", UserWarning)
return self.condition
@property
def doc(self):
"""Deprecated since Robot Framework 4.0."""
warnings.warn("'If.doc' is deprecated since Robot Framework 4.0.", UserWarning)
return ''
@property
def args(self):
"""Deprecated since Robot Framework 4.0."""
warnings.warn("'If.args' is deprecated since Robot Framework 4.0.", UserWarning)
return ()
@property
def assign(self):
"""Deprecated since Robot Framework 4.0."""
warnings.warn("'If.assign' is deprecated since Robot Framework 4.0.", UserWarning)
return ()
@property
def tags(self):
"""Deprecated since Robot Framework 4.0."""
warnings.warn("'If.tags' is deprecated since Robot Framework 4.0.", UserWarning)
return Tags()
@property
def timeout(self):
"""Deprecated since Robot Framework 4.0."""
warnings.warn("'If.timeout' is deprecated since Robot Framework 4.0.", UserWarning)
return None
``` |
{
"source": "jie-json/swift-corelibs-foundation-master",
"score": 2
} |
#### File: swift-corelibs-foundation-master/lib/workspace.py
```python
from subprocess import call
from config import Configuration
from path import Path
import os
class Workspace:
projects = []
def __init__(self, projects):
self.projects = projects
def configure(self):
if Configuration.current.system_root is None:
Configuration.current.system_root = Path("./sysroots/" + Configuration.current.target.triple)
if Configuration.current.toolchain is None:
Configuration.current.toolchain = Path("./toolchains/" + Configuration.current.target.triple)
if Configuration.current.bootstrap_directory is None:
Configuration.current.bootstrap_directory = Path("./bootstrap")
for project in self.projects:
working_dir = Configuration.current.source_root.path_by_appending(project).absolute()
cmd = [Configuration.current.command[0], "--target", Configuration.current.target.triple]
if Configuration.current.system_root is not None:
cmd.append("--sysroot=" + Configuration.current.system_root.relative(working_dir))
if Configuration.current.toolchain is not None:
cmd.append("--toolchain=" + Configuration.current.toolchain.relative(working_dir))
if Configuration.current.bootstrap_directory is not None:
cmd.append("--bootstrap=" + Configuration.current.bootstrap_directory.relative(working_dir))
if Configuration.current.verbose:
cmd.append("--verbose")
print "cd " + working_dir
print " " + " ".join(cmd)
status = call(cmd, cwd=working_dir)
if status != 0:
exit(status) # pass the exit value along if one of the sub-configurations fails
def generate(self):
generated = ""
for project in self.projects:
generated += """
build """ + os.path.basename(project) + """: BuildProject
project = """ + project + """
"""
generated += """
build all: phony | """ + " ".join(reversed(self.projects)) + """
default all
"""
return generated
``` |
{
"source": "JiekaiJia/Deeplearning",
"score": 3
} |
#### File: JiekaiJia/Deeplearning/cifar.py
```python
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import torchvision
from torchvision.datasets import CIFAR10
from torchvision.transforms import ToTensor
from torchvision.utils import make_grid
def split_indices(n, vld_pct, seed=2021):
"""This function is used to split the data into train and validation.
Args:
n: the number of train data
vld_pct: the percentage of validation data
seed: keep the random results same each time calling the function
Returns:
the indexes of 2 divided datasets(train indices, validation indices).
"""
n_vld = int(vld_pct*n) # Determine size of validation set
np.random.seed(seed) # Set the random seed(for reproducibility)
idxs = np.random.permutation(n) # Create random permutation of 0 to n-1
return idxs[n_vld:], idxs[:n_vld] # Pick the first n_vld indices for validation set
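# Example (added for illustration, not part of the original script): splitting 10
# samples with a 20% validation share yields 8 train and 2 validation indices.
#   train_idxs, vld_idxs = split_indices(10, 0.2)
#   assert len(train_idxs) == 8 and len(vld_idxs) == 2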
def get_data_loader(data_set, batch_size):
"""This function generate the batch data for every epoch."""
train_indices, vld_indices = split_indices(len(data_set), 0.2)
train_sampler = SubsetRandomSampler(train_indices)
train_ld = DataLoader(data_set, batch_size, sampler=train_sampler)
vld_sampler = SubsetRandomSampler(vld_indices)
vld_ld = DataLoader(data_set, batch_size, sampler=vld_sampler)
return train_ld, vld_ld
def get_default_device():
"""Pick GPU if available, else CPU."""
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
def to_device(data, device):
"""Move tensors to the chosen device."""
if isinstance(data, (list, tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
class DeviceDataLoader:
"""Wrap a data loader to move data to a device."""
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
"""Yield a data batch after moving it to device."""
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
"""Number of batches."""
return len(self.dl)
def loss_batch(model, loss_func, xb, yb, opt=None, metric=None):
"""Calculates the loss and metric value for a batch of data,
and optionally performs gradient descent if an optimizer is provided."""
preds = model(xb)
loss = loss_func(preds, yb)
if opt is not None:
loss.backward() # Compute gradients
opt.step() # Update parameters
opt.zero_grad() # Reset gradients
metric_result = None
if metric is not None:
metric_result = metric(preds, yb) # Compute the metric
return loss.item(), len(xb), metric_result
def evaluate(model, loss_func, valid_dl, metric=None):
""""""
with torch.no_grad():
# Pass each batch through the model
results = [loss_batch(model, loss_func, xb, yb, metric=metric) for xb, yb in valid_dl]
losses, nums, metrics = zip(*results) # Separate losses, counts and metrics
total = np.sum(nums) # Total size of the dataset
avg_loss = np.sum(np.multiply(losses, nums)) / total
avg_metric = None
if metric is not None:
avg_metric = np.sum(np.multiply(metrics, nums)) / total
return avg_loss, total, avg_metric
def fit(epochs, lr, model, loss_func, train_dl, vld_dl, metric=None, opt=None):
""""""
train_losses, vld_losses, vld_metrics = [], [], []
if opt is None:
opt = torch.optim.SGD
opt = opt(model.parameters(), lr=lr)
for epoch in range(epochs):
model.train()
for xb, yb in train_dl: # Training
train_loss, _, _ = loss_batch(model, loss_func, xb, yb, opt)
model.eval()
result = evaluate(model, loss_func, vld_dl, metric) # Evaluation
vld_loss, total, vld_metric = result
vld_losses.append(vld_loss) # Record the loss & metric
vld_metrics.append(vld_metric)
train_losses.append(train_loss)
# print progress
if metric is None:
print('Epoch [{}/{}], train_loss: {:.4f}, validation_loss: {:.4f}'
.format(epoch+1, epochs, train_loss, vld_loss)
)
else:
print('Epoch [{}/{}], train_loss: {:.4f}, validation_loss: {:.4f}, {}: {:.4f}'
.format(epoch+1, epochs, train_loss, vld_loss, metric.__name__, vld_metric)
)
return train_losses, vld_losses, vld_metrics
def accuracy(outputs, labels):
_, preds = torch.max(outputs, dim=1) # Return the max value in each row
return torch.sum(preds == labels).item() / len(preds)
def show_example(img, label):
print('Label: ', dataset.classes[label], '('+str(label)+')')
plt.imshow(img.permute(1, 2, 0))
plt.show()
def show_batch(dl):
for imgs, labels in dl:
fig, ax = plt.subplots(figsize=(10, 10))
ax.set_xticks([])
ax.set_yticks([])
ax.imshow(make_grid(imgs, 10).permute(1, 2, 0)) # Make a grid of images with 10 rows
plt.show()
break
class CiFarModel(nn.Module):
"""Feedforward neural network with 1 hidden layer."""
def __init__(self):
super().__init__()
self.network = nn.Sequential(
nn.Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
nn.ReLU(),
nn.MaxPool2d(2, 2), # output: bs*16*16*16
nn.Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
nn.ReLU(),
nn.MaxPool2d(2, 2), # output: bs*16*8*8
nn.Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
nn.ReLU(),
nn.MaxPool2d(2, 2), # output: bs*16*4*4
nn.Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
nn.ReLU(),
nn.MaxPool2d(2, 2), # output: bs*16*2*2
nn.Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
nn.ReLU(),
nn.MaxPool2d(2, 2), # output: bs*16*1*1
nn.Flatten(), # output: bs*16
nn.Linear(16, 10) # output: bs*10
)
def forward(self, train_x):
return self.network(train_x)
def plot_metric(metric_values):
"""Plot metric values in a line graph."""
plt.plot(metric_values, '-x')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.title('Accuracy vs. No. of epochs')
def plot_losses(train_losses, vld_losses):
plt.plot(train_losses, '-x')
plt.plot(vld_losses, '-x')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['Training', 'Validation'])
plt.title('Loss vs. No. of epochs')
simple_model = nn.Sequential(
nn.Conv2d(3, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
nn.MaxPool2d(2, 2)
)
dataset = CIFAR10(root='../data/', train=True, download=True, transform=ToTensor())
test_dataset = CIFAR10(root='../data/', train=False, transform=ToTensor())
device = get_default_device()
train_loader, vld_loader = get_data_loader(dataset, 100)
train_loader = DeviceDataLoader(train_loader, device)
vld_loader = DeviceDataLoader(vld_loader, device)
model = CiFarModel()
to_device(model, device)
num_epochs = 10
opt_fn = torch.optim.Adam
lr = 0.005
train_losses1, vld_losses1, metrics1 = fit(num_epochs, lr, model, F.cross_entropy, train_loader, vld_loader, accuracy, opt_fn)
# for images, labels in train_loader:
# print('images.shape:', images.shape)
# out = simple_model(images)
# print('out.shape:', out.shape)
# break
# model2 = CiFarModel()
# torch.save(model.state_dict(), 'cifar10.pth')
# model2.load_state_dict(torch.load('cifar10.pth'))
``` |
{
"source": "JiekaiJia/KI-in-Medizin",
"score": 2
} |
#### File: JiekaiJia/KI-in-Medizin/metrics.py
```python
from sklearn.metrics import (
classification_report,
confusion_matrix,
f1_score
)
import torch
from utils import get_all_preds
def accuracy(outputs, labels):
_, preds = torch.max(outputs, dim=1)
return torch.sum(preds == labels).item() / len(preds)
def f1_score_(outputs, labels):
_, preds = torch.max(outputs, dim=1)
return f1_score(labels.cpu(), preds.cpu(), average='weighted')
def f1_score_binary(outputs, labels):
    TP = 0  # true positives
    TN = 0  # true negatives
    FP = 0  # false positives
    FN = 0  # false negatives
_, preds = torch.max(outputs, dim=1)
preds = preds.cpu().numpy().tolist()
labels = labels.cpu().numpy().tolist()
for label, pred in zip(labels, preds):
if label == 1 and pred == 1:
TP = TP + 1
if label == 0 and pred != 1:
TN = TN + 1
if label == 0 and pred == 1:
FP = FP + 1
if label == 1 and pred != 1:
FN = FN + 1
try:
F1 = TP / (TP + 1/2*(FP+FN))
except ZeroDivisionError:
return 1
return F1
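# Worked example (added for illustration): with TP=3, FP=1, FN=2 the score is
# 3 / (3 + 0.5*(1+2)) = 3 / 4.5 ≈ 0.667, which matches the usual F1 = 2TP/(2TP+FP+FN).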
def report(model, vld_loader):
model.eval()
with torch.no_grad():
preds, targets = get_all_preds(model, vld_loader)
print('Classification report :')
print(classification_report(targets, preds))
print('Confusion matrix:')
print(confusion_matrix(targets, preds))
```
#### File: JiekaiJia/KI-in-Medizin/utils.py
```python
import numpy as np
import torch
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from tqdm import tqdm
class_distribution = [59.68, 8.68, 28.55, 3.08]
# 2017
class_distribution = [59.22, 8.65, 28.80, 3.33]
def split_indices(n, vld_pct, labels, compensation_factor, random_state=None):
"""This function is used to split the data into train and validation.
Args:
n: the number of train data
vld_pct: the percentage of validation data
random_state: keep the random results same each time calling the function
Returns:
the indexes of 2 divided datasets(train indices, validation indices).
"""
n_vld = int(vld_pct*n) # Determine size of validation set
if random_state:
np.random.seed(random_state) # Set the random seed(for reproducibility)
idxs = np.random.permutation(n) # Create random permutation of 0 to n-1
split_sets = [idxs[:n_vld], idxs[n_vld:2*n_vld], idxs[2*n_vld:3*n_vld], idxs[3*n_vld:4*n_vld], idxs[4*n_vld:]]
train_sets = []
vld_sets = []
for k in range(5):
train_set = np.concatenate((split_sets[k], split_sets[(k+1)%5], split_sets[(k+2)%5], split_sets[(k+3)%5]))
masks = [labels[train_set, i].astype(bool) for i in range(labels.shape[1])]
sets = [train_set[mask] for mask in masks]
lst = []
for idx, set_ in enumerate(sets):
scale = int(100 * compensation_factor / class_distribution[idx]) + 1
set_ = np.tile(set_, scale)
set_ = set_.reshape([-1, 1])
lst.append(set_)
train_set = np.vstack(lst)
train_set = train_set.squeeze()
np.random.shuffle(train_set)
train_sets.append(train_set)
vld_sets.append(split_sets[k-1])
if n_vld == 0:
train_sets = []
vld_sets = []
train_set = idxs
masks = [labels[:, i].astype(bool) for i in range(labels.shape[1])]
sets = [train_set[mask] for mask in masks]
lst = []
for idx, set_ in enumerate(sets):
scale = int(100 * compensation_factor / class_distribution[idx]) + 1
set_ = np.tile(set_, scale)
set_ = set_.reshape([-1, 1])
lst.append(set_)
train_set = np.vstack(lst)
train_set = train_set.squeeze()
np.random.shuffle(train_set)
train_sets.append(train_set)
vld_sets.append(idxs)
    return train_sets, vld_sets  # one train/validation index set per fold
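# Numeric example (added for illustration): with compensation_factor=0.5 and the 2017
# distribution above, a class covering 3.33% of the data is tiled int(100*0.5/3.33)+1 = 16
# times, while the 59.22% majority class is tiled int(100*0.5/59.22)+1 = 1 time, so
# minority classes are oversampled before the indices are shuffled.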
def get_data_loader(train_dataset, vld_dataset, batch_size, onehot_labels, compensation_factor):
"""This function generate the batch data for every epoch."""
train_indices, vld_indices = split_indices(len(train_dataset), 0, onehot_labels, compensation_factor, random_state=2021)
train_lds = []
vld_lds = []
for train_idx, vld_idx in zip(train_indices, vld_indices):
train_sampler = SubsetRandomSampler(train_idx)
train_ld = DataLoader(train_dataset, batch_size, sampler=train_sampler)
vld_ld = DataLoader(vld_dataset, batch_size, sampler=vld_idx)
train_lds.append(train_ld)
vld_lds.append(vld_ld)
return train_lds, vld_lds
def get_default_device():
"""Pick GPU if available, else CPU."""
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
def to_device(data, device):
"""Move tensors to the chosen device."""
if isinstance(data, (list, tuple)):
return [to_device(x, device) if not isinstance(x, str) else x for x in data]
return data.to(device, non_blocking=True)
class DeviceDataLoader:
"""Wrap a data loader to move data to a device."""
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
"""Yield a data batch after moving it to device."""
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
"""Number of batches."""
return len(self.dl)
@torch.no_grad()
def get_all_preds(model, loader):
"""Output model's predictions and targets.
:param model:
:param loader:
:return:
"""
device = get_default_device()
all_preds = to_device(torch.tensor([]), device)
all_labels = to_device(torch.tensor([]), device)
all_names = []
for batch in tqdm(loader):
signals, labels = batch
preds = model(signals)
try:
all_labels = torch.cat((all_labels, labels), dim=0)
except TypeError:
all_names.extend(labels)
all_preds = torch.cat(
(all_preds, preds)
, dim=0
)
_, predicted = torch.max(all_preds, dim=1)
if all_names:
return predicted.cpu(), all_names
return predicted.cpu(), all_labels.cpu()
class EarlyStopping:
"""Early stopping to stop the training when the loss does not improve after certain epochs."""
def __init__(self, patience=100, mode='max'):
"""
:param patience: how many epochs to wait before stopping when loss is not improving
"""
self.patience = patience
self.mode = mode
self.counter = 0
self.best_metric = None
self.early_stop = False
def __call__(self, val_metric):
if self.best_metric is None:
self.best_metric = val_metric
elif self.best_metric > val_metric:
if self.mode == 'max':
self.counter += 1
else:
self.best_metric = val_metric
elif self.best_metric < val_metric:
if self.mode == 'max':
self.best_metric = val_metric
else:
self.counter += 1
else:
self.counter += 1
print(f'INFO: Early stopping counter {self.counter} of {self.patience}')
if self.counter >= self.patience:
print('INFO: Early stopping')
self.early_stop = True
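# Minimal usage sketch (added for illustration; the variable names are hypothetical):
#   early_stopping = EarlyStopping(patience=100, mode='max')
#   for epoch in range(num_epochs):
#       val_f1 = ...  # validation metric for this epoch
#       early_stopping(val_f1)
#       if early_stopping.early_stop:
#           break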
def load_model(model, path, evaluation=False):
"""Load the saved model."""
device = get_default_device()
model.load_state_dict(torch.load(path, map_location=torch.device(device)))
if evaluation:
# If the model is used to evaluation, the requires grad should be disabled.
for parameter in model.parameters():
parameter.requires_grad = False
return model
device = get_default_device()
def get_length(data):
# data shape [b, c, t, f]
shape = list(data.shape)
maps, _ = torch.max(torch.abs(data), 1)
# data shape [b, t, f]
used = torch.sign(maps)
used = used.int()
t_range = torch.arange(0, shape[2], device=device).unsqueeze(1)
ranged = t_range * used
length, _ = torch.max(ranged, 1)
# data shape [b, f]
length, _ = torch.max(length, 1)
# data shape [b]
length = length + 1
return length
def set_zeros(data, length):
shape = list(data.shape)
# generate data shape matrix with time range with padding
r = torch.arange(0, shape[1], device=device)
r = torch.unsqueeze(r, 0)
r = torch.unsqueeze(r, 2)
r = r.repeat(shape[0], 1, shape[2])
# generate data shape matrix with time range without padding
l = torch.unsqueeze(length, 1)
l = torch.unsqueeze(l, 2)
l = l.repeat(1, shape[1], shape[2])
# when col_n smaller than length mask entry is true
mask = torch.lt(r, l)
# when col_n larger than length, set input to zero
output = torch.where(mask, data, torch.zeros_like(data))
return output
def class_penalty(class_distribution, class_penalty=0.2):
eq_w = [1 for _ in class_distribution]
occ_w = [100/r for r in class_distribution]
c = class_penalty
weights = [[e * (1-c) + o * c for e,o in zip(eq_w, occ_w)]]
class_weights = torch.Tensor(weights)
return class_weights.to(device)
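# Example (added for illustration): with the 2017 distribution above and class_penalty=0.2,
# each weight is 0.8*1 + 0.2*(100/occurrence); the 3.33% class gets about 0.8 + 0.2*30.03 ≈ 6.81
# while the 59.22% class gets ≈ 1.14, so the loss penalizes mistakes on rare classes more heavily.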
``` |
{
"source": "JiekaiJia/pettingzoo_comunication",
"score": 2
} |
#### File: JiekaiJia/pettingzoo_comunication/utils.py
```python
import numpy as np
from pettingzoo.utils.conversions import to_parallel_wrapper
from pettingzoo.utils.wrappers import AssertOutOfBoundsWrapper, OrderEnforcingWrapper
from ray.rllib.env import PettingZooEnv
from ray.rllib.env.wrappers.pettingzoo_env import ParallelPettingZooEnv
from supersuit import pad_action_space_v0, pad_observations_v0
from comm_channel import ParallelCommWrapper, CommWrapper
def main_comm_env(base_env, comm_dict):
"""Wrap the communication channel into Pettingzoo main environment, and padding the environment."""
def comm_env(**kwargs):
raw_env = base_env.raw_env(**kwargs)
# Set all agents to silent
for agent in raw_env.world.agents:
agent.silent = True
env = AssertOutOfBoundsWrapper(raw_env)
env = OrderEnforcingWrapper(env)
env = CommWrapper(env, comm_dict)
env = pad_observations_v0(env)
env = pad_action_space_v0(env)
env = _PettingZooEnv(env)
return env
return comm_env
def main_env(base_env):
"""Padding the environment."""
def env(**kwargs):
env = base_env.env(**kwargs)
env = pad_observations_v0(env)
env = pad_action_space_v0(env)
env = _PettingZooEnv(env)
return env
return env
def parallel_comm_env(base_env, comm_dict):
"""Wrap the communication channel into Pettingzoo parallel environment, and padding the environment."""
def comm_env(**kwargs):
raw_env = base_env.raw_env(**kwargs)
# Set all agents to silent
for agent in raw_env.world.agents:
agent.silent = True
env = AssertOutOfBoundsWrapper(raw_env)
env = OrderEnforcingWrapper(env)
env = to_parallel_wrapper(env)
env = ParallelCommWrapper(env, comm_dict)
env = pad_observations_v0(env)
env = pad_action_space_v0(env)
env = _ParallelPettingZooEnv(env)
return env
return comm_env
def parallel_env(base_env):
"""Padding the parallel environment."""
def env(**kwargs):
env = base_env.parallel_env(**kwargs)
env = pad_observations_v0(env)
env = pad_action_space_v0(env)
env = _ParallelPettingZooEnv(env)
return env
return env
class _PettingZooEnv(PettingZooEnv):
def __init__(self, env):
super().__init__(env)
def step(self, action_dict):
# Ensure the input actions are discrete number.
for k, v in action_dict.items():
if isinstance(v, (np.int64, np.int32, np.int16, np.int8, int)):
pass
elif not v:
pass
else:
action_dict[k] = np.argmax(v)
return super().step(action_dict)
class _ParallelPettingZooEnv(ParallelPettingZooEnv):
def __init__(self, env):
super().__init__(env)
def step(self, action_dict):
# Ensure the input actions are discrete number.
for k, v in action_dict.items():
if isinstance(v, (np.int64, np.int32, np.int16, np.int8, int)):
pass
else:
action_dict[k] = np.argmax(v)
return super().step(action_dict)
def init_comm_dict(env):
return {'comm_bits': 0, 'receivers': {agent: [] for agent in env.possible_agents}}
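# Usage sketch (added for illustration; the agent names and values below are hypothetical):
#   comm_dict = init_comm_dict(env)
#   comm_dict['comm_bits'] = 4
#   comm_dict['receivers']['speaker_0'] = ['listener_0']
#   env_creator = parallel_comm_env(base_env, comm_dict)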
``` |
{
"source": "JiekangHuang/am7020_raspberry",
"score": 3
} |
#### File: JiekangHuang/am7020_raspberry/am7020_mqtt.py
```python
from time import time, sleep, ctime
from sim7020.sim7020_nb import SIM7020NB
from sim7020.sim7020_mqtt import SIM7020MQTT
apn = "twm.nbiot"
band = 28
MQTT_BROKER = "test.mosquitto.org"
PORT = 1883
MQTT_USERNAME = ""
MQTT_PASSWORD = ""
TEST_TOPIC = "temp/humidity"
UPLOAD_INTERVAL = 60
nb = SIM7020NB("/dev/ttyS0", 115200, 18)
mqtt = SIM7020MQTT(nb)
def nbConnect():
print("Initializing modem...")
while((not nb.init() or (not nb.nbiotConnect(apn, band)))):
print(".")
print("Waiting for network...")
while(not nb.waitForNetwork()):
print(".")
sleep(5)
print(" success")
def reConnBroker():
if(not mqtt.chkConnBroker()):
print("Connecting to", MQTT_BROKER, end="...")
if(mqtt.connBroker(MQTT_BROKER, PORT, mqtt_id="MY_AM7020_TEST_MQTTID")):
print(" success")
print("subscribe: ", TEST_TOPIC, end="")
if(mqtt.subscribe(TEST_TOPIC, callback1)):
print(" success")
else:
print(" fail")
else:
print(" fail")
def callback1(msg):
print(TEST_TOPIC, ":", msg)
def main():
nbConnect()
reConnBroker()
chk_net_timer = 0
pub_data_timer = 0
while(True):
if(time() > chk_net_timer):
chk_net_timer = time() + 10
if(not nb.chkNet()):
nbConnect()
reConnBroker()
if(time() > pub_data_timer):
pub_data_timer = time() + UPLOAD_INTERVAL
print("publish: ", ctime(), end="")
if(mqtt.publish(TEST_TOPIC, str(ctime()))):
print(" success")
else:
print(" Fail")
mqtt.procSubs()
main()
```
#### File: JiekangHuang/am7020_raspberry/iot_am7020.py
```python
from time import time, sleep
from sim7020.sim7020_nb import SIM7020NB
from sim7020.sim7020_mqtt import SIM7020MQTT
from tsl2561 import TSL2561
tsl = TSL2561(debug=True)
apn = "twm.nbiot"
band = 28
MQTT_BROKER = "io.adafruit.com"
PORT = 1883
MQTT_USERNAME = "Zack_Huang"
MQTT_PASSWORD = "<PASSWORD>"
# topics
LUX = "Zack_Huang/feeds/pi3.lux"
UPLOAD_INTERVAL = 60
nb = SIM7020NB("/dev/ttyS0", 115200, 18)
mqtt = SIM7020MQTT(nb)
def nbConnect():
print("Initializing modem...")
while((not nb.init() or (not nb.nbiotConnect(apn, band)))):
print(".")
print("Waiting for network...")
while(not nb.waitForNetwork()):
print(".")
sleep(5)
print("success")
def reConnBroker():
if(not mqtt.chkConnBroker()):
print("Connecting to", MQTT_BROKER, "...")
if(mqtt.connBroker(MQTT_BROKER, PORT, username=MQTT_USERNAME, password=MQTT_PASSWORD, mqtt_id="MY_AM7020_TEST_MQTTID")):
print("success")
else:
print("fail")
def pubAdafruitIO(topic, msg):
print("publish {} to {}".format(msg, topic))
if(mqtt.publish(topic, msg)):
print("success")
else:
print("Fail")
def main():
nbConnect()
reConnBroker()
chk_net_timer = 0
pub_data_timer = 0
get_lux_timer = 0
lux = 0
while(True):
if(time() > get_lux_timer):
get_lux_timer = time() + 5
lux = tsl.lux()
if(time() > chk_net_timer):
chk_net_timer = time() + 10
if(not nb.chkNet()):
nbConnect()
reConnBroker()
if(time() > pub_data_timer):
pub_data_timer = time() + UPLOAD_INTERVAL
pubAdafruitIO(LUX, lux)
mqtt.procSubs()
main()
```
#### File: am7020_raspberry/sim7020/sim7020_modem.py
```python
import serial
from time import time, sleep
from gpiozero import LED
GSM_OK = "OK\r\n"
GSM_ERROR = "ERROR\r\n"
class SIM7020Modem:
def __init__(self, port, baudrate, reset_pin, dump_at_cmd=False):
self._at = serial.Serial(port, baudrate, timeout=0.05)
self._reset_pin = LED(reset_pin)
self.dump_at_cmd = dump_at_cmd
def atWrite(self, cmd):
if(self.dump_at_cmd):
print(cmd, end="")
cmd = bytes(cmd, 'utf-8')
self._at.write(cmd)
def atRead(self, numChars=1):
try:
cmd = self._at.read(numChars).decode("utf-8")
if(self.dump_at_cmd):
print(cmd, end="")
return cmd
except (KeyboardInterrupt, SystemExit):
raise
except:
print("decode error !")
return ""
def restart(self):
self._reset_pin.off()
sleep(0.5)
self._reset_pin.on()
sleep(5)
def testAT(self, timeout_s=10):
startTime = time()
while(time() - startTime < timeout_s):
self.sendAT()
if(self.waitResponse(0.2) == 1):
return True
sleep(0.1)
return False
def streamWrite(self, *args):
cmd = ""
for arg in args:
cmd += str(arg)
self.atWrite(cmd)
def sendAT(self, *args):
cmd = "AT"
for arg in args:
cmd += str(arg)
cmd += "\r\n"
self.atWrite(cmd)
def streamRead(self):
self.atRead()
def streamGetLength(self, numChars, timeout_s=1):
startTime = time()
data = ""
while(time() - startTime < timeout_s):
data += self.atRead(numChars)
if(data != "" and len(data) == numChars):
return data
def streamGetIntBefore(self, lastChar, timeout_s=1):
startTime = time()
data = ""
while(time() - startTime < timeout_s):
data += self.atRead()
if(data != "" and data.endswith(lastChar)):
return int(data[:-1])
return -9999
def streamGetStringBefore(self, lastChar, timeout_s=1):
startTime = time()
data = ""
while(time() - startTime < timeout_s):
data += self.atRead()
if(data != "" and data.endswith(lastChar)):
return data[:-1]
return ""
def streamSkipUntil(self, c, timeout_s=1):
startTime = time()
while(time() - startTime < timeout_s):
ch = self.atRead()
if(ch == c):
return True
return False
def waitResponse(self, timeout_s=1, r1=GSM_OK, r2=GSM_ERROR, r3=None, r4=None, r5=None):
index = 0
startTime = time()
data = ""
while(True):
data += self.atRead()
if(r1 and data.endswith(r1)):
index = 1
break
elif(r2 and data.endswith(r2)):
index = 2
break
elif(r3 and data.endswith(r3)):
index = 3
break
elif(r4 and data.endswith(r4)):
index = 4
break
elif(r5 and data.endswith(r5)):
index = 5
break
if(time()-startTime > timeout_s):
break
return index
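# Usage sketch (added for illustration): send an AT command and branch on which terminator
# arrives first; waitResponse returns 1 for GSM_OK, 2 for GSM_ERROR, and 0 on timeout.
#   modem = SIM7020Modem("/dev/ttyS0", 115200, 18)
#   modem.sendAT("+CSQ")
#   if modem.waitResponse(2) == 1:
#       pass  # command acknowledged with OK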
``` |
{
"source": "JiekangHuang/VMX_Pi_Example",
"score": 2
} |
#### File: java/python_script/WTD_Barcode.py
```python
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from networktables import NetworkTables
from networktables.util import ntproperty
import threading
import os
# Create the barcodes file
f = open('/home/pi/barcodes.txt', 'w')
f.close()
# Create thread to make sure networktables is connected
cond = threading.Condition()
notified = [False]
# Create a listener
def connectionListener(connected, info):
with cond:
notified[0] = True
cond.notify()
# Instantiate NetworkTables
NetworkTables.initialize(server="10.12.34.2")
NetworkTables.addConnectionListener(connectionListener, immediateNotify=True)
# Wait until connected
with cond:
if not notified[0]:
cond.wait()
# Create the vision Table
ntBarcodeData = ntproperty('/Vision/barcodeData', "null")
ntBarcodeType = ntproperty('/Vision/barcodeType', "null")
ntReadBarcode = ntproperty('/Vision/readBarcode', False)
# Get Table
table = NetworkTables.getTable('Vision')
# Create the system handler
class MyHandler(FileSystemEventHandler):
def on_modified(self, event):
try:
file = open('./barcodes.txt', 'r')
table.putString('barcodeData', file.readline())
table.putString('barcodeType', file.readline())
file.close()
except:
pass # when file is not created yet
event_handler = MyHandler()
observer = Observer()
observer.schedule(event_handler, path='./barcodes.txt', recursive=False)
observer.start()
# The forever loop; stop the observer cleanly on Ctrl+C
try:
    while True:
        if table.getBoolean('readBarcode', False):
            table.putBoolean('readBarcode', False)
            os.system('python3 /home/pi/readBarcode.py')
except KeyboardInterrupt:
    observer.stop()
``` |
{
"source": "jiekeshi/CovTesting_Replication",
"score": 2
} |
#### File: CovTesting_Replication/Comparison of Attack Images/attack.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import argparse
import sys, logging
import time
from datetime import datetime
import pytz
import numpy as np
import warnings
warnings.filterwarnings("ignore", message=r"Passing", category=FutureWarning)
import tensorflow.keras as keras
from tensorflow.keras import backend as K
## load mine trained model
from tensorflow.keras.models import load_model
import art
from art.estimators.classification import TensorFlowV2Classifier
from art.attacks.evasion import FastGradientMethod
from art.attacks.evasion import CarliniLInfMethod
from art.attacks.evasion import ProjectedGradientDescent
from art.attacks.evasion import BasicIterativeMethod
from art.attacks.evasion import SaliencyMapMethod
from art.attacks.evasion import AutoProjectedGradientDescent
from art.attacks.evasion import DeepFool, NewtonFool
from art.attacks.evasion import SquareAttack, SpatialTransformation
from art.attacks.evasion import ShadowAttack, Wasserstein
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from tensorflow.python.client import device_lib
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
## custom time zone for logger
def customTime(*args):
utc_dt = pytz.utc.localize(datetime.utcnow())
converted = utc_dt.astimezone(pytz.timezone("Singapore"))
return converted.timetuple()
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
## [original from the FSE authors] session config kept for reference; not needed for this script
# config = tf.compat.v1.ConfigProto()
# config.gpu_options.allow_growth = True
# sess = tf.compat.v1.Session(config=config)
# VERBOSE = False
VERBOSE = True
DATA_DIR = "../data/"
MODEL_DIR = "../models/"
MNIST = "mnist"
CIFAR = "cifar"
SVHN = "svhn"
DATASET_NAMES = [MNIST, CIFAR, SVHN]
BIM = "bim"
CW = "cw"
FGSM = "fgsm"
JSMA = "jsma"
PGD = "pgd"
APGD = "apgd"
DF = "deepfool"
NF = "newtonfool"
SA = "squareattack"
SHA = "shadowattack"
ST = "spatialtransformation"
WA = "wasserstein"
ATTACK_NAMES = [APGD, BIM, CW, DF, FGSM, JSMA, NF, PGD, SA, SHA, ST, WA]
## Note: already tried APGD, but it doesn't work
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
def train_step(model, images, labels):
with tf.GradientTape() as tape:
predictions = model(images, training=True)
loss = loss_object(labels, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
## classifier paramaters
## depend to the dataset used
classifier_params = {}
for dataset_name in DATASET_NAMES :
classifier_params[dataset_name] = {"loss_object": loss_object, "train_step": train_step}
classifier_params[MNIST]["nb_classes"] = 10
classifier_params[MNIST]["input_shape"] = (28, 28, 1)
classifier_params[MNIST]["clip_values"] = (-0.5, 0.5)
classifier_params[CIFAR]["nb_classes"] = 10
classifier_params[CIFAR]["input_shape"] = (32, 32, 3)
classifier_params[SVHN]["nb_classes"] = 10
classifier_params[SVHN]["input_shape"] = (32, 32, 3)
classifier_params[SVHN]["clip_values"] = (-0.5, 0.5)
## attack parameters for generating adversarial images
## Note: I use the same format as the FSE paper, i.e. params[<attack>][<dataset>]
## we may change the format into params[<dataset>][<attack>]
## to track the consistent epsilon for each dataset
attack_params = {}
## TO DO: read the paper for each attack
## make sure to use the correct parameters
## for each combination of attack and dataset
# empty means using the original parameters for ART
attack_params[APGD] = {}
for dataset_name in DATASET_NAMES:
attack_params[APGD][dataset_name] = {"loss_type": "cross_entropy"}
attack_params[CW] = {}
for dataset_name in DATASET_NAMES :
attack_params[CW][dataset_name] = {}
attack_params[DF] = {}
for dataset_name in DATASET_NAMES:
attack_params[DF][dataset_name] = {"batch_size": 256}
attack_params[NF] = {}
for dataset_name in DATASET_NAMES:
attack_params[NF][dataset_name] = {"batch_size": 256}
attack_params[JSMA] = {}
for dataset_name in DATASET_NAMES:
attack_params[JSMA][dataset_name] = {}
attack_params[SA] = {}
for dataset_name in DATASET_NAMES:
attack_params[SA][dataset_name] = {}
attack_params[SHA] = {}
for dataset_name in DATASET_NAMES:
attack_params[SHA][dataset_name] = {"batch_size": 1}
attack_params[ST] = {}
for dataset_name in DATASET_NAMES:
attack_params[ST][dataset_name] = {}
attack_params[WA] = {}
for dataset_name in DATASET_NAMES:
attack_params[WA][dataset_name] = {}
attack_params[PGD] = {}
attack_params[PGD][MNIST] = {'eps': .3,
'eps_step': .03,
'max_iter': 20
}
attack_params[PGD][CIFAR] = {'eps': 16. / 255.,
'eps_step': 2. / 255.,
'max_iter': 30
}
attack_params[PGD][SVHN] = {'eps': 8. / 255.,
'eps_step': 0.01,
'max_iter': 30
}
# use the same epsilon used in pgd
attack_params[BIM] = {}
attack_params[BIM][MNIST] = {'eps': .3
}
attack_params[BIM][CIFAR] = {'eps': 16. / 255.
}
attack_params[BIM][SVHN] = {'eps': 8. / 255.
}
# use the same epsilon used in pgd
attack_params[FGSM] = {}
attack_params[FGSM][MNIST] = {'eps': .3
}
attack_params[FGSM][CIFAR] = {'eps': 16. / 255.
}
attack_params[FGSM][SVHN] = {'eps': 8. / 255.
}
def call_function_by_attack_name(attack_name):
if attack_name not in ATTACK_NAMES:
print('Unsupported attack: {}'.format(attack_name))
sys.exit(1)
return {
APGD: AutoProjectedGradientDescent,
BIM: BasicIterativeMethod,
CW: CarliniLInfMethod,
DF: DeepFool,
FGSM: FastGradientMethod,
JSMA: SaliencyMapMethod,
NF: NewtonFool,
PGD: ProjectedGradientDescent,
SA: SquareAttack,
SHA: ShadowAttack,
ST: SpatialTransformation,
WA: Wasserstein
}[attack_name]
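# Example (added for illustration): the returned class is instantiated later in gen_adv_data
# with an ART classifier and the per-dataset parameters, e.g.
#   attack_cls = call_function_by_attack_name(PGD)   # -> ProjectedGradientDescent
#   # attack = attack_cls(classifier, **attack_params[PGD][MNIST])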
# integrate all attack method in one function and only construct graph once
def gen_adv_data(model, x, y, attack_name, dataset_name, batch_size=2048):
logging.getLogger().setLevel(logging.CRITICAL)
classifier_param = classifier_params[dataset_name]
classifier = TensorFlowV2Classifier(model=model, **classifier_param)
attack_param = attack_params[attack_name][dataset_name]
if attack_name not in [ST] :
if "batch_size" not in attack_param :
attack_param["batch_size"] = batch_size
if attack_name not in [FGSM, BIM] : ## some attacks don't have verbose parameter, e.g. bim
attack_param["verbose"] = VERBOSE
attack = call_function_by_attack_name(attack_name)(classifier, **attack_param)
data_num = x.shape[0]
adv_x = attack.generate(x=x, y=y)
logging.getLogger().setLevel(logging.INFO)
return adv_x
# the data is in range(-.5, .5)
def load_data(dataset_name):
assert dataset_name in DATASET_NAMES
x_train = np.load(DATA_DIR + dataset_name + '/benign/x_train.npy')
y_train = np.load(DATA_DIR + dataset_name + '/benign/y_train.npy')
x_test = np.load(DATA_DIR + dataset_name + '/benign/x_test.npy')
y_test = np.load(DATA_DIR + dataset_name + '/benign/y_test.npy')
return x_train, y_train, x_test, y_test
def softmax(x):
    exp_x = np.exp(x - np.max(x))  # subtract the max for numerical stability
return exp_x / np.sum(exp_x)
def accuracy(model, x, labels):
assert (x.shape[0] == labels.shape[0])
num = x.shape[0]
y = model.predict(x)
y = y.argmax(axis=-1)
labels = labels.argmax(axis=-1)
idx = (labels == y)
return 100 * np.sum(idx) / num
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Attack for DNN')
parser.add_argument(
'--dataset', help="Model Architecture", type=str, default="mnist")
parser.add_argument(
'--model', help="Model Architecture", type=str, default="lenet1")
parser.add_argument(
'--attack', help="Adversarial examples", type=str, default="fgsm")
parser.add_argument(
'--batch_size', help="batch size for generating adversarial examples", type=int, default=1024)
args = parser.parse_args()
dataset_name = args.dataset
model_name = args.model
attack_name = args.attack
## Prepare directory for saving adversarial images and logging
adv_dir = "{}{}/adv/{}/{}/".format(
DATA_DIR, dataset_name, model_name, attack_name)
if not os.path.exists(adv_dir):
os.makedirs(adv_dir)
logging.basicConfig(
format='[%(asctime)s] - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.INFO,
handlers=[
logging.FileHandler(
os.path.join(adv_dir, 'output.log')),
logging.StreamHandler()
])
logging.Formatter.converter = customTime
logger = logging.getLogger("adversarial_images_generation")
## Load benign images from mnist, cifar, or svhn
x_train, y_train, x_test, y_test = load_data(dataset_name)
## Load keras pretrained model for the specific dataset
model_path = "{}{}/{}.h5".format(MODEL_DIR, dataset_name, model_name)
model = load_model(model_path)
model.summary()
logger.info("")
logger.info("Generating Adversarial Images")
logger.info("Use GPU: {}".format(len(get_available_gpus()) > 0))
if len(get_available_gpus()) > 0 :
logger.info("Available GPUs: {}".format(get_available_gpus()))
logger.info("Dataset: {}".format(dataset_name))
logger.info("Model: {}".format(model_name))
logger.info("Attack: {}".format(attack_name))
## Check the accuracy of the original model on benign images
acc = accuracy(model, x_test, y_test)
logger.info("Model accuracy on benign images: {:.2f}%".format(acc))
## Generate adversarial images
x_adv = gen_adv_data(model, x_test, y_test, attack_name,
dataset_name, args.batch_size)
## Check the accuracy of the original model on adversarial images
acc = accuracy(model, x_adv, y_test)
logger.info("Model accuracy on adversarial images: {:.2f}%".format(acc))
## Save the adversarial images into external file
x_adv_path = "{}x_test.npy".format(adv_dir)
np.save(x_adv_path, x_adv)
## Note: y_test will exactly be the same with the benign y_test
## thus it's not a must to save the y_test
y_adv_path = "{}y_test.npy".format(adv_dir)
np.save(y_adv_path, y_test)
logger.info("Adversarial images are saved at {}".format(adv_dir))
```
#### File: CovTesting_Replication/Comparison of Attack Images/mutators.py
```python
from __future__ import print_function
from importlib import reload
import sys
import cv2
import numpy as np
import random
import time
import copy
reload(sys)
# sys.setdefaultencoding('utf8')
# keras 1.2.2 tf:1.2.0
class Mutators():
def image_translation(img, params):
rows, cols, ch = img.shape
# rows, cols = img.shape
# M = np.float32([[1, 0, params[0]], [0, 1, params[1]]])
M = np.float32([[1, 0, params], [0, 1, params]])
dst = cv2.warpAffine(img, M, (cols, rows))
return dst
def image_scale(img, params):
# res = cv2.resize(img, None, fx=params[0], fy=params[1], interpolation=cv2.INTER_CUBIC)
rows, cols, ch = img.shape
res = cv2.resize(img, None, fx=params, fy=params, interpolation=cv2.INTER_CUBIC)
res = res.reshape((res.shape[0],res.shape[1],ch))
y, x, z = res.shape
if params > 1: # need to crop
startx = x // 2 - cols // 2
starty = y // 2 - rows // 2
return res[starty:starty + rows, startx:startx + cols]
elif params < 1: # need to pad
sty = (rows - y) // 2
stx = (cols - x) // 2
return np.pad(res, [(sty, rows - y - sty), (stx, cols - x - stx), (0, 0)], mode='constant', constant_values=0)
return res
def image_shear(img, params):
rows, cols, ch = img.shape
# rows, cols = img.shape
factor = params * (-1.0)
M = np.float32([[1, factor, 0], [0, 1, 0]])
dst = cv2.warpAffine(img, M, (cols, rows))
return dst
def image_rotation(img, params):
rows, cols, ch = img.shape
# rows, cols = img.shape
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), params, 1)
dst = cv2.warpAffine(img, M, (cols, rows), flags=cv2.INTER_AREA)
return dst
def image_contrast(img, params):
alpha = params
new_img = cv2.multiply(img, np.array([alpha])) # mul_img = img*alpha
# new_img = cv2.add(mul_img, beta) # new_img = img*alpha + beta
return new_img
def image_brightness(img, params):
beta = params
new_img = cv2.add(img, beta) # new_img = img*alpha + beta
return new_img
def image_blur(img, params):
# print("blur")
blur = []
if params == 1:
blur = cv2.blur(img, (3, 3))
if params == 2:
blur = cv2.blur(img, (4, 4))
if params == 3:
blur = cv2.blur(img, (5, 5))
if params == 4:
blur = cv2.GaussianBlur(img, (3, 3), 0)
if params == 5:
blur = cv2.GaussianBlur(img, (5, 5), 0)
if params == 6:
blur = cv2.GaussianBlur(img, (7, 7), 0)
if params == 7:
blur = cv2.medianBlur(img, 3)
if params == 8:
blur = cv2.medianBlur(img, 5)
# if params == 9:
# blur = cv2.blur(img, (6, 6))
if params == 9:
blur = cv2.bilateralFilter(img, 6, 50, 50)
# blur = cv2.bilateralFilter(img, 9, 75, 75)
return blur
def image_pixel_change(img, params):
# random change 1 - 5 pixels from 0 -255
img_shape = img.shape
img1d = np.ravel(img)
arr = np.random.randint(0, len(img1d), params)
for i in arr:
img1d[i] = np.random.randint(0, 256)
new_img = img1d.reshape(img_shape)
return new_img
def image_noise(img, params):
if params == 1: # Gaussian-distributed additive noise.
row, col, ch = img.shape
mean = 0
var = 0.1
sigma = var ** 0.5
gauss = np.random.normal(mean, sigma, (row, col, ch))
gauss = gauss.reshape(row, col, ch)
noisy = img + gauss
return noisy.astype(np.uint8)
elif params == 2: # Replaces random pixels with 0 or 1.
s_vs_p = 0.5
amount = 0.004
out = np.copy(img)
# Salt mode
num_salt = np.ceil(amount * img.size * s_vs_p)
coords = [np.random.randint(0, i, int(num_salt))
for i in img.shape]
out[tuple(coords)] = 1
# Pepper mode
num_pepper = np.ceil(amount * img.size * (1. - s_vs_p))
coords = [np.random.randint(0, i, int(num_pepper))
for i in img.shape]
out[tuple(coords)] = 0
return out
elif params == 3: # Multiplicative noise using out = image + n*image,where n is uniform noise with specified mean & variance.
row, col, ch = img.shape
gauss = np.random.randn(row, col, ch)
gauss = gauss.reshape(row, col, ch)
noisy = img + img * gauss
return noisy.astype(np.uint8)
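# Usage sketch (added for illustration; the methods are written as plain functions on the
# class, so they are called without an instance):
#   img = np.zeros((28, 28, 1), dtype=np.uint8)
#   shifted = Mutators.image_translation(img, 2)
#   blurred = Mutators.image_blur(img, 4)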
```
#### File: jiekeshi/CovTesting_Replication/correlation.py
```python
from scipy.stats import ttest_ind, kendalltau, pearsonr, spearmanr, mannwhitneyu, wilcoxon
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
from numpy import mean
from numpy import var, std
from math import sqrt
def func(a, b):
Lens = len(a)
ties_onlyin_x = 0
ties_onlyin_y = 0
con_pair = 0
dis_pair = 0
for i in range(Lens - 1):
for j in range(i + 1, Lens):
test_tying_x = np.sign(a[i] - a[j])
test_tying_y = np.sign(b[i] - b[j])
panduan = test_tying_x * test_tying_y
if panduan == 1:
con_pair += 1
elif panduan == -1:
dis_pair += 1
if test_tying_y == 0 and test_tying_x != 0:
ties_onlyin_y += 1
elif test_tying_x == 0 and test_tying_y != 0:
ties_onlyin_x += 1
if (con_pair + dis_pair + ties_onlyin_x) * (dis_pair + con_pair + ties_onlyin_y) == 0:
k = 10**-1
else:
k = (con_pair + dis_pair + ties_onlyin_x) * (dis_pair + con_pair + ties_onlyin_y)
Kendallta1 = (con_pair - dis_pair) / np.sqrt(k)
return Kendallta1
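# Sanity check (added for illustration): for data without ties the hand-rolled statistic
# should agree with scipy's kendalltau, which is already imported above.
#   a, b = [1, 2, 3, 4], [1, 3, 2, 4]
#   tau, _ = kendalltau(a, b)
#   abs(func(a, b) - tau) < 1e-9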
import numpy as np
# nc_1 = []
# nc_3 = []
# nc_5 = []
# nc_7 = []
# nc_9 = []
# tknc = []
# tknp = []
# kmnc = []
# nbc = []
# snac = []
# for i in range(0, 11):
# with open("./mnist/lenet1/improve/coverage_result_" + str(i) + ".txt") as f:
# results = f.read()
# nc_1.append(float(results.split("\n")[3].split(" ")[1]))
# nc_3.append(float(results.split("\n")[4].split(" ")[1]))
# nc_5.append(float(results.split("\n")[5].split(" ")[1]))
# nc_7.append(float(results.split("\n")[6].split(" ")[1]))
# nc_9.append(float(results.split("\n")[7].split(" ")[1]))
# tknc.append(float(results.split("\n")[8].split(" ")[1]))
# tknp.append(float(results.split("\n")[9].split(" ")[1]))
# kmnc.append(float(results.split("\n")[10].split(" ")[1]))
# nbc.append(float(results.split("\n")[11].split(" ")[1]))
# snac.append(float(results.split("\n")[12].split(" ")[1]))
# # for i in [0, 10]:
# # with open("./mnist/lenet1/no_improve/coverage_result_" + str(i) + ".txt") as f:
# # results = f.read()
# # nc_1.append(float(results.split("\n")[3].split(" ")[1]))
# # nc_3.append(float(results.split("\n")[4].split(" ")[1]))
# # nc_5.append(float(results.split("\n")[5].split(" ")[1]))
# # nc_7.append(float(results.split("\n")[6].split(" ")[1]))
# # nc_9.append(float(results.split("\n")[7].split(" ")[1]))
# # tknc.append(float(results.split("\n")[8].split(" ")[1]))
# # tknp.append(float(results.split("\n")[9].split(" ")[1]))
# # kmnc.append(float(results.split("\n")[10].split(" ")[1]))
# # nbc.append(float(results.split("\n")[11].split(" ")[1]))
# # snac.append(float(results.split("\n")[12].split(" ")[1]))
# # for i in [0, 10]:
# # with open("./mnist/lenet4/improve/coverage_result_" + str(i) + ".txt") as f:
# # results = f.read()
# # nc_1.append(float(results.split("\n")[3].split(" ")[1]))
# # nc_3.append(float(results.split("\n")[4].split(" ")[1]))
# # nc_5.append(float(results.split("\n")[5].split(" ")[1]))
# # nc_7.append(float(results.split("\n")[6].split(" ")[1]))
# # nc_9.append(float(results.split("\n")[7].split(" ")[1]))
# # tknc.append(float(results.split("\n")[8].split(" ")[1]))
# # tknp.append(float(results.split("\n")[9].split(" ")[1]))
# # kmnc.append(float(results.split("\n")[10].split(" ")[1]))
# # nbc.append(float(results.split("\n")[11].split(" ")[1]))
# # snac.append(float(results.split("\n")[12].split(" ")[1]))
# # for i in [0, 10]:
# # with open("./mnist/lenet5/improve/coverage_result_" + str(i) + ".txt") as f:
# # results = f.read()
# # nc_1.append(float(results.split("\n")[3].split(" ")[1]))
# # nc_3.append(float(results.split("\n")[4].split(" ")[1]))
# # nc_5.append(float(results.split("\n")[5].split(" ")[1]))
# # nc_7.append(float(results.split("\n")[6].split(" ")[1]))
# # nc_9.append(float(results.split("\n")[7].split(" ")[1]))
# # tknc.append(float(results.split("\n")[8].split(" ")[1]))
# # tknp.append(float(results.split("\n")[9].split(" ")[1]))
# # kmnc.append(float(results.split("\n")[10].split(" ")[1]))
# # nbc.append(float(results.split("\n")[11].split(" ")[1]))
# # snac.append(float(results.split("\n")[12].split(" ")[1]))
# # for i in [0, 10]:
# # with open("./svhn/svhn_first/improve/coverage_result_" + str(i) + ".txt") as f:
# # results = f.read()
# # nc_1.append(float(results.split("\n")[3].split(" ")[1]))
# # nc_3.append(float(results.split("\n")[4].split(" ")[1]))
# # nc_5.append(float(results.split("\n")[5].split(" ")[1]))
# # nc_7.append(float(results.split("\n")[6].split(" ")[1]))
# # nc_9.append(float(results.split("\n")[7].split(" ")[1]))
# # tknc.append(float(results.split("\n")[8].split(" ")[1]))
# # tknp.append(float(results.split("\n")[9].split(" ")[1]))
# # kmnc.append(float(results.split("\n")[10].split(" ")[1]))
# # nbc.append(float(results.split("\n")[11].split(" ")[1]))
# # snac.append(float(results.split("\n")[12].split(" ")[1]))
# # for i in [0, 10]:
# # with open("./svhn/svhn_model/improve/coverage_result_" + str(i) + ".txt") as f:
# # results = f.read()
# # nc_1.append(float(results.split("\n")[3].split(" ")[1]))
# # nc_3.append(float(results.split("\n")[4].split(" ")[1]))
# # nc_5.append(float(results.split("\n")[5].split(" ")[1]))
# # nc_7.append(float(results.split("\n")[6].split(" ")[1]))
# # nc_9.append(float(results.split("\n")[7].split(" ")[1]))
# # tknc.append(float(results.split("\n")[8].split(" ")[1]))
# # tknp.append(float(results.split("\n")[9].split(" ")[1]))
# # kmnc.append(float(results.split("\n")[10].split(" ")[1]))
# # nbc.append(float(results.split("\n")[11].split(" ")[1]))
# # snac.append(float(results.split("\n")[12].split(" ")[1]))
# # for i in [0, 10]:
# # with open("./svhn/svhn_model/no_improve/coverage_result_" + str(i) + ".txt") as f:
# # results = f.read()
# # nc_1.append(float(results.split("\n")[3].split(" ")[1]))
# # nc_3.append(float(results.split("\n")[4].split(" ")[1]))
# # nc_5.append(float(results.split("\n")[5].split(" ")[1]))
# # nc_7.append(float(results.split("\n")[6].split(" ")[1]))
# # nc_9.append(float(results.split("\n")[7].split(" ")[1]))
# # tknc.append(float(results.split("\n")[8].split(" ")[1]))
# # tknp.append(float(results.split("\n")[9].split(" ")[1]))
# # kmnc.append(float(results.split("\n")[10].split(" ")[1]))
# # nbc.append(float(results.split("\n")[11].split(" ")[1]))
# # snac.append(float(results.split("\n")[12].split(" ")[1]))
# # for i in [0, 10]:
# # with open("./svhn/svhn_second/no_improve/coverage_result_" + str(i) + ".txt") as f:
# # results = f.read()
# # nc_1.append(float(results.split("\n")[3].split(" ")[1]))
# # nc_3.append(float(results.split("\n")[4].split(" ")[1]))
# # nc_5.append(float(results.split("\n")[5].split(" ")[1]))
# # nc_7.append(float(results.split("\n")[6].split(" ")[1]))
# # nc_9.append(float(results.split("\n")[7].split(" ")[1]))
# # tknc.append(float(results.split("\n")[8].split(" ")[1]))
# # tknp.append(float(results.split("\n")[9].split(" ")[1]))
# # kmnc.append(float(results.split("\n")[10].split(" ")[1]))
# # nbc.append(float(results.split("\n")[11].split(" ")[1]))
# # snac.append(float(results.split("\n")[12].split(" ")[1]))
# # for i in [0, 10]:
# # with open("./cifar/resnet20/no_improve/coverage_result_" + str(i) + ".txt") as f:
# # results = f.read()
# # nc_1.append(float(results.split("\n")[3].split(" ")[1]))
# # nc_3.append(float(results.split("\n")[4].split(" ")[1]))
# # nc_5.append(float(results.split("\n")[5].split(" ")[1]))
# # nc_7.append(float(results.split("\n")[6].split(" ")[1]))
# # nc_9.append(float(results.split("\n")[7].split(" ")[1]))
# # tknc.append(float(results.split("\n")[8].split(" ")[1]))
# # tknp.append(float(results.split("\n")[9].split(" ")[1]))
# # kmnc.append(float(results.split("\n")[10].split(" ")[1]))
# # nbc.append(float(results.split("\n")[11].split(" ")[1]))
# # snac.append(float(results.split("\n")[12].split(" ")[1]))
# # for i in [0, 8]:
# # with open("./cifar/resnet20/improve/coverage_result_" + str(i) + ".txt") as f:
# # results = f.read()
# # nc_1.append(float(results.split("\n")[3].split(" ")[1]))
# # nc_3.append(float(results.split("\n")[4].split(" ")[1]))
# # nc_5.append(float(results.split("\n")[5].split(" ")[1]))
# # nc_7.append(float(results.split("\n")[6].split(" ")[1]))
# # nc_9.append(float(results.split("\n")[7].split(" ")[1]))
# # tknc.append(float(results.split("\n")[8].split(" ")[1]))
# # tknp.append(float(results.split("\n")[9].split(" ")[1]))
# # kmnc.append(float(results.split("\n")[10].split(" ")[1]))
# # nbc.append(float(results.split("\n")[11].split(" ")[1]))
# # snac.append(float(results.split("\n")[12].split(" ")[1]))
# # norm_nc_1 = [num/nc_1[0] for num in nc_1]
# # norm_nc_3 = [num/nc_3[0] for num in nc_3]
# # norm_nc_5 = [num/nc_5[0] for num in nc_5]
# # norm_nc_7 = [num/nc_7[0] for num in nc_7]
# # norm_nc_9 = [num/nc_9[0] for num in nc_9]
# # norm_tknc = [num/tknc[0] for num in tknc]
# # norm_tknp = [num/tknp[0] for num in tknp]
# # norm_kmnc = [num/kmnc[0] for num in kmnc]
# # norm_nbc = [num/nbc[0] for num in nbc]
# # norm_snac = [num/snac[0] for num in snac]
# # mnist_data_1 = [norm_nc_1, norm_nc_3, norm_nc_5, norm_nc_7, norm_nc_9, norm_tknc, norm_tknp, norm_kmnc, norm_nbc, norm_snac]
nc_1 = []
nc_3 = []
nc_5 = []
nc_7 = []
nc_9 = []
tknc = []
tknp = []
kmnc = []
nbc = []
snac = []
for i in range(0, 11):
with open("./coverage_results/mnist/lenet1/improve/coverage_result_" + str(i) + ".txt") as f:
results = f.read()
nc_1.append(float(results.split("\n")[3].split(" ")[1]))
nc_3.append(float(results.split("\n")[4].split(" ")[1]))
nc_5.append(float(results.split("\n")[5].split(" ")[1]))
nc_7.append(float(results.split("\n")[6].split(" ")[1]))
nc_9.append(float(results.split("\n")[7].split(" ")[1]))
tknc.append(float(results.split("\n")[8].split(" ")[1]))
tknp.append(float(results.split("\n")[9].split(" ")[1]))
kmnc.append(float(results.split("\n")[10].split(" ")[1]))
nbc.append(float(results.split("\n")[11].split(" ")[1]))
snac.append(float(results.split("\n")[12].split(" ")[1]))
norm_nc_1 = [num/nc_1[0] for num in nc_1]
norm_nc_3 = [num/nc_3[0] for num in nc_3]
norm_nc_5 = [num/nc_5[0] for num in nc_5]
norm_nc_7 = [num/nc_7[0] for num in nc_7]
norm_nc_9 = [num/nc_9[0] for num in nc_9]
norm_tknc = [num/tknc[0] for num in tknc]
norm_tknp = [num/tknp[0] for num in tknp]
norm_kmnc = [num/kmnc[0] for num in kmnc]
norm_nbc = [num/nbc[0] for num in nbc]
norm_snac = [num/snac[0] for num in snac]
mnist_data_1 = [norm_nc_1, norm_nc_3, norm_nc_5, norm_nc_7, norm_nc_9, norm_tknc, norm_tknp, norm_kmnc, norm_nbc, norm_snac]
# mr = []
# acac = []
# actc = []
# alp_l0 = []
# alp_l2 = []
# alp_li = []
# ass = []
# psd = []
# nte = []
# rgb = []
# ric = []
# for i in range(0, 11):
# with open("./RQ2_results/robustness_results/mnist/lenet1/improve/robustness_metrics_" + str(i) + ".txt") as f:
# results = f.read()
# mr.append(float(results.split("\n")[3].split(" ")[1]))
# acac.append(float(results.split("\n")[4].split(" ")[1]))
# actc.append(float(results.split("\n")[5].split(" ")[1]))
# alp_l0.append(float(results.split("\n")[6].split(" ")[1]))
# alp_l2.append(float(results.split("\n")[7].split(" ")[1]))
# alp_li.append(float(results.split("\n")[8].split(" ")[1]))
# ass.append(float(results.split("\n")[9].split(" ")[1]))
# psd.append(float(results.split("\n")[10].split(" ")[1]))
# nte.append(float(results.split("\n")[11].split(" ")[1]))
# rgb.append(float(results.split("\n")[12].split(" ")[1]))
# ric.append(float(results.split("\n")[13].split(" ")[1]))
# # for i in [0, 10]:
# # with open("./robustness_results/mnist/lenet1/no_improve/robustness_metrics_" + str(i) + ".txt") as f:
# # results = f.read()
# # mr.append(float(results.split("\n")[3].split(" ")[1]))
# # acac.append(float(results.split("\n")[4].split(" ")[1]))
# # actc.append(float(results.split("\n")[5].split(" ")[1]))
# # alp_l0.append(float(results.split("\n")[6].split(" ")[1]))
# # alp_l2.append(float(results.split("\n")[7].split(" ")[1]))
# # alp_li.append(float(results.split("\n")[8].split(" ")[1]))
# # ass.append(float(results.split("\n")[9].split(" ")[1]))
# # psd.append(float(results.split("\n")[10].split(" ")[1]))
# # nte.append(float(results.split("\n")[11].split(" ")[1]))
# # rgb.append(float(results.split("\n")[12].split(" ")[1]))
# # ric.append(float(results.split("\n")[13].split(" ")[1]))
# # for i in range(0, 11):
# # with open("./robustness_results/rb/Untitled/mnist/lenet4/improve/robustness_metrics_" + str(i) + ".txt") as f:
# # results = f.read()
# # mr.append(float(results.split("\n")[3].split(" ")[1]))
# # acac.append(float(results.split("\n")[4].split(" ")[1]))
# # actc.append(float(results.split("\n")[5].split(" ")[1]))
# # alp_l0.append(float(results.split("\n")[6].split(" ")[1]))
# # alp_l2.append(float(results.split("\n")[7].split(" ")[1]))
# # alp_li.append(float(results.split("\n")[8].split(" ")[1]))
# # ass.append(float(results.split("\n")[9].split(" ")[1]))
# # psd.append(float(results.split("\n")[10].split(" ")[1]))
# # nte.append(float(results.split("\n")[11].split(" ")[1]))
# # rgb.append(float(results.split("\n")[12].split(" ")[1]))
# # ric.append(float(results.split("\n")[13].split(" ")[1]))
# # for i in [0, 10]:
# # with open("./robustness_results/rb/Untitled/mnist/lenet5/improve/robustness_metrics_" + str(i) + ".txt") as f:
# # results = f.read()
# # mr.append(float(results.split("\n")[3].split(" ")[1]))
# # acac.append(float(results.split("\n")[4].split(" ")[1]))
# # actc.append(float(results.split("\n")[5].split(" ")[1]))
# # alp_l0.append(float(results.split("\n")[6].split(" ")[1]))
# # alp_l2.append(float(results.split("\n")[7].split(" ")[1]))
# # alp_li.append(float(results.split("\n")[8].split(" ")[1]))
# # ass.append(float(results.split("\n")[9].split(" ")[1]))
# # psd.append(float(results.split("\n")[10].split(" ")[1]))
# # nte.append(float(results.split("\n")[11].split(" ")[1]))
# # rgb.append(float(results.split("\n")[12].split(" ")[1]))
# # ric.append(float(results.split("\n")[13].split(" ")[1]))
# # for i in [0, 10]:
# # with open("./robustness_results/rb/Untitled/cifar/resnet20/no_improve/robustness_metrics_" + str(i) + ".txt") as f:
# # results = f.read()
# # mr.append(float(results.split("\n")[3].split(" ")[1]))
# # acac.append(float(results.split("\n")[4].split(" ")[1]))
# # actc.append(float(results.split("\n")[5].split(" ")[1]))
# # alp_l0.append(float(results.split("\n")[6].split(" ")[1]))
# # alp_l2.append(float(results.split("\n")[7].split(" ")[1]))
# # alp_li.append(float(results.split("\n")[8].split(" ")[1]))
# # ass.append(float(results.split("\n")[9].split(" ")[1]))
# # psd.append(float(results.split("\n")[10].split(" ")[1]))
# # nte.append(float(results.split("\n")[11].split(" ")[1]))
# # rgb.append(float(results.split("\n")[12].split(" ")[1]))
# # ric.append(float(results.split("\n")[13].split(" ")[1]))
# for i in range(0, 11):
# with open("./robustness_results/rb/Untitled/cifar/resnet20/improve/robustness_metrics_" + str(i) + ".txt") as f:
# results = f.read()
# mr.append(float(results.split("\n")[3].split(" ")[1]))
# acac.append(float(results.split("\n")[4].split(" ")[1]))
# actc.append(float(results.split("\n")[5].split(" ")[1]))
# alp_l0.append(float(results.split("\n")[6].split(" ")[1]))
# alp_l2.append(float(results.split("\n")[7].split(" ")[1]))
# alp_li.append(float(results.split("\n")[8].split(" ")[1]))
# ass.append(float(results.split("\n")[9].split(" ")[1]))
# psd.append(float(results.split("\n")[10].split(" ")[1]))
# nte.append(float(results.split("\n")[11].split(" ")[1]))
# rgb.append(float(results.split("\n")[12].split(" ")[1]))
# ric.append(float(results.split("\n")[13].split(" ")[1]))
# # for i in [0, 10]:
# # with open("./robustness_results/rb/Untitled/svhn/svhn_first/improve/robustness_metrics_" + str(i) + ".txt") as f:
# # results = f.read()
# # mr.append(float(results.split("\n")[3].split(" ")[1]))
# # acac.append(float(results.split("\n")[4].split(" ")[1]))
# # actc.append(float(results.split("\n")[5].split(" ")[1]))
# # alp_l0.append(float(results.split("\n")[6].split(" ")[1]))
# # alp_l2.append(float(results.split("\n")[7].split(" ")[1]))
# # alp_li.append(float(results.split("\n")[8].split(" ")[1]))
# # ass.append(float(results.split("\n")[9].split(" ")[1]))
# # psd.append(float(results.split("\n")[10].split(" ")[1]))
# # nte.append(float(results.split("\n")[11].split(" ")[1]))
# # rgb.append(float(results.split("\n")[12].split(" ")[1]))
# # ric.append(float(results.split("\n")[13].split(" ")[1]))
# for i in range(0, 11):
# with open("./robustness_results/rb/Untitled/svhn/svhn_model/improve/robustness_metrics_" + str(i) + ".txt") as f:
# results = f.read()
# mr.append(float(results.split("\n")[3].split(" ")[1]))
# acac.append(float(results.split("\n")[4].split(" ")[1]))
# actc.append(float(results.split("\n")[5].split(" ")[1]))
# alp_l0.append(float(results.split("\n")[6].split(" ")[1]))
# alp_l2.append(float(results.split("\n")[7].split(" ")[1]))
# alp_li.append(float(results.split("\n")[8].split(" ")[1]))
# ass.append(float(results.split("\n")[9].split(" ")[1]))
# psd.append(float(results.split("\n")[10].split(" ")[1]))
# nte.append(float(results.split("\n")[11].split(" ")[1]))
# rgb.append(float(results.split("\n")[12].split(" ")[1]))
# ric.append(float(results.split("\n")[13].split(" ")[1]))
# # for i in [0, 10]:
# # with open("./robustness_results/rb/Untitled/svhn/svhn_model/no_improve/robustness_metrics_" + str(i) + ".txt") as f:
# # results = f.read()
# # mr.append(float(results.split("\n")[3].split(" ")[1]))
# # acac.append(float(results.split("\n")[4].split(" ")[1]))
# # actc.append(float(results.split("\n")[5].split(" ")[1]))
# # alp_l0.append(float(results.split("\n")[6].split(" ")[1]))
# # alp_l2.append(float(results.split("\n")[7].split(" ")[1]))
# # alp_li.append(float(results.split("\n")[8].split(" ")[1]))
# # ass.append(float(results.split("\n")[9].split(" ")[1]))
# # psd.append(float(results.split("\n")[10].split(" ")[1]))
# # nte.append(float(results.split("\n")[11].split(" ")[1]))
# # rgb.append(float(results.split("\n")[12].split(" ")[1]))
# # ric.append(float(results.split("\n")[13].split(" ")[1]))
# # for i in [0, 10]:
# # with open("./robustness_results/rb/Untitled/svhn/svhn_second/no_improve/robustness_metrics_" + str(i) + ".txt") as f:
# # results = f.read()
# # mr.append(float(results.split("\n")[3].split(" ")[1]))
# # acac.append(float(results.split("\n")[4].split(" ")[1]))
# # actc.append(float(results.split("\n")[5].split(" ")[1]))
# # alp_l0.append(float(results.split("\n")[6].split(" ")[1]))
# # alp_l2.append(float(results.split("\n")[7].split(" ")[1]))
# # alp_li.append(float(results.split("\n")[8].split(" ")[1]))
# # ass.append(float(results.split("\n")[9].split(" ")[1]))
# # psd.append(float(results.split("\n")[10].split(" ")[1]))
# # nte.append(float(results.split("\n")[11].split(" ")[1]))
# # rgb.append(float(results.split("\n")[12].split(" ")[1]))
# # ric.append(float(results.split("\n")[13].split(" ")[1]))
# norm_mr = [num/mr[5] for num in mr]
# norm_acac = [num/acac[5] for num in acac]
# norm_actc = [num/actc[5] for num in actc]
# norm_alp_l0 = [num/alp_l0[5] for num in alp_l0]
# norm_alp_l2 = [num/alp_l2[5] for num in alp_l2]
# norm_alp_li = [num/alp_li[5] for num in alp_li]
# norm_ass = [num/ass[5] for num in ass]
# norm_psd = [num/psd[5] for num in psd]
# norm_nte = [num/nte[5] for num in nte]
# norm_rgb = [num/rgb[5] for num in rgb]
# norm_ric = [num/ric[5] for num in ric]
# mnist_data_rb = [norm_mr, norm_acac, norm_actc, norm_alp_l0, norm_alp_l2, norm_alp_li, norm_ass, norm_psd, norm_nte, norm_rgb, norm_ric]
mr = []
acac = []
actc = []
alp_l0 = []
alp_l2 = []
alp_li = []
ass = []
psd = []
nte = []
rgb = []
ric = []
for i in range(0, 11):
with open("./robustness_results/mnist/lenet1/improve/robustness_metrics_" + str(i) + ".txt") as f:
results = f.read()
mr.append(float(results.split("\n")[3].split(" ")[1]))
acac.append(float(results.split("\n")[4].split(" ")[1]))
actc.append(float(results.split("\n")[5].split(" ")[1]))
alp_l0.append(float(results.split("\n")[6].split(" ")[1]))
alp_l2.append(float(results.split("\n")[7].split(" ")[1]))
alp_li.append(float(results.split("\n")[8].split(" ")[1]))
ass.append(float(results.split("\n")[9].split(" ")[1]))
psd.append(float(results.split("\n")[10].split(" ")[1]))
nte.append(float(results.split("\n")[11].split(" ")[1]))
rgb.append(float(results.split("\n")[12].split(" ")[1]))
ric.append(float(results.split("\n")[13].split(" ")[1]))
# for i in range(0, 11):
# with open("./robustness_results/rb/Untitled/svhn/svhn_model/no_improve/robustness_metrics_" + str(i) + ".txt") as f:
# results = f.read()
# mr.append(float(results.split("\n")[3].split(" ")[1]))
# acac.append(float(results.split("\n")[4].split(" ")[1]))
# actc.append(float(results.split("\n")[5].split(" ")[1]))
# alp_l0.append(float(results.split("\n")[6].split(" ")[1]))
# alp_l2.append(float(results.split("\n")[7].split(" ")[1]))
# alp_li.append(float(results.split("\n")[8].split(" ")[1]))
# ass.append(float(results.split("\n")[9].split(" ")[1]))
# psd.append(float(results.split("\n")[10].split(" ")[1]))
# nte.append(float(results.split("\n")[11].split(" ")[1]))
# rgb.append(float(results.split("\n")[12].split(" ")[1]))
# ric.append(float(results.split("\n")[13].split(" ")[1]))
# for i in range(0, 11):
# with open("./robustness_results/rb/Untitled/cifar/resnet20/no_improve/robustness_metrics_" + str(i) + ".txt") as f:
# results = f.read()
# mr.append(float(results.split("\n")[3].split(" ")[1]))
# acac.append(float(results.split("\n")[4].split(" ")[1]))
# actc.append(float(results.split("\n")[5].split(" ")[1]))
# alp_l0.append(float(results.split("\n")[6].split(" ")[1]))
# alp_l2.append(float(results.split("\n")[7].split(" ")[1]))
# alp_li.append(float(results.split("\n")[8].split(" ")[1]))
# ass.append(float(results.split("\n")[9].split(" ")[1]))
# psd.append(float(results.split("\n")[10].split(" ")[1]))
# nte.append(float(results.split("\n")[11].split(" ")[1]))
# rgb.append(float(results.split("\n")[12].split(" ")[1]))
# ric.append(float(results.split("\n")[13].split(" ")[1]))
norm_mr = [num/mr[5] for num in mr]
norm_acac = [num/acac[5] for num in acac]
norm_actc = [num/actc[5] for num in actc]
norm_alp_l0 = [num/alp_l0[5] for num in alp_l0]
norm_alp_l2 = [num/alp_l2[5] for num in alp_l2]
norm_alp_li = [num/alp_li[5] for num in alp_li]
norm_ass = [num/ass[5] for num in ass]
norm_psd = [num/psd[5] for num in psd]
norm_nte = [num/nte[5] for num in nte]
norm_rgb = [num/rgb[5] for num in rgb]
norm_ric = [num/ric[5] for num in ric]
mnist_data_rb_1 = [norm_mr, norm_acac, norm_actc, norm_alp_l0, norm_alp_l2, norm_alp_li, norm_ass, norm_psd, norm_nte, norm_rgb, norm_ric]
# Define a list of markevery cases and color cases to plot
cases = ["NC(0.1)",
"NC(0.1)",
"NC(0.1)",
"NC(0.1)",
"NC(0.1)",
"TKNC",
"TKNP",
"KMNC",
"NBC",
"SNAC"]
colors = ['#1f77b4',
'#ff7f0e',
'#2ca02c',
'#d62728',
'#9467bd',
'#8c564b',
'#e377c2',
'#7f7f7f',
'#bcbd22',
'#17becf']
plt.rcParams['font.serif'] = 'Times'
mpl.rcParams['text.usetex'] = True
# Set the plot curve with markers and a title
fig, ax = plt.subplots(figsize=(11, 8), tight_layout=True)
ax.plot(norm_nc_1, marker='.', label=str(cases[0]), linewidth=3)
ax.plot(norm_nc_3, marker='*', label=str(cases[1]), linewidth=3)
ax.plot(norm_nc_5, marker='o', label=str(cases[2]), linewidth=3)
ax.plot(norm_nc_7, marker='+', label=str(cases[3]), linewidth=3)
ax.plot(norm_nc_9, marker='v', label=str(cases[4]), linewidth=3)
ax.plot(norm_tknc, marker='^', label=str(cases[5]), linewidth=3)
ax.plot(norm_tknp, marker='<', label=str(cases[6]), linewidth=3)
ax.plot(norm_kmnc, marker='>', label=str(cases[7]), linewidth=3)
ax.plot(norm_nbc, marker='s', label=str(cases[8]), linewidth=3)
ax.plot(norm_snac, marker='D', label=str(cases[9]), linewidth=3)
plt.legend(bbox_to_anchor=(0.01, 0.99), loc='upper left', borderaxespad=0., fontsize=21, ncol=2)
plt.xticks(np.arange(0, 11), ["$D_0$", "$D_1$", "$D_2$", "$D_3$", "$D_4$", "$D_5$", "$D_6$", "$D_7$", "$D_8$", "$D_9$", "$D_{10}$"], fontsize=25)
plt.yticks(np.arange(0.985, 1.120, 0.015), fontsize=25)
# plt.xlabels(["$-1$", "$-1$", "$-1$", "$-1$", "$-1$", "$-1$", "$-1$", "$-1$", "$-1$", "$-1$", "$-1$"])
plt.ylabel('Relative Coverage', fontsize=31)
plt.savefig("./fig1.pdf")
plt.show()
# Define a list of markevery cases and color cases to plot
cases = ["MR", "ACAC",
"ACTC",
"ALP\_L0",
"ALP\_L2",
"ALP\_Li",
"ASS",
"PSD",
"NTE",
"RGB",
"RIC"]
colors = ['#1f77b4',
'#ff7f0e',
'#2ca02c',
'#d62728',
'#9467bd',
'#8c564b',
'#e377c2',
'#7f7f7f',
'#bcbd22',
'#17becf',
'#17bce2']
# import matplotlib
mpl.rcParams['text.usetex'] = True
fig, ax = plt.subplots(figsize=(11, 8), tight_layout=True)
ax.plot(norm_mr, marker='.', label=str(cases[0]), linewidth=3)
ax.plot(norm_acac, marker='*', label=str(cases[1]), linewidth=3)
ax.plot(norm_actc, marker='o', label=str(cases[2]), linewidth=3)
ax.plot(norm_alp_l0, marker='+', label=str(cases[3]), linewidth=3)
ax.plot(norm_alp_l2, marker='v', label=str(cases[4]), linewidth=3)
ax.plot(norm_alp_li, marker='^', label=str(cases[5]), linewidth=3)
ax.plot(norm_ass, marker='<', label=str(cases[6]), linewidth=3)
ax.plot(norm_psd, marker='>', label=str(cases[7]), linewidth=3)
ax.plot(norm_nte, marker='s', label=str(cases[8]), linewidth=3)
ax.plot(norm_rgb, marker='D', label=str(cases[9]), linewidth=3)
ax.plot(norm_ric, marker='D', label=str(cases[10]), linewidth=3)
plt.legend(bbox_to_anchor=(0.98, 0.98), loc='upper right', borderaxespad=0., fontsize=21, ncol=2)
plt.xticks(np.arange(0, 11), ["$D_0$", "$D_1$", "$D_2$", "$D_3$", "$D_4$", "$D_5$", "$D_6$", "$D_7$", "$D_8$", "$D_9$", "$D_{10}$"], fontsize=25)
plt.yticks(np.arange(0.875, 1.385, 0.05), fontsize=25)
# plt.xlabels(["$-1$", "$-1$", "$-1$", "$-1$", "$-1$", "$-1$", "$-1$", "$-1$", "$-1$", "$-1$", "$-1$"])
plt.ylabel('Model Quality', fontsize=31)
plt.savefig("./fig2.pdf")
plt.show()
exit()
# # print(len(mnist_data_1[0]), len(mnist_data_rb[0]))
# exit()
# results = []
# # calculate Kendallta
# # for metric_1 in mnist_data_1:
# # tmp = []
# # print("\n")
# # for metric_2 in mnist_data_rb:
# # Kendallta, p_value = kendalltau(np.array(metric_1), np.array(metric_2))
# # # p_value = ttest_ind(np.array(metric_1), np.array(metric_2)).pvalue
# # # print(round(p_value,4), end=" ")
# # # print(metric_1, metric_2)
# # v = func(np.array(metric_1), np.array(metric_2))
# # tmp.append(round(v, 2))
# # results.append(tmp)
def cohend(d1, d2):
# calculate the size of samples
n1, n2 = len(d1), len(d2)
# print(n2, n1)
# calculate the variance of the samples
s1, s2 = std(d1), std(d2)
# calculate the pooled standard deviation
s = sqrt(((n1 - 1) * s1 **2 + (n2 - 1) * s2 **2) / (n1 + n2 - 2))
# calculate the means of the samples
u1, u2 = mean(d1), mean(d2)
# print(u2, u1)
# calculate the effect size
return (u2 - u1) / s
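# Hedged example (illustration only): for d1 = [1, 2, 3, 4, 5] and d2 = [2, 3, 4, 5, 6]
# the mean difference is 1.0 and, because numpy's std() defaults to the population
# standard deviation (ddof=0), the pooled s is sqrt(2) ~ 1.414, so cohend(d1, d2) ~ 0.71;
# with the textbook sample standard deviation (ddof=1) it would be ~ 0.63.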
results = []
for metric_1, metric_2 in zip(mnist_data_rb, mnist_data_rb_1):
p_value = mannwhitneyu(metric_1, metric_2).pvalue
cd = cohend(metric_1, metric_2)
results.append(p_value)
print(results)
# exit()
# # create hot picture in Seaborn
# f, ax = plt.subplots(figsize=(12, 12.5))
# mpl.rcParams['text.usetex'] = True
# plt.rcParams['font.serif'] = 'Times'
# label_y = ["MR", "ACAC",
# "ACTC",
# "ALP-L0",
# "ALP-L2",
# "ALP-Li",
# "ASS",
# "PSD",
# "NTE",
# "RGB",
# "RIC"]
# # label_y = ["NC(0.1)",
# # "NC(0.3)",
# # "NC(0.5)",
# # "NC(0.7)",
# # "NC(0.9)",
# # "TKNC",
# # "TKNP",
# # "KMNC",
# # "NBC",
# # "SNAC"]
# label_x = ["NC(0.1)",
# "NC(0.3)",
# "NC(0.5)",
# "NC(0.7)",
# "NC(0.9)",
# "TKNC",
# "TKNP",
# "KMNC",
# "NBC",
# "SNAC"]
# # mask = np.zeros_like(np.array(results), dtype=np.bool)
# # mask[np.triu_indices_from(mask)] = True
# # print(type(mask))
# heatmap = sns.heatmap(np.array(results),
# square=True,
# # mask = mask,
# cmap='coolwarm',
# cbar_kws={'shrink': 0.7, 'ticks': [-1, -.5, 0, 0.5, 1]},
# vmin=-1,
# vmax=1,
# annot=True,
# annot_kws={'size': 20},
# xticklabels = label_y,
# yticklabels = label_x)
# # ax.set_xticks(np.arange(len(label_y)), labels=label_y)
# # ax.set_yticks(np.arange(len(label_x)), labels=label_x)
# cax = plt.gcf().axes[-1]
# cax.tick_params(labelsize=18)
# # # plt.setp(label_y, rotation = 45)
# # # plt.setp(label_x, rotation = 45)
# plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
# rotation_mode="anchor")
# plt.setp(ax.get_yticklabels(), rotation=45, ha="right",
# rotation_mode="anchor")
# # cb = heatmap.figure.colorbar(heatmap.collections[0])
# # cb.ax.tick_params(length = 0.001, width=2, labelsize=10)
# # sns.set_style({'yticklabels.top': True})
# plt.xticks(fontsize=20)
# plt.yticks(fontsize=20)
# plt.savefig('./fig6.pdf', bbox_inches='tight')
# plt.show()
# \begin{table*}[!t]
# \caption{Misclassifcation rates for different test generation methods.}
# \begin{center}
# \begin{tabular}{|c|c|cccccc|}
# \hline
# \multirow{2}{*}{Datasets} & \multirow{2}{*}{Models} & \multicolumn{6}{c|}{Misclassification Rate } \\ \cline{3-8}
# & & \multicolumn{1}{c|}{Benign} & \multicolumn{1}{c|}{Diff} & \multicolumn{1}{c|}{Non-Diff} & \multicolumn{1}{c|}{DH} & \multicolumn{1}{c|}{FGSM} & \multicolumn{1}{c|}{PGD} \\ \hline
# \hline\multirow{3}{*}{MNIST} & LeNet-1 & 1.33\% & 49.26\% & 48.45\% & 48.65\% & 90.59\% & 100.00\% \\
# & LeNet-4 & 1.38\% & 45.51\% & 43.11\% & 44.50\% & 80.78\% & 99.98\% \\
# & LeNet-5 & 1.02\% & 42.97\% & 42.10\% & 43.05\% & 64.52\% & 99.70\% \\
# \hline\multirow{3}{*}{CIFAR} & VGG-16 & 7.20\% & 79.05\% & 80.78\% & 79.77\% & 68.24\% & 98.90\% \\
# & ResNet-20 & 8.26\% & 68.13\% & 65.22\% & 66.37\% & 82.68\% & 100.00\% \\
# & ResNet-56 & 7.24\% & 62.64\% & 65.34\% & 64.08\% & 70.71\% & 100.00\% \\
# \hline\multirow{3}{*}{SVHN} & SADL-1 & 10.30\% & 23.97\% & 30.61\% & 27.27\% & 83.96\% & 99.99\% \\
# & SADL-2 & 12.28\% & 24.17\% & 22.04\% & 22.89\% & 91.73\% & 99.94\% \\
# & SADL-3 & 7.43\% & 20.90\% & 16.71\% & 18.45\% & 88.80\% & 100.00\% \\
# \hline \multirow{3}{*}{EuroSAT} & VGG-16 & 3.50\% & 50.09\% & 40.93\% & 45.15\% & 88.76\% & 97.72\% \\
# & ResNet-20 & 2.89\% & 48.69\% & 54.06\% & 51.61\% & 88.74\% & 99.41\% \\
# & ResNet-32 & 2.78\% & 46.94\% & 48.87\% & 48.11\% & 88.22\% & 99.17\% \\
# & ResNet-56 & 3.65\% & 44.72\% & 34.89\% & 39.26\% & 85.50\% & 99.72\% \\
# \hline
# \end{tabular}
# \label{tab:mr_on_attack}
# \end{center}
# \end{table*}
# fgsm = [90.59, 80.78, 64.52, 68.24, 82.68, 70.71, 83.96, 91.73, 80.80, 88.76, 88.74, 88.22, 85.50]
# pgd = [100.00, 99.98, 99.70, 98.90, 100.00, 100.00, 99.99, 99.94, 100.00, 97.72, 99.41, 99.17, 99.72]
# diff = [49.26, 45.51, 42.97, 79.05, 68.13, 62.64, 23.97, 24.17, 20.90, 50.09, 48.69, 46.94, 44.72]
# non_diff = [48.45, 43.11, 42.10, 80.78, 65.22, 65.34, 30.61, 22.04, 16.71, 40.93, 54.06, 48.87, 34.89]
# dh = [48.65, 44.50, 43.05, 79.77, 66.37, 64.08, 27.27, 22.89, 18.45, 45.15, 51.61, 48.11, 39.26]
# # for metric_1, metric_2 in zip(diff, non_diff):
# # # for i in metric_1:
# # metric_2.append(i)
# # print(metric_2)
# p_value_1 = mannwhitneyu(fgsm+fgsm+fgsm+pgd+pgd+pgd, diff+non_diff+dh+diff+non_diff+dh).pvalue
# # p_value_2 = mannwhitneyu(, diff+non_diff+dh).pvalue
# # p_value_1 = mannwhitneyu(dh, non_diff).pvalue
# # p_value_2 = ttest_ind(pgd+pgd+pgd, diff+non_diff+dh).pvalue
# # cd = cohend(metric_1, metric_2)
# print(p_value_1)
# \begin{table*}[]
# % \caption{Accuracy of defended models.}
# % \begin{center}
# % \begin{tabular}{|c|c|cccccc|}
# % \hline
# % \multirow{2}{*}{Datasets} & \multirow{2}{*}{Models} & \multicolumn{6}{c|}{Accuracy } \\ \cline{3-8}
# % & & \multicolumn{1}{c|}{Benign} & \multicolumn{1}{c|}{D} & \multicolumn{1}{c|}{N} & \multicolumn{1}{c|}{DH} & \multicolumn{1}{c|}{FGSM} & \multicolumn{1}{c|}{PGD} \\ \hline
# % \hline\multirow{6}{*}{MNIST} & LN5 & 98.98\%(+0.00\%) & 57.03\%(+0.00\%) & 57.90\%(+0.00\%) & 56.95\%(+0.00\%) & 35.48\%(+0.00\%) & 0.30\%(0.00\%) \\
# % & LN5-D & 98.69\%(-0.29\%) & 55.23\%(-1.80\%) & 55.16\%(-2.74\%) & 55.12\%(-1.83\%) & 42.24\%(+6.76\%) & 0.89\%(+0.59\%) \\
# % & LN5-N & 98.60\%(-0.38\%) & 54.71\%(-2.32\%) & 50.30\%(-7.60\%) & 52.38\%(-4.57\%) & 30.41\%(-5.07\%) & 0.02\%(-0.28\%) \\
# % & LN5-DH & 98.80\%(-0.18\%) & 55.63\%(-1.40\%) & 52.93\%(-4.97\%) & 53.95\%(-3.00\%) & 36.41\%(+0.93\%) & 0.01\%(-0.29\%) \\
# % & LN5-FGSM & 97.77\%(-1.21\%) & 59.35\%(+2.32\%) & 63.48\%(+5.58\%) & 61.71\%(+4.76\%) & 86.03\%(+50.55\%) & 22.22\%(+21.92\%) \\
# % & LN5-PGD & 97.04\%(-1.94\%) & 60.37\%(+3.34\%) & 61.55\%(+3.65\%) & 61.03\%(+4.08\%) & 68.70\%(+33.22\%) & 33.75\%(+33.45\%) \\
# % \hline\multirow{6}{*}{CIFAR} & V16 & 92.80\%(+0.00\%) & 20.95\%(+0.00\%) & 19.22\%(+0.00\%) & 20.23\%(+0.00\%) & 31.76\%(+0.00\%) & 1.10\%(0.00\%) \\
# % & V16-D & 86.34\%(-6.46\%) & 30.76\%(+9.81\%) & 27.31\%(+8.09\%) & 28.53\%(+8.30\%) & 14.50\%(-17.26\%) & 0.15\%(-0.95\%) \\
# % & V16-N & 77.72\%(-15.08\%) & 23.67\%(+2.72\%) & 26.85\%(+7.63\%) & 25.44\%(+5.21\%) & 13.32\%(-18.44\%) & 0.45\%(-0.65\%) \\
# % & V16-DH & 88.22\%(-4.58\%) & 28.35\%(+7.40\%) & 28.84\%(+9.62\%) & 28.87\%(+8.64\%) & 17.39\%(-14.37\%) & 0.48\%(-0.62\%) \\
# % & V16-FGSM & 84.28\%(-8.52\%) & 10.00\%(-10.95\%) & 9.97\%(-9.25\%) & 9.99\%(-10.24\%) & 30.56\%(-1.20\%) & 8.35\%(+7.25\%) \\
# % & V16-PGD & 87.83\%(-4.97\%) & 10.50\%(-10.45\%) & 10.18\%(-9.04\%) & 10.72\%(-9.51\%) & 54.67\%(+22.91\%) & 40.78\%(+39.68\%) \\
# % \hline\multirow{6}{*}{SVHN} & S3 & 92.57\%(+0.00\%) & 79.10\%(+0.00\%) & 83.29\%(+0.00\%) & 81.55\%(+0.00\%) & 11.20\%(+0.00\%) & 0.00\%(0.00\%) \\
# % & S3-D & 93.77\%(+1.20\%) & 77.92\%(-1.18\%) & 77.87\%(-5.43\%) & 78.20\%(-3.35\%) & 30.60\%(+19.40\%) & 3.88\%(+3.88\%) \\
# % & S3-N & 94.49\%(+1.92\%) & 79.49\%(+0.40\%) & 81.21\%(-2.08\%) & 80.35\%(-1.19\%) & 38.64\%(+27.44\%) & 3.33\%(+3.33\%) \\
# % & S3-DH & 94.24\%(+1.67\%) & 79.19\%(+0.10\%) & 81.13\%(-2.16\%) & 79.99\%(-1.56\%) & 29.49\%(+18.29\%) & 4.48\%(+4.48\%) \\
# % & S3-FGSM & 92.28\%(-0.29\%) & 78.35\%(-0.75\%) & 82.63\%(-0.67\%) & 80.81\%(-0.74\%) & 84.30\%(+73.09\%) & 13.53\%(+13.53\%) \\
# % & S3-PGD & 87.74\%(-4.83\%) & 74.90\%(-4.20\%) & 76.49\%(-6.80\%) & 75.80\%(-5.75\%) & 59.67\%(+48.46\%) & 50.57\%(+50.57\%) \\
# % \hline\multirow{6}{*}{EuroSAT} & RN20 & 97.11\%(+0.00\%) & 51.31\%(+0.00\%) & 45.94\%(+0.00\%) & 48.39\%(+0.00\%) & 11.26\%(+0.00\%) & 0.59\%(0.00\%) \\
# % & RN20-D & 89.46\%(-7.65\%) & 50.00\%(-1.31\%) & 45.52\%(-0.43\%) & 48.52\%(+0.13\%) & 10.69\%(-0.57\%) & 1.96\%(+1.37\%) \\
# % & RN20-N & 93.76\%(-3.35\%) & 52.24\%(+0.93\%) & 44.54\%(-1.41\%) & 48.19\%(-0.20\%) & 14.69\%(+3.43\%) & 1.83\%(+1.24\%) \\
# % & RN20-DH & 93.28\%(-3.83\%) & 53.07\%(+1.76\%) & 45.41\%(-0.54\%) & 49.20\%(+0.81\%) & 7.85\%(-3.41\%) & 0.22\%(-0.37\%) \\
# % & RN20-FGSM & 22.70\%(-74.41\%) & 14.65\%(-36.67\%) & 14.28\%(-31.67\%) & 14.59\%(-33.80\%) & 25.81\%(+14.56\%) & 0.02\%(-0.57\%) \\
# % & RN20-PGD & 26.26\%(-70.85\%) & 15.44\%(-35.87\%) & 15.30\%(-30.65\%) & 15.30\%(-33.09\%) & 22.87\%(+11.61\%) & 22.57\%(+21.98\%) \\
# % \hline
# % \end{tabular}
# % \end{center}
# % \label{tab:rq4}
# % \end{table*}
# adv = [98.69, 98.60, 98.80, 97.77, 97.04, 86.34, 77.72, 88.22, 84.28, 87.83, 93.77, 94.49, 94.24, 92.28, 87.74, 89.46, 93.76, 93.28, 22.70, 33.87, 93.91, 93.33, 85.70, 42.69, 39.19]
# n_a = [98.98, 98.98, 98.98, 98.98, 98.98, 92.80, 92.80, 92.80, 92.80, 92.80, 92.57, 92.57, 92.57, 92.57, 92.57, 97.11, 97.11, 97.11, 97.11, 97.11, 96.35, 96.35, 96.35, 96.35, 96.35]
# print(mannwhitneyu(adv, n_a).pvalue, mean(adv) - mean(n_a), cohend(adv, n_a))
```
#### File: CovTesting_Replication/Model Accuracy under Different Scenarios/generate_dataset.py
```python
import tensorflow_datasets as tfds
import numpy as np
import os
import argparse
DATA_DIR = "../data/"
## convert array of indices to 1-hot encoded numpy array
def convert_label_to_one_hot_encoding(index_labels):
one_hot_labels = np.zeros((index_labels.size, index_labels.max()+1))
one_hot_labels[np.arange(index_labels.size), index_labels] = 1
return one_hot_labels
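## Hedged example (illustration only):
## convert_label_to_one_hot_encoding(np.array([0, 2, 1])) returns
## [[1., 0., 0.], [0., 0., 1.], [0., 1., 0.]] -- one column per class,
## up to the largest label value seen in the array.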
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", default='eurosat', type=str)
args = parser.parse_args()
# please check this link to browse more datasets
# https://www.tensorflow.org/datasets/catalog/fashion_mnist
# choices = ["fashion_mnist", "eurosat", "oxford_flowers102", "mnist", "cifar10"]
dataset_name = args.dataset
# batch_size=-1 to get the full dataset in NumPy arrays from the returned tf.Tensor object
if dataset_name == "fashion_mnist" or dataset_name == "oxford_flowers102":
dataset_train = tfds.load(
name=dataset_name, split=tfds.Split.TRAIN, batch_size=-1)
dataset_test = tfds.load(
name=dataset_name, split=tfds.Split.TEST, batch_size=-1)
elif dataset_name == "food101":
dataset_train = tfds.load(
name=dataset_name, split=tfds.Split.TRAIN, batch_size=-1)
dataset_test = tfds.load(
name=dataset_name, split=tfds.Split.VALIDATION, batch_size=-1)
elif dataset_name == "eurosat":
dataset_train = tfds.load(
name=dataset_name, split="train[:80%]", batch_size=-1)
dataset_test = tfds.load(
name=dataset_name, split="train[80%:]", batch_size=-1)
else:
dataset_train = tfds.load(
name=dataset_name, split=tfds.Split.TRAIN, batch_size=-1)
dataset_test = tfds.load(
name=dataset_name, split=tfds.Split.TEST, batch_size=-1)
# tfds.as_numpy return a generator that yields NumPy array records out of a tf.data.Dataset
dataset_train = tfds.as_numpy(dataset_train)
dataset_test = tfds.as_numpy(dataset_test)
    # separate the x and y
x_train, y_train = dataset_train["image"], dataset_train["label"]
x_test, y_test = dataset_test["image"], dataset_test["label"]
y_train = convert_label_to_one_hot_encoding(y_train)
y_test = convert_label_to_one_hot_encoding(y_test)
os.makedirs(os.path.join(
DATA_DIR, dataset_name + "/benign/"), exist_ok=True)
np.save(os.path.join(DATA_DIR, dataset_name + "/benign/x_train.npy"), x_train)
np.save(os.path.join(DATA_DIR, dataset_name + "/benign/y_train.npy"), y_train)
np.save(os.path.join(DATA_DIR, dataset_name + "/benign/x_test.npy"), x_test)
np.save(os.path.join(DATA_DIR, dataset_name + "/benign/y_test.npy"), y_test)
print("\n\nCHECK STANDARD\n")
print(dataset_name)
print(x_train.shape)
print(y_train.shape)
print(y_train[0])
## Check format/standard with the previous dataset
from helper import load_data
x_train, y_train, x_test, y_test = load_data("mnist")
print("")
print("mnist")
print(x_train.shape)
print(y_train.shape)
print(y_train[0])
x_train, y_train, x_test, y_test = load_data("cifar")
print("")
print("cifar")
print(x_train.shape)
print(y_train.shape)
print(y_train[0])
```
#### File: CovTesting_Replication/Model Accuracy under Different Scenarios/helper.py
```python
import time
import os
import numpy as np
from mutators import Mutators
import random
import copy
import shutil
from keras import backend as K
from PIL import Image, ImageFilter  # used below by gaussian_blur_transform / image_compress_transform
from skimage.measure import compare_ssim as SSIM  # older scikit-image API, per the SSIM docstring in avg_SSIM
DATA_DIR = "../data/"
MODEL_DIR = "../models/"
# helper function
def get_layer_i_output(model, i, data):
layer_model = K.function([model.layers[0].input], [model.layers[i].output])
ret = layer_model([data])[0]
num = data.shape[0]
ret = np.reshape(ret, (num, -1))
return ret
class Coverage:
def __init__(self, model, x_train, y_train, x_test, y_test, x_adv):
self.model = model
self.x_train = x_train
self.y_train = y_train
self.x_test = x_test
self.y_test = y_test
self.x_adv = x_adv
# find scale factors and min num
def scale(self, layers, batch=1024):
data_num = self.x_adv.shape[0]
factors = dict()
for i in layers:
begin, end = 0, batch
max_num, min_num = np.NINF, np.inf
while begin < data_num:
layer_output = get_layer_i_output(self.model, i, self.x_adv[begin:end])
tmp = layer_output.max()
max_num = tmp if tmp > max_num else max_num
tmp = layer_output.min()
min_num = tmp if tmp < min_num else min_num
begin += batch
end += batch
factors[i] = (max_num - min_num, min_num)
return factors
# 1 Neuron Coverage
def NC(self, layers, threshold=0., batch=1024):
factors = self.scale(layers, batch=batch)
neuron_num = 0
for i in layers:
out_shape = self.model.layers[i].output.shape
neuron_num += np.prod(out_shape[1:])
neuron_num = int(neuron_num)
activate_num = 0
data_num = self.x_adv.shape[0]
for i in layers:
neurons = np.prod(self.model.layers[i].output.shape[1:])
buckets = np.zeros(neurons).astype('bool')
begin, end = 0, batch
while begin < data_num:
layer_output = get_layer_i_output(self.model, i, self.x_adv[begin:end])
# scale the layer output to (0, 1)
layer_output -= factors[i][1]
layer_output /= factors[i][0]
col_max = np.max(layer_output, axis=0)
begin += batch
end += batch
buckets[col_max > threshold] = True
activate_num += np.sum(buckets)
# print('NC:\t{:.3f} activate_num:\t{} neuron_num:\t{}'.format(activate_num / neuron_num, activate_num, neuron_num))
return activate_num / neuron_num, activate_num, neuron_num
# 2 k-multisection neuron coverage, neuron boundary coverage and strong activation neuron coverage
def KMNC(self, layers, k=10, batch=1024):
neuron_num = 0
for i in layers:
out_shape = self.model.layers[i].output.shape
neuron_num += np.prod(out_shape[1:])
neuron_num = int(neuron_num)
covered_num = 0
l_covered_num = 0
u_covered_num = 0
for i in layers:
neurons = np.prod(self.model.layers[i].output.shape[1:])
print(neurons)
begin, end = 0, batch
data_num = self.x_train.shape[0]
neuron_max = np.full(neurons, np.NINF).astype('float')
neuron_min = np.full(neurons, np.inf).astype('float')
while begin < data_num:
layer_output_train = get_layer_i_output(self.model, i, self.x_train[begin:end])
batch_neuron_max = np.max(layer_output_train, axis=0)
batch_neuron_min = np.min(layer_output_train, axis=0)
neuron_max = np.maximum(batch_neuron_max, neuron_max)
neuron_min = np.minimum(batch_neuron_min, neuron_min)
begin += batch
end += batch
buckets = np.zeros((neurons, k + 2)).astype('bool')
interval = (neuron_max - neuron_min) / k
# print(interval[8], neuron_max[8], neuron_min[8])
begin, end = 0, batch
data_num = self.x_adv.shape[0]
while begin < data_num:
                layer_output_adv = get_layer_i_output(self.model, i, self.x_adv[begin: end])
layer_output_adv -= neuron_min
layer_output_adv /= (interval + 10 ** (-100))
layer_output_adv[layer_output_adv < 0.] = -1
layer_output_adv[layer_output_adv >= k / 1.0] = k
layer_output_adv = layer_output_adv.astype('int')
# index 0 for lower, 1 to k for between, k + 1 for upper
layer_output_adv = layer_output_adv + 1
for j in range(neurons):
uniq = np.unique(layer_output_adv[:, j])
# print(layer_output_adv[:, j])
buckets[j, uniq] = True
begin += batch
end += batch
covered_num += np.sum(buckets[:, 1:-1])
u_covered_num += np.sum(buckets[:, -1])
l_covered_num += np.sum(buckets[:, 0])
print('KMNC:\t{:.3f} covered_num:\t{}'.format(covered_num / (neuron_num * k), covered_num))
print(
'NBC:\t{:.3f} l_covered_num:\t{}'.format((l_covered_num + u_covered_num) / (neuron_num * 2), l_covered_num))
print('SNAC:\t{:.3f} u_covered_num:\t{}'.format(u_covered_num / neuron_num, u_covered_num))
return covered_num / (neuron_num * k), (l_covered_num + u_covered_num) / (
neuron_num * 2), u_covered_num / neuron_num, covered_num, l_covered_num, u_covered_num, neuron_num * k
# 3 top-k neuron coverage
def TKNC(self, layers, k=2, batch=1024):
def top_k(x, k):
ind = np.argpartition(x, -k)[-k:]
return ind[np.argsort((-x)[ind])]
neuron_num = 0
for i in layers:
out_shape = self.model.layers[i].output.shape
neuron_num += np.prod(out_shape[1:])
neuron_num = int(neuron_num)
pattern_num = 0
data_num = self.x_adv.shape[0]
for i in layers:
pattern_set = set()
begin, end = 0, batch
while begin < data_num:
layer_output = get_layer_i_output(self.model, i, self.x_adv[begin:end])
topk = np.argpartition(layer_output, -k, axis=1)[:, -k:]
topk = np.sort(topk, axis=1)
# or in order
# topk = np.apply_along_axis[lambda x: top_k(layer_output, k), 1, layer_output]
for j in range(topk.shape[0]):
pattern_set.add(tuple(topk[j]))
begin += batch
end += batch
pattern_num += len(pattern_set)
print(
'TKNC:\t{:.3f} pattern_num:\t{} neuron_num:\t{}'.format(pattern_num / neuron_num, pattern_num, neuron_num))
return pattern_num / neuron_num, pattern_num, neuron_num
# 4 top-k neuron patterns
def TKNP(self, layers, k=2, batch=1024):
def top_k(x, k):
ind = np.argpartition(x, -k)[-k:]
return ind[np.argsort((-x)[ind])]
def to_tuple(x):
l = list()
for row in x:
l.append(tuple(row))
return tuple(l)
pattern_set = set()
layer_num = len(layers)
data_num = self.x_adv.shape[0]
patterns = np.zeros((data_num, layer_num, k))
layer_cnt = 0
for i in layers:
neurons = np.prod(self.model.layers[i].output.shape[1:])
begin, end = 0, batch
while begin < data_num:
layer_output = get_layer_i_output(self.model, i, self.x_adv[begin:end])
topk = np.argpartition(layer_output, -k, axis=1)[:, -k:]
topk = np.sort(topk, axis=1)
# or in order
# topk = np.apply_along_axis[lambda x: top_k(layer_output, k), 1, layer_output]
patterns[begin:end, layer_cnt, :] = topk
begin += batch
end += batch
layer_cnt += 1
for i in range(patterns.shape[0]):
pattern_set.add(to_tuple(patterns[i]))
pattern_num = len(pattern_set)
print('TKNP:\t{:.3f}'.format(pattern_num))
return pattern_num
def all(self, layers, batch=100):
self.NC(layers, batch=batch)
self.KMNC(layers, batch=batch)
self.TKNC(layers, batch=batch)
self.TKNP(layers, batch=batch)
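# Hedged usage sketch (illustration only; model, the data arrays and the layer indices
# are assumed to be defined by the caller):
#   cov = Coverage(model, x_train, y_train, x_test, y_test, x_adv)
#   nc_ratio, activated, total = cov.NC(layers=[1, 2, 3], threshold=0.75)
#   cov.all(layers=[1, 2, 3], batch=256)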
class AttackEvaluate:
# model does not have softmax layer
def __init__(self, model, ori_x, ori_y, adv_x):
self.model = model
# get the raw data
self.nature_samples = ori_x
self.labels_samples = ori_y
# get the adversarial examples
self.adv_samples = adv_x
# self.adv_labels = np.load('{}{}_AdvLabels.npy'.format(self.AdvExamplesDir, self.AttackName))
predictions = model.predict(self.adv_samples)
def soft_max(x):
return np.exp(x) / np.sum(np.exp(x), axis=0)
tmp_soft_max = []
for i in range(len(predictions)):
tmp_soft_max.append(soft_max(predictions[i]))
self.softmax_prediction = np.array(tmp_soft_max)
# help function
def successful(self, adv_softmax_preds, nature_true_preds):
if np.argmax(adv_softmax_preds) != np.argmax(nature_true_preds):
return True
else:
return False
# 1 MR:Misclassification Rate
def misclassification_rate(self):
cnt = 0
for i in range(len(self.adv_samples)):
if self.successful(adv_softmax_preds=self.softmax_prediction[i], nature_true_preds=self.labels_samples[i]):
cnt += 1
mr = cnt / len(self.adv_samples)
print('MR:\t\t{:.1f}%'.format(mr * 100))
return mr
# 2 ACAC: average confidence of adversarial class
def avg_confidence_adv_class(self):
cnt = 0
conf = 0
for i in range(len(self.adv_samples)):
if self.successful(adv_softmax_preds=self.softmax_prediction[i], nature_true_preds=self.labels_samples[i]):
cnt += 1
conf += np.max(self.softmax_prediction[i])
print('ACAC:\t{:.3f}'.format(conf / cnt))
return conf / cnt
# 3 ACTC: average confidence of true class
def avg_confidence_true_class(self):
true_labels = np.argmax(self.labels_samples, axis=1)
cnt = 0
true_conf = 0
for i in range(len(self.adv_samples)):
if self.successful(adv_softmax_preds=self.softmax_prediction[i], nature_true_preds=self.labels_samples[i]):
cnt += 1
true_conf += self.softmax_prediction[i, true_labels[i]]
print('ACTC:\t{:.3f}'.format(true_conf / cnt))
return true_conf / cnt
# 4 ALP: Average L_p Distortion
def avg_lp_distortion(self):
ori_r = np.round(self.nature_samples * 255)
adv_r = np.round(self.adv_samples * 255)
NUM_PIXEL = int(np.prod(self.nature_samples.shape[1:]))
pert = adv_r - ori_r
dist_l0 = 0
dist_l2 = 0
dist_li = 0
cnt = 0
for i in range(len(self.adv_samples)):
if self.successful(adv_softmax_preds=self.softmax_prediction[i], nature_true_preds=self.labels_samples[i]):
cnt += 1
dist_l0 += (np.linalg.norm(np.reshape(pert[i], -1), ord=0) / NUM_PIXEL)
dist_l2 += np.linalg.norm(np.reshape(self.nature_samples[i] - self.adv_samples[i], -1), ord=2)
dist_li += np.linalg.norm(np.reshape(self.nature_samples[i] - self.adv_samples[i], -1), ord=np.inf)
adv_l0 = dist_l0 / cnt
adv_l2 = dist_l2 / cnt
adv_li = dist_li / cnt
print('**ALP:**\n\tL0:\t{:.3f}\n\tL2:\t{:.3f}\n\tLi:\t{:.3f}'.format(adv_l0, adv_l2, adv_li))
return adv_l0, adv_l2, adv_li
# 5 ASS: Average Structural Similarity
def avg_SSIM(self):
ori_r_channel = np.round(self.nature_samples * 255).astype(dtype=np.float32)
adv_r_channel = np.round(self.adv_samples * 255).astype(dtype=np.float32)
totalSSIM = 0
cnt = 0
"""
For SSIM function in skimage: http://scikit-image.org/docs/dev/api/skimage.measure.html
multichannel : bool, optional If True, treat the last dimension of the array as channels. Similarity calculations are done
independently for each channel then averaged.
"""
for i in range(len(self.adv_samples)):
if self.successful(adv_softmax_preds=self.softmax_prediction[i], nature_true_preds=self.labels_samples[i]):
cnt += 1
totalSSIM += SSIM(X=ori_r_channel[i], Y=adv_r_channel[i], multichannel=True)
print('ASS:\t{:.3f}'.format(totalSSIM / cnt))
return totalSSIM / cnt
# 6: PSD: Perturbation Sensitivity Distance
def avg_PSD(self):
psd = 0
cnt = 0
for outer in range(len(self.adv_samples)):
if self.successful(adv_softmax_preds=self.softmax_prediction[outer],
nature_true_preds=self.labels_samples[outer]):
cnt += 1
image = self.nature_samples[outer]
pert = abs(self.adv_samples[outer] - self.nature_samples[outer])
                # my patch: the transpose below assumes channels-first input; note that the
                # following loop then iterates over the first axis of the transposed array,
                # which for (H, W, C) images is not the channel axis
image = np.transpose(image, (1, 2, 0))
pert = np.transpose(pert, (1, 2, 0))
for idx_channel in range(image.shape[0]):
image_channel = image[idx_channel]
pert_channel = pert[idx_channel]
image_channel = np.pad(image_channel, 1, 'reflect')
pert_channel = np.pad(pert_channel, 1, 'reflect')
for i in range(1, image_channel.shape[0] - 1):
for j in range(1, image_channel.shape[1] - 1):
psd += pert_channel[i, j] * (1.0 - np.std(np.array(
[image_channel[i - 1, j - 1], image_channel[i - 1, j], image_channel[i - 1, j + 1],
image_channel[i, j - 1],
image_channel[i, j], image_channel[i, j + 1], image_channel[i + 1, j - 1],
image_channel[i + 1, j],
image_channel[i + 1, j + 1]])))
print('PSD:\t{:.3f}'.format(psd / cnt))
return psd / cnt
# 7 NTE: Noise Tolerance Estimation
def avg_noise_tolerance_estimation(self):
nte = 0
cnt = 0
for i in range(len(self.adv_samples)):
if self.successful(adv_softmax_preds=self.softmax_prediction[i], nature_true_preds=self.labels_samples[i]):
cnt += 1
sort_preds = np.sort(self.softmax_prediction[i])
nte += sort_preds[-1] - sort_preds[-2]
print('NTE:\t{:.3f}'.format(nte / cnt))
return nte / cnt
# 8 RGB: Robustness to Gaussian Blur
def robust_gaussian_blur(self, radius=0.5):
total = 0
num_gb = 0
for i in range(len(self.adv_samples)):
if np.argmax(self.softmax_prediction[i]) != np.argmax(self.labels_samples[i]):
total += 1
adv_sample = self.adv_samples[i]
gb_sample = gaussian_blur_transform(AdvSample=adv_sample, radius=radius)
gb_pred = self.model.predict(np.expand_dims(np.array(gb_sample), axis=0))
if np.argmax(gb_pred) != np.argmax(self.labels_samples[i]):
num_gb += 1
print('RGB:\t{:.3f}'.format(num_gb / total))
return num_gb, total, num_gb / total
# 9 RIC: Robustness to Image Compression
def robust_image_compression(self, quality=50):
total = 0
num_ic = 0
# prepare the save dir for the generated image(png or jpg)
image_save = os.path.join('./tmp', 'image')
if os.path.exists(image_save):
shutil.rmtree(image_save)
os.mkdir(image_save)
# print('\nNow, all adversarial examples are saved as PNG and then compressed using *Guetzli* in the {} fold ......\n'.format(image_save))
for i in range(len(self.adv_samples)):
if np.argmax(self.softmax_prediction[i]) != np.argmax(self.labels_samples[i]):
total += 1
adv_sample = self.adv_samples[i]
ic_sample = image_compress_transform(IndexAdv=i, AdvSample=adv_sample, dir_name=image_save,
quality=quality)
ic_sample = np.expand_dims(ic_sample, axis=0)
ic_pred = self.model.predict(np.array(ic_sample))
if np.argmax(ic_pred) != np.argmax(self.labels_samples[i]):
num_ic += 1
print('RIC:\t{:.3f}'.format(num_ic / total))
return num_ic, total, num_ic / total
def all(self):
self.misclassification_rate()
self.avg_confidence_adv_class()
self.avg_confidence_true_class()
self.avg_lp_distortion()
self.avg_SSIM()
self.avg_PSD()
self.avg_noise_tolerance_estimation()
self.robust_gaussian_blur()
self.robust_image_compression(1)
def mutate(img, dataset):
# ref_img is the reference image, img is the seed
# cl means the current state of transformation
    # 0 means it can select both Affine and Pixel transformations
    # 1 means it can only select Pixel transformations because an Affine transformation has been used before
    # l0_ref, linf_ref: if the current seed was mutated by an Affine transformation, we record the L0 and L_inf
    # between the initial image and the reference image, i.e., L0(s_0,s_{j-1}) and L_inf(s_0,s_{j-1}) in Equation 2 of the paper
    # try_num is the maximum number of trials in Algorithm 2
transformations = [Mutators.image_translation, Mutators.image_scale, Mutators.image_shear, Mutators.image_rotation,
Mutators.image_contrast, Mutators.image_brightness, Mutators.image_blur,
Mutators.image_pixel_change,
Mutators.image_noise]
    # these parameters need to be carefully considered in the experiment
    # to account for the feedback
params = []
params.append(list(range(-3, 3))) # image_translation
params.append(list(map(lambda x: x * 0.1, list(range(7, 12))))) # image_scale
params.append(list(map(lambda x: x * 0.1, list(range(-6, 6))))) # image_shear
params.append(list(range(-50, 50))) # image_rotation
params.append(list(map(lambda x: x * 0.1, list(range(5, 13))))) # image_contrast
params.append(list(range(-20, 20))) # image_brightness
params.append(list(range(1, 10))) # image_blur
params.append(list(range(1, 10))) # image_pixel_change
params.append(list(range(1, 4))) # image_noise
classA = [7, 8] # pixel value transformation
classB = [0, 1, 2, 3, 4, 5, 6] # Affine transformation
x, y, z = img.shape
random.seed(time.time())
tid = random.sample(classA + classB, 1)[0]
# tid = 7
# Randomly select one transformation Line-7 in Algorithm2
transformation = transformations[tid]
params = params[tid]
# Randomly select one parameter Line 10 in Algo2
param = random.sample(params, 1)[0]
# Perform the transformation Line 11 in Algo2
# plt.imshow(img + 0.5)
# plt.show()
if dataset == 'cifar':
# # for cifar dataset
img_new = transformation(img, param)
# img_new = np.round(img_new)
img_new = img_new.reshape(img.shape)
else:
image = np.uint8(np.round((img + 0.5) * 255))
img_new = transformation(copy.deepcopy(image), param)/ 255.0 - 0.5
# img_new = np.round(img_new)
img_new = img_new.reshape(img.shape)
# Otherwise the mutation is failed. Line 20 in Algo 2
return img_new
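# Hedged usage sketch (illustration only): mutate takes a single image scaled to roughly
# (-0.5, 0.5) (see load_data below) and returns one randomly transformed copy, e.g.
#   img_new = mutate(x_test[0], 'mnist')
# the transformation and its parameter are re-sampled on every call.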
def check_data_path(dataset_name):
assert os.path.exists(DATA_DIR + dataset_name + '/benign/x_train.npy')
assert os.path.exists(DATA_DIR + dataset_name + '/benign/y_train.npy')
assert os.path.exists(DATA_DIR + dataset_name + '/benign/x_test.npy')
assert os.path.exists(DATA_DIR + dataset_name + '/benign/y_test.npy')
def softmax(x):
exp_x = np.exp(x)
return exp_x / np.sum(exp_x)
def compare_nc(model, x_train, y_train, x_test, y_test, x_new, x_old, layer):
l = [0, layer]
coverage1 = Coverage(model, x_train, y_train, x_test, y_test, np.expand_dims(x_new, axis=0))
nc1, _, _ = coverage1.NC(l, threshold=0.75, batch=1024)
coverage2 = Coverage(model, x_train, y_train, x_test, y_test, np.expand_dims(x_old, axis=0))
nc2, _, _ = coverage2.NC(l, threshold=0.75, batch=1024)
if nc1 > nc2:
return True
else:
return False
# the data is in range(-.5, .5)
def load_data(dataset_name):
assert (dataset_name.upper() in ['MNIST', 'CIFAR', 'SVHN', 'EUROSAT', 'FASHION_MNIST', 'OXFORD_FLOWERS102'])
dataset_name = dataset_name.lower()
x_train = np.load(DATA_DIR + dataset_name + '/benign/x_train.npy')
y_train = np.load(DATA_DIR + dataset_name + '/benign/y_train.npy')
x_test = np.load(DATA_DIR + dataset_name + '/benign/x_test.npy')
y_test = np.load(DATA_DIR + dataset_name + '/benign/y_test.npy')
return x_train, y_train, x_test, y_test
def retrain(model, X_train, Y_train, X_test, Y_test, batch_size=128, epochs=50):
model.compile(
loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy']
)
# training without data augmentation
model.fit(
X_train, Y_train,
epochs=epochs,
batch_size=batch_size,
shuffle=True,
verbose=1,
validation_data=(X_test, Y_test)
)
return model
# (row, col, channel)
def gaussian_blur_transform(AdvSample, radius):
if AdvSample.shape[2] == 3:
sample = np.round((AdvSample + 0.5) * 255)
image = Image.fromarray(np.uint8(sample))
gb_image = image.filter(ImageFilter.GaussianBlur(radius=radius))
gb_image = np.array(gb_image).astype('float32') / 255.0 - 0.5
# print(gb_image.shape)
return gb_image
else:
sample = np.round((AdvSample + 0.5) * 255)
sample = np.squeeze(sample, axis=2)
image = Image.fromarray(np.uint8(sample))
gb_image = image.filter(ImageFilter.GaussianBlur(radius=radius))
gb_image = np.expand_dims(np.array(gb_image).astype('float32'), axis=-1) / 255.0 - 0.5
# print(gb_image.shape)
return gb_image
# use PIL Image save instead of guetzli
def image_compress_transform(IndexAdv, AdvSample, dir_name, quality=50):
if AdvSample.shape[2] == 3:
sample = np.round((AdvSample + .5) * 255)
image = Image.fromarray(np.uint8(sample))
saved_adv_image_path = os.path.join(dir_name, '{}th-adv.jpg'.format(IndexAdv))
image.save(saved_adv_image_path, format='JPEG', quality=quality)
IC_image = Image.open(saved_adv_image_path).convert('RGB')
IC_image = np.array(IC_image).astype('float32') / 255.0 - .5
return IC_image
else:
sample = np.round((AdvSample + .5) * 255)
sample = np.squeeze(sample, axis=2)
image = Image.fromarray(np.uint8(sample), mode='L')
saved_adv_image_path = os.path.join(dir_name, '{}th-adv.jpg'.format(IndexAdv))
image.save(saved_adv_image_path, format='JPEG', quality=quality)
IC_image = Image.open(saved_adv_image_path).convert('L')
IC_image = np.expand_dims(np.array(IC_image).astype('float32'), axis=-1) / 255.0 - .5
return IC_image
# # (row, col, channel)
# def gaussian_blur_transform(AdvSample, radius):
# if AdvSample.shape[2] == 3:
# sample = np.round(AdvSample)
#
# image = Image.fromarray(np.uint8(sample))
# gb_image = image.filter(ImageFilter.GaussianBlur(radius=radius))
# gb_image = np.array(gb_image).astype('float32')
# # print(gb_image.shape)
#
# return gb_image
# else:
# sample = np.round(AdvSample)
# sample = np.squeeze(sample, axis=2)
# image = Image.fromarray(np.uint8(sample))
# gb_image = image.filter(ImageFilter.GaussianBlur(radius=radius))
# gb_image = np.expand_dims(np.array(gb_image).astype('float32'), axis=-1)
# # print(gb_image.shape)
# return gb_image
#
#
#
# # use PIL Image save instead of guetzli
# def image_compress_transform(IndexAdv, AdvSample, dir_name, quality=50):
# if AdvSample.shape[2] == 3:
# sample = np.round(AdvSample)
# image = Image.fromarray(np.uint8(sample))
# saved_adv_image_path = os.path.join(dir_name, '{}th-adv.jpg'.format(IndexAdv))
# image.save(saved_adv_image_path, format='JPEG', quality=quality)
# IC_image = Image.open(saved_adv_image_path).convert('RGB')
# IC_image = np.array(IC_image).astype('float32')
# return IC_image
# else:
# sample = np.round(AdvSample)
# sample = np.squeeze(sample, axis=2)
# image = Image.fromarray(np.uint8(sample), mode='L')
# saved_adv_image_path = os.path.join(dir_name, '{}th-adv.jpg'.format(IndexAdv))
# image.save(saved_adv_image_path, format='JPEG', quality=quality)
# IC_image = Image.open(saved_adv_image_path).convert('L')
# IC_image = np.expand_dims(np.array(IC_image).astype('float32'), axis=-1)
# return IC_image
```
#### File: CovTesting_Replication/Model Accuracy under Different Scenarios/run_adv_train.py
```python
import sys
sys.path.append('..')
import parameters as param
from utils import get_available_gpus
import multiprocessing
from multiprocessing import Pool, Process, Queue, Manager
import os
import tensorflow as tf
def train(gpu_id):
while True:
if not q.empty():
attack_name, dataset, model_name = q.get()
cmd = 'CUDA_VISIBLE_DEVICES=' + gpu_id + ' python adv_train.py --dataset ' + dataset + ' --model ' + model_name + ' --attack ' + attack_name + ' --nb_epochs 200 --batch_size 256'
os.system(cmd)
else:
print("Finished")
return
if __name__=='__main__':
# datasets = ['cifar', 'mnist', 'svhn', 'eurosat']
# model_dict = {
# 'mnist': ['lenet1', 'lenet4', 'lenet5'],
# 'cifar': ['vgg16', 'resnet20', 'resnet56'],
# 'svhn' : ['svhn_model', 'svhn_second', 'svhn_first'],
# 'eurosat': ['resnet20', 'resnet56']
# }
datasets = ['cifar', 'mnist', 'svhn', 'eurosat']
model_dict = {
'mnist': ['lenet5'],
'cifar': ['vgg16'],
'svhn' : ['svhn_second'],
'eurosat': ['resnet56']
}
attack_names = ['fgsm', 'pgd', 'deepfool', 'bim', 'apgd','cw']
### add combinations into queues
manager = multiprocessing.Manager()
q = manager.Queue()
for attack_name in attack_names:
for dataset in datasets:
for model_name in model_dict[dataset]:
q.put((attack_name, dataset, model_name))
p_list = []
for i in range(len(get_available_gpus())):
gpu_id = i
p = multiprocessing.Process(target=train, args=(str(gpu_id), ))
p_list.append(p)
p.start()
for i in p_list:
i.join()
    print("All processes finished.")
``` |
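In the script above the job queue `q` is created in the `__main__` block and read by the workers as a module-level global, which relies on fork-based multiprocessing. A minimal, self-contained sketch of the same producer/consumer pattern with the queue passed explicitly (the job tuples and GPU count here are illustrative, not part of the original script):
```python
import multiprocessing

def worker(gpu_id, job_queue):
    # Drain jobs until the queue is empty, pinning each job to one GPU id.
    while not job_queue.empty():
        dataset, model_name = job_queue.get()
        print(f"GPU {gpu_id}: would train {model_name} on {dataset}")

if __name__ == "__main__":
    manager = multiprocessing.Manager()
    jobs = manager.Queue()
    for job in [("mnist", "lenet5"), ("cifar", "vgg16")]:
        jobs.put(job)
    procs = [multiprocessing.Process(target=worker, args=(i, jobs)) for i in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
```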
{
"source": "jiekeshi/OpenVocabCodeNLM",
"score": 3
} |
#### File: OpenVocabCodeNLM/data/split.py
```python
import re
import json
import time
import string
import logging
import argparse
import multiprocessing as mp
manager = mp.Manager()
q_to_store = manager.Queue()
from tqdm import tqdm
from spiral import ronin
keywords = json.load(open("keywords.json"))
def identifier_split(line):
split_data = []
for tok in line.strip().split(" "):
if tok in keywords["keywords"] or tok.isdigit() or tok in string.punctuation or tok[0] in string.punctuation:
split_data.append(tok)
continue
split_words = []
        tok = re.split(r"([a-zA-Z0-9]+|\W+)", tok)
for s in tok:
if not s == "":
if (s.isalnum() or s.isalpha()) and len(s)>2:
for _ in ronin.split(s): split_words.append(_)
else:
split_words.append(s)
if len(split_words) >= 2:
for i, w in enumerate(split_words):
if i < len(split_words) - 1:
split_data.append(w)
split_data.append("</w>")
else:
split_data.append(w)
elif len(split_words) > 0:
split_data.append(split_words[0])
# return " ".join(split_data)
# # for i in split_data:
# # print(i)
q_to_store.put(" ".join(split_data))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--input_fp", "-i")
parser.add_argument("--out_fp", "-o")
args = parser.parse_args()
with open(args.input_fp, "r") as f, \
open(args.out_fp, "w") as fout:
logging.info("Start to process files...")
lines = f.readlines()
pbar = tqdm(total=len(lines))
update = lambda *args: pbar.update()
start_time = time.time()
pool = mp.Pool(mp.cpu_count())
for line in lines:
# all_tokens.append(identifier_split(line, args.split))
pool.apply_async(identifier_split, args=(line,), callback=update)
pool.close()
pool.join()
logging.info("Time cost: {} s".format(str(time.time()-start_time)))
logging.info("Start to write files...")
while not q_to_store.empty():
single_data = q_to_store.get()
if len(single_data):
fout.write(single_data + "\n")
logging.info("Done")
if __name__ == "__main__":
main()
``` |
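A rough illustration of the subword splitting performed by `identifier_split` above, assuming the `spiral` package is installed; the exact output of `ronin.split` may differ slightly between versions:
```python
from spiral import ronin

parts = ronin.split("getUserName")
print(parts)                    # typically ['get', 'User', 'Name']
print(" </w> ".join(parts))     # 'get </w> User </w> Name', the form identifier_split writes out
```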
{
"source": "JIEKEXIAN/DE-GAN",
"score": 2
} |
#### File: DE-GAN/losses/loss.py
```python
import keras.backend as K
import sys
sys.path.append('..')
from lib.config import *
from keras.losses import binary_crossentropy
opt = args()
def rec_M_loss(y_true,y_pred):
return K.mean(binary_crossentropy(y_true,y_pred))
def rec_f_loss(y_true,y_pred):
return K.mean(K.abs(y_true-y_pred))
def rec_l_loss(y_true,y_pred):
return K.mean(binary_crossentropy(y_true,y_pred))
def rec_Sita_loss(x_real,x_rec,fg):
loss = K.mean(K.abs((x_real-x_rec)*(fg+0.5*(1-fg))))
return loss
def loss_code_adv_discriminator(y_true,y_pred,label_true,label_fake):
loss1 = binary_crossentropy(label_true,y_true)
loss2 = binary_crossentropy(label_fake,y_pred)
loss = loss1 + loss2
return K.mean(loss)
def loss_code_adv_generator(label_true,y_true):
loss = binary_crossentropy(label_true,y_true)
return K.mean(loss)
def loss_adv_gen_loss(label_true,y_true):
loss = binary_crossentropy(label_true,y_true)
return K.mean(loss)
def loss_adv_dis_loss(label_true,y_true,label_fake,y_pred):
loss1 = binary_crossentropy(label_true, y_true)
loss2 = binary_crossentropy(label_fake, y_pred)
loss = loss1 + loss2
return K.mean(loss)
def loss_patch_adv_gen(label_true,y_true):
return K.mean(binary_crossentropy(label_true,y_true))
def loss_patch_adv_dis(label_true,y_true,label_fake,y_pred):
loss1 = K.mean(binary_crossentropy(label_true,y_true))
loss2 = K.mean(binary_crossentropy(label_fake,y_pred))
loss = loss1 + loss2
return loss
def loss_all(y_true,vae_out,generator_out):
[image, batch_mask, batch_part, batch_landmark, batch_fg] = y_true
[Cw_z_f, Cw_z_l, Cw_z_M, M_f, X_f, X_l] = vae_out
[D_fake_patch, D_fake_global, image_out] = generator_out
label_f = K.ones_like(Cw_z_f)
label_M = K.ones_like(Cw_z_M)
label_l = K.ones_like(Cw_z_l)
label_p = K.ones_like(D_fake_patch)
label_g = K.ones_like(D_fake_global)
rec_m_loss = rec_M_loss(batch_mask,M_f)
rec_F_loss = rec_f_loss(batch_part,X_f)
rec_L_loss = rec_l_loss(batch_landmark,X_l)
rec_sita_loss = rec_Sita_loss(image,image_out,batch_fg)
lat_f_loss = loss_code_adv_generator(label_f,Cw_z_f)
lat_l_loss = loss_code_adv_generator(label_l,Cw_z_l)
lat_m_loss = loss_code_adv_generator(label_M,Cw_z_M)
adv_patch_g_loss = loss_patch_adv_gen(label_p,D_fake_patch)
adv_global_g_loss = loss_adv_gen_loss(label_g,D_fake_global)
all_loss = 4000*(rec_F_loss+rec_sita_loss+rec_m_loss)\
+2000*rec_L_loss+30*(lat_f_loss+lat_m_loss+lat_l_loss)+30*adv_patch_g_loss+20*adv_global_g_loss
return all_loss
def loss_part_all(y_true,vae_out,generator_out):
[image, batch_part, batch_fg] = y_true
[Cw_z_f, X_f] = vae_out
[D_fake_patch, D_fake_global, image_out] = generator_out
label_f = K.ones_like(Cw_z_f)
label_p = K.ones_like(D_fake_patch)
label_g = K.ones_like(D_fake_global)
rec_F_loss = rec_f_loss(batch_part, X_f)
rec_sita_loss = rec_Sita_loss(image,image_out,batch_fg)
lat_f_loss = loss_code_adv_generator(label_f,Cw_z_f)
adv_patch_g_loss = loss_patch_adv_gen(label_p,D_fake_patch)
adv_global_g_loss = loss_adv_gen_loss(label_g,D_fake_global)
all_loss = 4000*(rec_sita_loss+rec_F_loss)+30*lat_f_loss+30*adv_patch_g_loss+20*adv_global_g_loss
return all_loss
def loss_mask_all(y_true,vae_out,generator_out):
[image, batch_mask, batch_fg] = y_true
[Cw_z_f, M_f] = vae_out
[D_fake_patch, D_fake_global, image_out] = generator_out
label_f = K.ones_like(Cw_z_f)
label_p = K.ones_like(D_fake_patch)
label_g = K.ones_like(D_fake_global)
rec_m_loss = rec_M_loss(batch_mask,M_f)
rec_sita_loss = rec_Sita_loss(image,image_out,batch_fg)
lat_f_loss = loss_code_adv_generator(label_f,Cw_z_f)
adv_patch_g_loss = loss_patch_adv_gen(label_p,D_fake_patch)
adv_global_g_loss = loss_adv_gen_loss(label_g,D_fake_global)
all_loss = 4000*(rec_sita_loss+rec_m_loss)+30*lat_f_loss+30*adv_patch_g_loss+20*adv_global_g_loss
return all_loss
```
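A rough smoke test of the adversarial label convention used by these losses, assuming the functions above are importable and a TensorFlow-backed Keras is available; the scores below are made up:
```python
import numpy as np
import keras.backend as K

real_scores = K.constant(np.full((4, 1), 0.9, dtype="float32"))  # discriminator output on real codes
fake_scores = K.constant(np.full((4, 1), 0.1, dtype="float32"))  # discriminator output on fake codes
ones, zeros = K.ones_like(real_scores), K.zeros_like(fake_scores)

d_loss = loss_code_adv_discriminator(real_scores, fake_scores, ones, zeros)  # real -> 1, fake -> 0
g_loss = loss_code_adv_generator(ones, fake_scores)                          # generator wants fake -> 1
print(K.eval(d_loss), K.eval(g_loss))  # d_loss is small here, g_loss is large
```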
#### File: DE-GAN/model/Unet_model.py
```python
from keras.layers import Conv2DTranspose,Conv2D,Input,concatenate,Reshape
from keras import Model
def unet():
inputs = Input(shape=(256, 256, 3))
# z = Input(shape=(512,))
# z_in = Reshape(target_shape=(16, 16, 2))(z)
z = Input(shape=(256,))
z_in = Reshape(target_shape=(16, 16,1))(z)
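    # The 256-d latent vector is reshaped into a 16x16x1 feature map so it
    # can be concatenated with the 16x16 bottleneck output (conv5) below.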
conv1 = Conv2D(32, kernel_size=3, strides=(1, 1), padding='same', activation='relu')(inputs)
conv1 = Conv2D(32, kernel_size=3, strides=(1, 1), padding='same', activation='relu')(conv1)
conv2 = Conv2D(64, kernel_size=3, strides=(2, 2), padding='same', activation='relu')(conv1)
conv2 = Conv2D(64, kernel_size=3, strides=(1, 1), padding='same', activation='relu')(conv2)
conv3 = Conv2D(128, kernel_size=3, strides=(2, 2), padding='same', activation='relu')(conv2)
conv3 = Conv2D(128, kernel_size=3, strides=(1, 1), padding='same', activation='relu')(conv3)
conv4 = Conv2D(256, kernel_size=3, strides=(2, 2), padding='same', activation='relu')(conv3)
conv4 = Conv2D(256, kernel_size=3, strides=(1, 1), padding='same', activation='relu')(conv4)
conv5 = Conv2D(512, kernel_size=3, strides=(2, 2), padding='same', activation='relu')(conv4)
conv5 = Conv2D(512, kernel_size=3, strides=(1, 1), padding='same', activation='relu')(conv5)
concat0 = concatenate([conv5, z_in], axis=-1)
deconv_4 = Conv2DTranspose(256, kernel_size=3, strides=(2, 2), padding='same', activation='relu')(concat0)
concat1 = concatenate([conv4, deconv_4], axis=-1)
conv7_1 = Conv2D(256, kernel_size=3, strides=(1, 1), padding='same', activation='relu')(concat1)
deconv_3 = Conv2DTranspose(128, kernel_size=3, strides=(2, 2), padding='same', activation='relu')(conv7_1)
concat2 = concatenate([conv3, deconv_3], axis=-1)
conv8_1 = Conv2D(128, kernel_size=3, strides=(1, 1), padding='same', activation='relu')(concat2)
deconv_2 = Conv2DTranspose(64, kernel_size=3, strides=(2, 2), padding='same', activation='relu')(conv8_1)
concat3 = concatenate([conv2,deconv_2],axis=-1) #128
conv9_1 = Conv2D(64, kernel_size=3, strides=(1, 1), padding='same', activation='relu')(concat3)
deconv_1 = Conv2DTranspose(32, kernel_size=3, strides=(2, 2), padding='same', activation='relu')(conv9_1)
concat4 = concatenate([conv1,deconv_1],axis=-1)
conv10_1 = Conv2D(32, kernel_size=3, strides=(1, 1), padding='same', activation='relu')(concat4)
result = Conv2D(3, kernel_size=3, strides=(1, 1), padding='same', activation='sigmoid')(conv10_1)
unet_model = Model([inputs,z],result)
return unet_model
``` |
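A quick shape check for the generator above, assuming `unet` is importable and Keras with a TensorFlow backend is installed (purely illustrative):
```python
import numpy as np

model = unet()
img = np.zeros((1, 256, 256, 3), dtype="float32")
z = np.zeros((1, 256), dtype="float32")
out = model.predict([img, z])
print(out.shape)  # expected (1, 256, 256, 3): same spatial size, sigmoid-activated RGB output
```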
{
"source": "jielaizhang/datavis",
"score": 3
} |
#### File: datavis/datastats/determine_imageCentre.py
```python
import docopt
import astropy.io.fits as fits
import sys
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "1.0.1"
__date__ = "2020-06-17"
def print_verbose_string(printme,verbose=False,underscores=False):
if verbose:
if underscores:
print("-" * len(f"VERBOSE: {printme}"),file=sys.stdout)
else:
print(f"VERBOSE: {printme}",file=sys.stdout)
def determine_imageCentre(fitsfile,verbose=False):
'''Determine the pixel coordinates of the image center.'''
# read in fits header
h = fits.getheader(fitsfile)
X = h['NAXIS1']
Y = h['NAXIS2']
xcenter = X/2.
ycenter = Y/2.
if verbose:
print(f'The x,y centers in pixel coords are: {xcenter},{ycenter}')
return xcenter, ycenter
####################### BODY OF PROGRAM STARTS HERE ########################
if __name__ == "__main__":
# Read in input arguments
arguments = docopt.docopt(__doc__)
verbose = arguments['--verbose']
debugmode = arguments['--debug']
if debugmode:
print(arguments)
fitsfile = arguments['<fitsfile>']
_,_=determine_imageCentre(fitsfile,verbose=verbose)
```
#### File: datavis/datastats/embed_relativezps.py
```python
import docopt
import sys
import numpy as np
import astropy.io.fits as fits
import os
from glob import glob
import pandas as pd
import corner
import matplotlib.pyplot as plt
import pylab
import astropy.io.ascii as ascii
from datetime import datetime
import ntpath
# from Jielai modules
from datavis.ascii.match_catalogs import match_catalogs
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "1.0.1"
__date__ = "2020-10-13"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
#########################################
# ======= House Keeping Functions =======
#########################################
'''These functions standardise verbose, debug printing'''
def print_verbose_string(printme,verbose=False,underscores=False):
if verbose:
if underscores:
print("-" * len(f"VERBOSE: {printme}"),file=sys.stdout)
print(f"VERBOSE: {printme}",file=sys.stdout)
print("-" * len(f"VERBOSE: {printme}"),file=sys.stdout)
else:
print(f"VERBOSE: {printme}",file=sys.stdout)
def print_debug_string(printme,debugmode=False,underscores=False):
if debugmode:
if underscores:
print("-" * len(f"DEBUG : {printme}"),file=sys.stdout)
print(f"DEBUG : {printme}",file=sys.stdout)
print("-" * len(f"DEBUG : {printme}"),file=sys.stdout)
else:
print(f"DEBUG : {printme}",file=sys.stdout)
'''These functions help organise or delete files'''
def clearit(fnames,debugmode=False):
for fname in fnames:
if os.path.isfile(fname):
os.remove(fname)
printme = f'Temporary file deleted: {fname}.'
print_debug_string(printme,debugmode=debugmode)
return None
#########################################
# =========== Other Functions ===========
#########################################
def do_header_update(f,key,value,comment,quietmode=False):
d,h = fits.getdata(f,header=True)
if not quietmode:
if key in h:
oldval = h[key]
print(f'{f}: {key}: {oldval}-->{value}')
else:
print(f'{f}: new key- {key}: {value}')
h[key] = (value, comment)
    fits.writeto(f,d,header=h,overwrite=True)
return None
def embed_relativezp(ref_fits_file,other_fits_file,update_header=False,
verbose=False,debugmode=False,quietmode=False):
# See Jupyter Notebook KNe_02_JielaiZhang_RelativePhotometry.
# See makeScript_modheadRelativeZPs.py
return
##############################################################
####################### Main Function ########################
##############################################################
def embed_relativezps(ref_fits_file,other_fits_files,update_header=False,
verbose=False,debugmode=False,quietmode=False):
keyzp = 'ZPREL'
short_ref_name = ref_fits_file[-60:]
comment = f'ZP referenced to: {short_ref_name}'
# Embed in fits header ZPREL = 0 for ref file
if update_header:
do_header_update(ref_fits_file,keyzp,0.0,comment,quietmode=quietmode)
# For each input other_fits_files, calculate relative photometry and update header
for f in other_fits_files:
return None
############################################################################
####################### BODY OF PROGRAM STARTS HERE ########################
############################################################################
if __name__ == "__main__":
# Read in input arguments
arguments = docopt.docopt(__doc__)
verbose = arguments['--verbose']
debugmode = arguments['--debug']
quietmode = arguments['--quietmode']
if debugmode:
print(arguments)
ref_fits_file = arguments['<ref>']
other_fits_files = arguments['<others>']
update_header = arguments['--update']
_ = embed_relativezps(ref_fits_file,other_fits_files,update_header=update_header,
verbose=verbose,debugmode=debugmode,quietmode=quietmode)
```
#### File: datavis/fits/convolve.py
```python
import docopt
import astropy.io.fits as fits
import numpy as np
import sys, os
import scipy.signal as signal
import astropy.wcs as wcs
def convolve(saveloc, fitsfile, initres, finalres,
inputres_pixel=False, inputres_arcsec=True,
overwrite=False,
verbose=False):
# Check initial resolution is lower than final
if initres > finalres:
sys.exit('sys.exit Error: init_res must be a smaller number than final_res')
# Load in source data and header information
data,header = fits.getdata(fitsfile,header = True)
# nan to zero
data[np.isnan(data)] = 0
# number of x and y pixels
x = header['NAXIS2']
y = header['NAXIS1']
highres = initres
lowres = finalres
if inputres_arcsec:
if inputres_pixel:
sys.exit('Please input either pixel or arcsec as the input resolutions, not both.')
# get the pixelsize in arcsec, where the value is in degree*3600(arcsec/degree)
#pixelsize = header['CDELT2']*3600
w = wcs.WCS(header)
pixelsizes = wcs.utils.proj_plane_pixel_scales(w)*60*60
pixelsize = pixelsizes[0]
# FWHM of current image
FWHM_highres_pix = highres/pixelsize
# FWHM of desired resolution
FWHM_lowres_pix = lowres/pixelsize
# FWHM calulated for the gaussian of convolution kernel
FWHM_kernel_pix = np.sqrt(FWHM_lowres_pix**2 - FWHM_highres_pix**2)
elif inputres_pixel:
FWHM_highres_pix = highres
FWHM_lowres_pix = lowres
# FWHM calulated for the gaussian of convolution kernel
FWHM_kernel_pix = np.sqrt(FWHM_lowres_pix**2 - FWHM_highres_pix**2)
else:
sys.exit('Please indicate whether the input resolutions are in pixel or arcsec units')
# gaussian consant to convert sigma to FWHM 2*sqrt(2ln(2))
constant = 2*np.sqrt(2*np.log(2))
# sigma for the gaussian of convolution kernel
sigma = FWHM_kernel_pix/constant
# making the 2-D image of the convolution kernel by making 2, 1-D gaussian
# and normalized by the gaussian normalization factor
gauss1 = signal.general_gaussian(x,1, sigma)/((sigma)*np.sqrt(2*np.pi))
gauss2 = signal.general_gaussian(y,1, sigma)/((sigma)*np.sqrt(2*np.pi))
# combine these two to create 2D image
kernel = np.outer(gauss1, gauss2)
# convolve the image using signal.fftconvolve premade function and kernel
convolved = signal.fftconvolve(data, kernel, mode='same')
# change the resolution if it's there
header['FWHM'] = (lowres, '[arcsec] Resolution of the map')
# saves as the fits file
if overwrite:
fits.writeto(saveloc, convolved, header,overwrite=True)
elif os.path.isfile(saveloc):
print('******* ERROR *******')
print(saveloc + ' already exists!')
print('---------------------')
sys.exit("sys.exit Error: Remove or rename "+saveloc+" before using this script again")
else:
fits.writeto(saveloc, convolved, header)
print("Convolving done. File is saved in "+saveloc+". \n")
####################### BODY OF PROGRAM STARTS HERE ########################
if __name__ == "__main__":
# Read in input arguments
arguments = docopt.docopt(__doc__)
verbose = arguments['--verbose']
if verbose:
print(arguments)
saveloc = arguments['--out']
fitsfile = arguments['<fitsfile>']
initres = float(arguments['<init_res>'])
finalres = float(arguments['<final_rez>'])
inputres_pixel = arguments['pixel']
inputres_arcsec = arguments['arcsec']
overwrite = arguments['--overwrite']
convolve(saveloc, fitsfile, initres, finalres,
inputres_pixel=inputres_pixel, inputres_arcsec=inputres_arcsec,
overwrite=overwrite,
verbose=verbose)
```
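A worked example of the kernel-width arithmetic used in `convolve`, with made-up numbers; convolving two Gaussians adds their variances, so the kernel FWHM is the quadrature difference of the target and source FWHMs:
```python
import numpy as np

pixelsize = 2.0                   # arcsec per pixel (hypothetical)
initres, finalres = 6.0, 10.0     # FWHM in arcsec before and after smoothing
fwhm_high = initres / pixelsize   # 3.0 pixels
fwhm_low = finalres / pixelsize   # 5.0 pixels
fwhm_kernel = np.sqrt(fwhm_low**2 - fwhm_high**2)    # 4.0 pixels
sigma = fwhm_kernel / (2 * np.sqrt(2 * np.log(2)))   # ~1.70 pixels
print(fwhm_kernel, sigma)
```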
#### File: datavis/fits/subtract_images.py
```python
import docopt
import astropy.io.fits as fits
import numpy as np
import sys, os, ntpath
from pathlib import Path
# Jielai Zhang's modules
from datavis.fits.subtract_image import subtract_image
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "1.0.1"
__date__ = "2020-06-22"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
#########################################
# ======= House Keeping Functions =======
#########################################
'''These functions standardise verbose, debug printing'''
def print_verbose_string(printme,verbose=False,underscores=False):
if verbose:
if underscores:
print("@" * len(f"VERBOSE: {printme}"),file=sys.stdout)
print(f"VERBOSE: {printme}",file=sys.stdout)
print("@" * len(f"VERBOSE: {printme}"),file=sys.stdout)
else:
print(f"VERBOSE: {printme}",file=sys.stdout)
def print_debug_string(printme,debugmode=False,underscores=False):
if debugmode:
if underscores:
print("@" * len(f"DEBUG : {printme}"),file=sys.stdout)
print(f"DEBUG : {printme}",file=sys.stdout)
print("@" * len(f"DEBUG : {printme}"),file=sys.stdout)
else:
print(f"DEBUG : {printme}",file=sys.stdout)
'''These functions help organise or delete files'''
def clearit(fname):
if os.path.isfile(fname):
os.remove(fname)
return None
##############################################################
####################### Main Function ########################
##############################################################
def subtract_images(template,fitsfiles,savedir='./',badpixmapsave=False,overwrite=False,sextractorloc='/opt/local/bin/source-extractor',verbose=False,debugmode=False):
# Start array to save subtracted images
subtracted_images = []
badpixmap_images = []
paths_savedimages = []
paths_savedbadpixmapimages = []
# For each fitspath in fitsfiles, do template - fitspath
for fitspath in fitsfiles:
print_verbose_string(f'Operating on {fitspath}.',underscores=True,verbose=verbose)
# Determine saveloc for subtracted image based on input savedir and input file name
fname = ntpath.basename(fitspath)
fname_stub = Path(fname).stem
saveloc = savedir + os.path.sep + fname_stub + '_sub.fits'
# Assume path to saved badpixmap if badpixmapsave is True.
if badpixmapsave:
badpixsaveloc = saveloc.replace('.fits','_badpixmap.fits')
# Do image subtraction
subtracted_image, badpixmap = subtract_image(template,fitspath,saveloc=saveloc,overwrite=overwrite,sextractorloc=sextractorloc,verbose=verbose,debugmode=debugmode)
# Append to array of subtracted images
subtracted_images.append(subtracted_image)
badpixmap_images.append(badpixmap)
# Append to array of subtracted image and badpixmap saved file locations.
paths_savedimages.append(saveloc)
if badpixmapsave:
paths_savedbadpixmapimages.append(badpixsaveloc)
# After all is done, print out all saved files again:
print('\nSummarise what is saved:')
if badpixmapsave:
        for f,b in zip(paths_savedimages,paths_savedbadpixmapimages):
print(f'SAVED :{f},{b}')
else:
for f in paths_savedimages:
print(f'SAVED :{f}')
return subtracted_images
############################################################################
####################### BODY OF PROGRAM STARTS HERE ########################
############################################################################
if __name__ == "__main__":
# Read in input arguments
arguments = docopt.docopt(__doc__)
verbose = arguments['--verbose']
debugmode = arguments['--debug']
if debugmode:
print(arguments)
template = arguments['<template>']
fitsfiles = arguments['<fitsfiles>']
savedir = arguments['--save']
badpixmapsave = arguments['--badpixmapsave']
overwrite = arguments['--overwrite']
sextractorloc = arguments['--sextractor']
_ = subtract_images(template,fitsfiles,savedir=savedir,badpixmapsave=badpixmapsave,overwrite=overwrite,sextractorloc=sextractorloc,verbose=verbose,debugmode=debugmode)
```
#### File: datavis/misc/correct_photometry.py
```python
import docopt
import os
import ntpath
import pandas as pd
import astropy.io.ascii as ascii
from astropy import units as u
import numpy as np
from datavis.ascii.match_catalogs import match_catalogs_df
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "1.0.1"
__date__ = "2021-06-13"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
#########################################
# ======= House Keeping Functions =======
#########################################
##############################################################
####################### Main Function ########################
##############################################################
def correct_photometry(f_ref_cat,f_sci_cat,savedir=None,
radius_threshold=1.0*u.arcsec,
REF_RA_KEY='RA',REF_DEC_KEY='DEC',REF_PHOTOM_KEY='gmag',
SCI_RA_KEY='X_WORLD',SCI_DEC_KEY='Y_WORLD',SCI_PHOTOM_KEY='MAG_AUTO',
verbose=False,debugmode=False,quietmode=False):
# Read in Catalogues
cat_ref = ascii.read(f_ref_cat)
cat_sci = ascii.read(f_sci_cat)
df_ref = pd.DataFrame(cat_ref.as_array())
df_sci = pd.DataFrame(cat_sci.as_array())
# Match catalogues by RA and DEC
df_ref_matched, df_sci_matched = match_catalogs_df(df_ref, df_sci,radius_threshold=radius_threshold,
ref_RA_KEY=REF_RA_KEY,ref_DEC_KEY=REF_DEC_KEY,
sci_RA_KEY=SCI_RA_KEY,sci_DEC_KEY=SCI_DEC_KEY)
if debugmode:
print(f'DEBUG: The number of ref+sci matched sources = {len(df_ref_matched)}')
# Calculate ZP
try:
ref_mags = np.array([float(x) for x in df_ref_matched[REF_PHOTOM_KEY]])
except:
ref_mags_temp = np.array(df_ref_matched[REF_PHOTOM_KEY])
for ii,v in enumerate(ref_mags_temp):
if v == 'NA':
ref_mags_temp[ii] = np.nan
ref_mags = np.array([float(x) for x in ref_mags_temp])
sci_mags = np.array([float(x) for x in df_sci_matched[SCI_PHOTOM_KEY]])
zps = ref_mags - sci_mags
if debugmode:
print(f'DEBUG: The average {SCI_PHOTOM_KEY} of matched sci sources is {np.nanmean(sci_mags)}')
print(f'DEBUG: The average {REF_PHOTOM_KEY} of matched ref sources is {np.nanmean(ref_mags)}')
print(f'DEBUG: The average of the difference is: {np.nanmean(zps)}')
print(f'DEBUG: The median of the difference is: {np.nanmedian(zps)}')
print('DEBUG: Use the median difference to correct photometry.')
ZP = np.nanmedian(zps)
# Calculate corrected photometry
new_sci_mag = df_sci[SCI_PHOTOM_KEY]+ZP
if debugmode:
print(f'DEBUG: The median science magnitude is: {np.median(new_sci_mag)}')
# Save new catalogue
file_ext = f_sci_cat.split('.')[-1]
if savedir==None:
f_save = f_sci_cat.replace('.'+file_ext,'_CORRECTED.'+file_ext)
else:
fname = ntpath.basename(f_sci_cat)
f_save = savedir+os.sep+fname.replace('.'+file_ext,'_CORRECTED.'+file_ext)
df_sci[SCI_PHOTOM_KEY] = new_sci_mag
header_string = ''
for k in np.array(df_sci.keys()):
header_string=header_string+k+' '
np.savetxt(f_save, df_sci.values, fmt='%f',header=header_string)
if not quietmode:
print(f'Saved: {f_save}')
return f_save
############################################################################
####################### BODY OF PROGRAM STARTS HERE ########################
############################################################################
if __name__ == "__main__":
# Read in input arguments
arguments = docopt.docopt(__doc__)
verbose = arguments['--verbose']
debugmode = arguments['--debug']
quietmode = arguments['--quietmode']
if debugmode:
print(arguments)
savedir = arguments['--out']
f_ref_cat = arguments['<reference_cat>']
f_sci_cat = arguments['<science_cat>']
radius_threshold = float(arguments['--RADIUS_THRESHOLD'])*u.arcsec
REF_RA_KEY = arguments['--REF_RA_KEY']
REF_DEC_KEY = arguments['--REF_DEC_KEY']
REF_PHOTOM_KEY = arguments['--REF_PHOTOM_KEY']
SCI_RA_KEY = arguments['--SCI_RA_KEY']
SCI_DEC_KEY = arguments['--SCI_DEC_KEY']
SCI_PHOTOM_KEY = arguments['--SCI_PHOTOM_KEY']
_ = correct_photometry(f_ref_cat,f_sci_cat,savedir=savedir,
radius_threshold=radius_threshold,
REF_RA_KEY=REF_RA_KEY,REF_DEC_KEY=REF_DEC_KEY,REF_PHOTOM_KEY=REF_PHOTOM_KEY,
SCI_RA_KEY=SCI_RA_KEY,SCI_DEC_KEY=SCI_DEC_KEY,SCI_PHOTOM_KEY=SCI_PHOTOM_KEY,
verbose=verbose,debugmode=debugmode,quietmode=quietmode)
```
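A tiny worked example of the zero-point logic in `correct_photometry`, using made-up magnitudes; the median of the per-star differences is robust to NaNs and outliers:
```python
import numpy as np

ref_mags = np.array([15.2, 16.8, 14.9, np.nan])  # catalogue magnitudes of matched stars
sci_mags = np.array([12.1, 13.6, 11.9, 12.5])    # instrumental magnitudes of the same stars
zps = ref_mags - sci_mags                        # per-star zero points
ZP = np.nanmedian(zps)                           # 3.1 for these numbers
print(sci_mags + ZP)                             # corrected science magnitudes
```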
#### File: datavis/misc/get_angularsep.py
```python
import docopt
import astropy.units as u
from astropy.coordinates import SkyCoord
# dataexplore modules
from misc.radec import hms2deg, dms2deg, deg2hms, deg2dms
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "1.0.1"
__date__ = "2021-01-14"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
##############################################################
####################### Main Functions #######################
##############################################################
def get_angularsep(RA1,DEC1,RA2,DEC2,verbose=False,debugmode=False,quietmode=False):
# check if input format is ok
# do this later
# convert all to decimal
    if ':' in RA1:
        [ra1] = hms2deg([RA1])
    else:
        ra1 = float(RA1)
    if ':' in RA2:
        [ra2] = hms2deg([RA2])
    else:
        ra2 = float(RA2)
    if ':' in DEC1:
        [dec1] = dms2deg([DEC1])
    else:
        dec1 = float(DEC1)
    if ':' in DEC2:
        [dec2] = dms2deg([DEC2])
    else:
        dec2 = float(DEC2)
# Calculate angular separation
c1 = SkyCoord(ra1*u.deg, dec1*u.deg, frame='icrs')
c2 = SkyCoord(ra2*u.deg, dec2*u.deg, frame='icrs')
angular_sep = c1.separation(c2)
print(f'{angular_sep.deg:.4f} deg |', angular_sep, f'| {angular_sep.arcsec:.3f} arcsec')
return angular_sep
############################################################################
####################### BODY OF PROGRAM STARTS HERE ########################
############################################################################
if __name__ == "__main__":
# Read in input arguments
arguments = docopt.docopt(__doc__)
verbose = arguments['--verbose']
debugmode = arguments['--debug']
quietmode = arguments['--quietmode']
if debugmode:
print(arguments)
RADEC1 = arguments['<RADEC1>']
RA1, DEC1 = RADEC1.split(',')
RADEC2 = arguments['<RADEC2>']
RA2, DEC2 = RADEC2.split(',')
_ = get_angularsep(RA1,DEC1,RA2,DEC2,verbose=verbose,debugmode=debugmode,quietmode=quietmode)
```
#### File: datavis/misc/radec.py
```python
import docopt
import astropy.io.fits as fits
import numpy as np
import sys, os, ntpath
from pathlib import Path
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "1.0.1"
__date__ = "2020-08-06"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
#########################################
# ======= House Keeping Functions =======
#########################################
'''These functions standardise verbose, debug printing'''
def print_verbose_string(printme,verbose=False,underscores=False):
if verbose:
if underscores:
print("-" * len(f"VERBOSE: {printme}"),file=sys.stdout)
print(f"VERBOSE: {printme}",file=sys.stdout)
print("-" * len(f"VERBOSE: {printme}"),file=sys.stdout)
else:
print(f"VERBOSE: {printme}",file=sys.stdout)
def print_debug_string(printme,debugmode=False,underscores=False):
if debugmode:
if underscores:
print("-" * len(f"DEBUG : {printme}"),file=sys.stdout)
print(f"DEBUG : {printme}",file=sys.stdout)
print("-" * len(f"DEBUG : {printme}"),file=sys.stdout)
else:
print(f"DEBUG : {printme}",file=sys.stdout)
'''These functions help organise or delete files'''
def clearit(fnames,debugmode=False):
for fname in fnames:
if os.path.isfile(fname):
os.remove(fname)
printme = f'Temporary file deleted: {fname}.'
print_debug_string(printme,debugmode=debugmode)
return None
##############################################################
#################### other RADEC Functions ###################
##############################################################
def minussignornot(x):
if x >= 0:
sign = ''
if x < 0:
sign = '-'
return sign
def hms2deg(ras):
'''hms format: e.g. 12:23:34.32'''
ras_deg = []
for ra in ras:
try:
rh, rm, rs = [float(r) for r in ra.split(':')]
except:
rh, rm = [float(r) for r in ra.split(':')]
rs = 0.0
ra_deg = rh*15 + rm/4 + rs/240
ras_deg.append(ra_deg)
return ras_deg
def dms2deg(decs):
'''dms format: e.g. 12:23:34.32 or -12:23:34.32 '''
decs_deg = []
for dec in decs:
try:
dd, dm, ds = [float(d) for d in dec.split(':')]
except:
dd, dm = [float(d) for d in dec.split(':')]
ds = 0.0
if dd < 0:
sign = -1
else:
sign = 1
dec_deg = dd + sign*dm/60 + sign*ds/3600
decs_deg.append(dec_deg)
return decs_deg
def deg2dms(decs):
decs_dms = []
for dec in decs:
dec = float(dec)
pmsign = minussignornot(dec)
dec = abs(dec)
d = np.floor(dec)
m = np.floor((dec-d)*60.)
s = ((dec-d)*60.-m)*60.
dms = f'{pmsign}{int(d)}:{int(m)}:{s}'
decs_dms.append(dms)
return decs_dms
def deg2hms(ras):
ras_hms = []
for ra in ras:
        ra = float(ra)
h = np.floor(ra/15)
m = np.floor( (ra/15 - h)*60. )
s = ((ra/15 - h)*60. -m)*60.
hms = f'{int(h)}:{int(m)}:{s}'
ras_hms.append(hms)
return ras_hms
##############################################################
####################### Main Functions #######################
##############################################################
def sexagesimal2deg(RA,DEC,verbose=False,debugmode=False,quietmode=False):
[ra] = hms2deg([RA])
[dec] = dms2deg([DEC])
print(ra,dec)
return None
def deg2sexagecimal(RA,DEC,verbose=False,debugmode=False,quietmode=False):
[ra] = deg2hms([RA])
[dec] = deg2dms([DEC])
print(ra,dec)
return None
############################################################################
####################### BODY OF PROGRAM STARTS HERE ########################
############################################################################
if __name__ == "__main__":
# Read in input arguments
arguments = docopt.docopt(__doc__)
verbose = arguments['--verbose']
debugmode = arguments['--debug']
quietmode = arguments['--quietmode']
if debugmode:
print(arguments)
input_deg = arguments['deci2sexa']
input_sexagecimal = arguments['sexa2deci']
RADEC = arguments['<RADEC>']
RA, DEC = RADEC.split(',')
if input_sexagecimal:
_ = sexagesimal2deg(RA,DEC,verbose=verbose,debugmode=debugmode,quietmode=quietmode)
if input_deg:
_ = deg2sexagecimal(RA,DEC,verbose=verbose,debugmode=debugmode,quietmode=quietmode)
```
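A worked example of the conversions above, assuming the helper functions from this module are in scope; 12h23m34.32s is 12*15 + 23/4 + 34.32/240 ≈ 185.893 degrees:
```python
[ra_deg] = hms2deg(["12:23:34.32"])   # ~185.893 deg
[dec_deg] = dms2deg(["-45:30:00"])    # -45.5 deg
print(ra_deg, dec_deg)
print(deg2hms([ra_deg]), deg2dms([dec_deg]))  # round-trips back to sexagesimal strings
```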
#### File: jielaizhang/datavis/template.py
```python
import docopt
import astropy.io.fits as fits
import numpy as np
import sys, os
import ntpath
from pathlib import Path
from datetime import datetime
import string
import random
import pandas as pd
import astropy.io.ascii as ascii
import copy
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "1.0.1"
__date__ = "2021-06-07"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
#########################################
# ======= House Keeping Functions =======
#########################################
'''These functions standardise verbose, debug printing'''
def print_verbose_string(printme,verbose=False,underscores=False):
if verbose:
if underscores:
print("-" * len(f"VERBOSE: {printme}"),file=sys.stdout)
print(f"VERBOSE: {printme}",file=sys.stdout)
print("-" * len(f"VERBOSE: {printme}"),file=sys.stdout)
else:
print(f"VERBOSE: {printme}",file=sys.stdout)
def print_debug_string(printme,debugmode=False,underscores=False):
if debugmode:
if underscores:
print("-" * len(f"DEBUG : {printme}"),file=sys.stdout)
print(f"DEBUG : {printme}",file=sys.stdout)
print("-" * len(f"DEBUG : {printme}"),file=sys.stdout)
else:
print(f"DEBUG : {printme}",file=sys.stdout)
'''These functions help organise or delete files'''
def clearit(fnames,debugmode=False):
for fname in fnames:
if os.path.isfile(fname):
os.remove(fname)
printme = f'Temporary file deleted: {fname}.'
print_debug_string(printme,debugmode=debugmode)
return None
##############################################################
####################### Main Function ########################
##############################################################
def XXX(verbose=False,debugmode=False,quietmode=False):
# Determine save directory
savedir = os.sep.join(saveloc.split(os.sep)[0:-1]) + os.sep
# Determine saveloc for subtracted image based on input savedir and input file name
fname = ntpath.basename(fitspath)
fname_stub = Path(fname).stem
stub = Path(ntpath.basename(fitspath)).stem
saveloc = savedir + os.path.sep + fname_stub + '_sub.fits'
# Convert to dt
dt = datetime.strptime(sunset_CT.iso, '%Y-%m-%d %H:%M:%S.%f') #2020-10-23 16:01:22.058
# Random string
S=5 # number of characters in random string
ran = ''.join(random.choices(string.ascii_uppercase + string.digits, k = S))
# Make temporoary random directory
temp_dir = './temporary_'+ran
os.makedirs(temp_dir)
# Saving fits file
fits.writeto(output,stellar_mask)
printme = f'SAVED : {output}'
print(printme)
# Reading in csvs
a = ascii.read(f)
df = pd.DataFrame(a.as_array())
new_df = df[df['column']>3]
# Plot
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# plot something here
if xyaxislims:
if ~np.isfinite(xmin):
xmin = ax.get_xlim()[0]
if ~np.isfinite(xmax):
xmax = ax.get_xlim()[1]
if ~np.isfinite(ymin):
ymin = ax.get_ylim()[0]
if ~np.isfinite(ymax):
ymax = ax.get_ylim()[1]
plt.ylim([ymin,ymax])
plt.xlim([xmin,xmax])
return None
############################################################################
####################### BODY OF PROGRAM STARTS HERE ########################
############################################################################
if __name__ == "__main__":
# Read in input arguments
arguments = docopt.docopt(__doc__)
verbose = arguments['--verbose']
debugmode = arguments['--debug']
quietmode = arguments['--quietmode']
if debugmode:
print(arguments)
saveloc = arguments['--out']
input_add = arguments['add']
input_subtract = arguments['subtract']
blah = arguments['<blah>']
xyaxislims = arguments['--boundonaxis']
if xyaxislims:
        xmin,xmax,ymin,ymax = np.array(xyaxislims.split(',')).astype(float)
_ = XXX(verbose=verbose,debugmode=debugmode,quietmode=quietmode)
``` |
{
"source": "jielaizhang/obstools",
"score": 3
} |
#### File: obstools/followup/create_VLTfindingchartInputs.py
```python
import docopt
import numpy as np
from astropy.table import Table
from astropy import wcs
from astropy.wcs import WCS
from astropy.io import fits
import os
import datetime as dt
from astropy.nddata.utils import Cutout2D
from astroquery.vizier import Vizier
from astropy.coordinates import Angle
def create_finding_chart_inputs(RA,DEC,
field,source_name,
templateimage_path,
findingchartdir='/fred/oz100/FINDING_CHARTS/',
verbose=False, debugmode=False):
print('\n#############################################')
print('Creating files needed to create finding chart image for RA: {} DEC: {}'.format(RA,DEC))
print('#############################################\n')
# Let's keep things organized, so always put the outputs of this script here:
outputdir = findingchartdir+'/'+field+'/'+source_name+'/'
if verbose:
print('Template image used: {}\n'.format(templateimage_path))
if not os.path.exists(outputdir):
os.makedirs(outputdir, 0o755)
else:
pass
with fits.open(templateimage_path) as hdu:
size = 1370
w = WCS(hdu[0].header)
head = hdu[0].header
date = dt.datetime.strptime(head['DATE'], '%Y-%m-%dT%H:%M:%S')
xlim = head['NAXIS1']
ylim = head['NAXIS2']
pixcrd_im = np.array([[xlim, ylim]], np.float_)
world_im = w.wcs_pix2world(pixcrd_im, 1)
pixx_im, pixy_im = world_im[0][0], world_im[0][1]
pixcrd = np.array([[RA, DEC]], np.float_)
worldpix = w.wcs_world2pix(pixcrd, 1)
pixx, pixy = worldpix[0][0], worldpix[0][1]
cutout = Cutout2D(hdu[0].data, (pixx, pixy), size, wcs= w)
hdu[0].data = cutout.data
hdu[0].header['CRPIX1'] = cutout.wcs.wcs.crpix[0]
hdu[0].header['CRPIX2'] = cutout.wcs.wcs.crpix[1]
outfile_large = outputdir +'/'+ source_name+'6arcmin.fits'
hdu.writeto(outfile_large, overwrite = True)
print('========SAVED===Large fits cutout file saved: \n {}'.format(outfile_large))
with fits.open(templateimage_path) as hdu:
size = 457
w = WCS(hdu[0].header)
head = hdu[0].header
date = dt.datetime.strptime(head['DATE'], '%Y-%m-%dT%H:%M:%S')
xlim = head['NAXIS1']
ylim = head['NAXIS2']
pixcrd_im = np.array([[xlim, ylim]], np.float_)
world_im = w.wcs_pix2world(pixcrd_im, 1)
pixx_im, pixy_im = world_im[0][0], world_im[0][1]
pixcrd = np.array([[RA, DEC]], np.float_)
worldpix = w.wcs_world2pix(pixcrd, 1)
pixx, pixy = worldpix[0][0], worldpix[0][1]
cutout = Cutout2D(hdu[0].data, (pixx, pixy), size, wcs= w)
hdu[0].data = cutout.data
hdu[0].header['CRPIX1'] = cutout.wcs.wcs.crpix[0]
hdu[0].header['CRPIX2'] = cutout.wcs.wcs.crpix[1]
outfile_small = outputdir + '/' + source_name+'2arcmin.fits'
hdu.writeto(outfile_small, overwrite = True)
print('========SAVED===Small fits cutout file saved: \n {}'.format(outfile_small))
# Create a catalog of sources within the field of view.
if debugmode:
print('Starting to search for GAIA sources')
GAIA_DR2 = "I/345"
RA_DEC = str(f'{RA} {DEC}')
result = Vizier.query_region(RA_DEC, radius=Angle('120"'), catalog=GAIA_DR2)
GAIA_DR2 = Table()
GAIA_DR2['RA'] = result[0]['RA_ICRS']
GAIA_DR2['DEC'] = result[0]['DE_ICRS']
GAIA_DR2['Gmag'] = result[0]['Gmag']
GAIA_DR2['BPmag'] = result[0]['BPmag']
GAIA_DR2['RPmag'] = result[0]['RPmag']
outfile_cat = outputdir + '/' + source_name + '_gaia_star_cat.ascii'
GAIA_DR2.write(outfile_cat, format='ascii', overwrite=True)
print('========SAVED===Small cutout star catalog saved: \n {}'.format(outfile_cat))
print('\nTo Create Finding Chart, run the following in terminal:')
print('--pa 0.0 (position angle of slit)')
print("--PI '<NAME>'")
print("--RunID '0103.D-0625(A)' (Take from proposal, VLT)")
print("--OBname Transient1 (Observation Block or Target Name)")
print("--fitswaveband g-band (for background image)")
print('RADEC={},{}'.format(RA,DEC))
print('largefits={}'.format(outfile_large))
print('smallfits={}'.format(outfile_small))
print('catfile={}'.format(outfile_cat))
print('outfile={}'.format(outputdir+'/'+source_name+'_finding_chart.jpg'))
print('python ~/src/dwftools/FOLLOWUP/create_VLTfindingchart.py $RADEC $largefits $smallfits $catfile -o $outfile')
print('\n#############################################')
print('# YOUR FINDING CHART INPUTS ARE DONE')
print('# FIND THEM HERE: {}'.format(outputdir))
print('#############################################\n')
if __name__ == "__main__":
## read in arguments
arguments = docopt.docopt(__doc__, options_first=True)
# Mandatory arguments
RA = arguments['<RA>']
DEC = arguments['<DEC>']
field = arguments['<field>']
source_name = arguments['<source_name>']
templateimage_path = arguments['<templateimage_path>']
# Optional arguments
verbose = arguments['--verbose']
debugmode = arguments['--debug']
findingchartdir = arguments['--findingchartdir']
if debugmode:
print(arguments)
create_finding_chart_inputs(RA,DEC,
field,source_name,
templateimage_path,
findingchartdir=findingchartdir,
verbose=verbose,debugmode=debugmode)
```
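A minimal illustration of the cutout step used above, on a synthetic image with a hand-built TAN WCS rather than the real survey data:
```python
import numpy as np
from astropy.wcs import WCS
from astropy.nddata.utils import Cutout2D

data = np.zeros((2000, 2000))
w = WCS(naxis=2)
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.crval = [150.0, 2.0]         # reference RA/DEC in degrees
w.wcs.crpix = [1000, 1000]
w.wcs.cdelt = [-0.0001, 0.0001]    # ~0.36 arcsec pixels
x, y = w.wcs_world2pix([[150.01, 2.01]], 1)[0]
cut = Cutout2D(data, (x, y), 457, wcs=w)   # 457-pixel box, as in the small cutout above
print(cut.data.shape, cut.wcs.wcs.crpix)
```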
#### File: obstools/followup/create_VLTfindingchart.py
```python
import docopt
import os
import numpy as np
import astropy.io.fits as fits
import matplotlib.pyplot as plt
import aplpy
import astropy.visualization as astrovis
from astropy import wcs
import astropy
import pandas as pd
from pandas.plotting import table
def draw_line(plot, theta, length, ra, dec, color='b', linewidth=1, alpha=0.7):
theta = theta*np.pi/180.0
length = length/2.0
dx = np.sin(theta)*length/(np.cos(dec*np.pi/180.0)*60.0)
dy = np.cos(theta)*length/60.0
coords = np.array([[ra+dx, ra-dx], [dec+dy, dec-dy]])
plot.show_lines([coords], color=color, linewidth=linewidth, alpha=alpha)
return plot
def read_cat_gaia(f_cat,number_refstars,ra,dec,verbose=False):
# Read in catalog
cat = np.genfromtxt(f_cat,names=True,skip_header=0)
catRA = cat['RA']
catDec = cat['DEC']
catG = cat['Gmag']
catB = cat['BPmag']
catR = cat['RPmag']
# Sort catalog entries by g-band mag
catG_sorted = np.sort(catG)
catB_sorted = [x for _,x in sorted(zip(catG,catB))]
catR_sorted = [x for _,x in sorted(zip(catG,catR))]
catRA_sorted = [x for _,x in sorted(zip(catG,catRA))]
catDec_sorted = [x for _,x in sorted(zip(catG,catDec))]
# Pick X brightest stars
catG_sorted_top = catG_sorted[0:number_refstars]
catB_sorted_top = catB_sorted[0:number_refstars]
catR_sorted_top = catR_sorted[0:number_refstars]
catRA_sorted_top = catRA_sorted[0:number_refstars]
catDec_sorted_top = catDec_sorted[0:number_refstars]
# Calculate other information required
catGR_sorted_top = catG_sorted_top-catR_sorted_top
RAoffset_arcsec = (np.array(catRA_sorted_top)-ra)*60.*60.
DECoffset_arcsec = (np.array(catDec_sorted_top)-dec)*60.*60.
return catG_sorted_top,catGR_sorted_top,catRA_sorted_top,catDec_sorted_top,RAoffset_arcsec,DECoffset_arcsec
def create_refstardf(catRA, catDec, catg, catgr, RAoffset_arcsec, DECoffset_arcsec, catalog='gaia', verbose=False):
# Define file
number_refstars = len(catRA)
gband_name = 'mag (gaia G)'
color_name = 'color (gaia G-R)'
if number_refstars > 0:
labels = ['#'+str(x+1) for x in range(number_refstars)]
d = {'Label': labels,
'RA': catRA,
'DEC': catDec,
gband_name:catg,
color_name:catgr,
'RAoffset (arcsec)':RAoffset_arcsec,
'DECoffset (arcsec)':DECoffset_arcsec
}
else:
d = {'Label':['x'],
'RA':['x'],
'DEC':['x'],
gband_name:['x'],
color_name:['x'],
'RAoffset (arcsec)':['x'],
             'DECoffset (arcsec)':['x']}
print('\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print('ISSUE: There were no reference stars found in the catalog entry\n')
df_stars = pd.DataFrame(data=d)
return df_stars
def create_OBinfodf(PI='<NAME>',RunID='?',OBname='Transient1',fitsband='g-band',pa=0.0,verbose=False):
# Define info table
# Table of Observing Block information
d = {'Item': ['Observing Run ID', 'PI name', 'OB/ Target Name','slit PA (deg)','image wavelength band'],
'Entry':[RunID,PI,OBname,pa,fitsband]}
df_info = pd.DataFrame(data=d)
return df_info
def make_chart(ra,dec,f_6arcmin,f_2arcmin,df_stars,df_info,catRA,catDec,pa=0.0,saveloc='./finding_chart.jpg',verbose=False):
color='red'
fig = plt.figure(figsize=(12,24))
# ====== 6 arcmin image ===
# Load fits
d, h = fits.getdata(f_6arcmin,header=True)
# Visualize fits
interval = astrovis.ZScaleInterval()
vmin,vmax=interval.get_limits(d)
img = aplpy.FITSFigure(f_6arcmin, figure=fig, subplot=[0.1,0.6,0.9,0.4])
img.show_grayscale(stretch='linear', vmin=vmin, vmax=vmax)
# Mark RA and DEC target
img.show_markers([ra],[dec],coords_frame='world',s=10)
# Add scalebar
scalebar_length = 1./60. #arcmin
img.add_scalebar(length=scalebar_length)
img.scalebar.set_label('1 arcmin')
img.scalebar.set_linewidth(3)
img.scalebar.set_font_size(15)
img.scalebar.set_font_weight('medium')
# Draw box for 2'x2' zoom in
boxsize_arcsec=2.*60
w = wcs.WCS(h)
[[x1,y1]]=w.wcs_world2pix([ [ra,dec] ], 1)
pixelscale_arcsec = astropy.wcs.utils.proj_plane_pixel_scales(w)*60.*60.
boxsize_pixel = boxsize_arcsec/pixelscale_arcsec[0]
img.show_rectangles([x1], [y1], boxsize_pixel, boxsize_pixel,
coords_frame='pixel',edgecolor='red',linewidth=2)
# ====== 2 arc min image ===
# Visualize image
img2 = aplpy.FITSFigure(f_2arcmin, figure=fig, subplot=[0.1,0.2,0.9,0.4])
img2.show_grayscale(stretch='linear', vmin=vmin, vmax=vmax)
# Draw slit
draw_line(img2,0.,4,ra,dec,color='red')
# Draw scalebar
scalebar_length = 10./60./60. #arcmin
img2.add_scalebar(length=scalebar_length)
img2.scalebar.set_label('10 arcsec')
img2.scalebar.set_linewidth(3)
img2.scalebar.set_font_size(15)
img2.scalebar.set_font_weight('medium')
# Mark RA and DEC target
img2.show_markers([ra],[dec],coords_frame='world',s=100,edgecolor=color,linewidth=2)
img2.add_label(ra+0.006,dec+0.0006,'TARGET',color=color,size='xx-large',weight='bold')
# Show acquisition specific reference stars
number_refstars = len(catRA)
labels = ['#'+str(x+1) for x in range(number_refstars)]
for refstar_ra,refstar_dec,label in zip(catRA,
catDec,
labels):
img2.show_markers([refstar_ra],[refstar_dec],coords_frame='world',s=10,edgecolor=color,linewidth=3)
img2.add_label(refstar_ra+0.002,refstar_dec+0.0002,label,color=color,size='xx-large',weight='bold')
# ====== Tables to be printed ===
# Table of acquisition reference stars
ax2 = fig.add_axes([0.1,0.1,0.9,0.06])
ax2.xaxis.set_visible(False) # hide the x axis
ax2.yaxis.set_visible(False) # hide the y axis
table(ax2, df_stars, rowLabels=['']*df_stars.shape[0], loc='center') # where df is your data frame
# Table of information needed to be printed
ax3 = fig.add_axes([0.1,0.05,0.9,0.04])
ax3.xaxis.set_visible(False) # hide the x axis
ax3.yaxis.set_visible(False) # hide the y axis
table(ax3, df_info, rowLabels=['']*df_info.shape[0], loc='center') # where df is your data frame
# Save figure
fig.savefig(saveloc, bbox_inches='tight')
full_path = os.path.abspath(saveloc)
print('Finding chart saved: ',full_path)
scpcommand='scp <EMAIL>:'+full_path+' ./finding_chart.jpg'
print(scpcommand)
scpcommand='scp <EMAIL>:'+full_path+' '+full_path.split('/')[-1]
print(scpcommand)
return scpcommand
def create_VLTfindingchart(ra,dec,f_6arcmin,f_2arcmin,f_catgaia,pa=0.0,number_refstars=8,
PI='<NAME>',RunID='?',OBname='Transient1',fitsband='g-band',
saveloc='./finding_chart.jpg',
verbose=False):
# Read in catalog
    catG,catGR,catRA,catDec,RAoffset_arcsec,DECoffset_arcsec = read_cat_gaia(f_catgaia,number_refstars,ra,dec,verbose=verbose)
# Create pd table for acquisition specific reference stars
df_stars = create_refstardf(catRA, catDec, catG, catGR, RAoffset_arcsec, DECoffset_arcsec, catalog='gaia', verbose=verbose)
# Create pd table of information
df_info = create_OBinfodf(PI=PI,RunID=RunID,OBname=OBname,fitsband=fitsband,pa=pa,verbose=verbose)
# Create finding chart
scpcommand = make_chart(ra,dec,f_6arcmin,f_2arcmin,df_stars,df_info,catRA,catDec,pa=pa,saveloc=saveloc,verbose=verbose)
# Finish up
print('#################################')
print('Congrats, finding chart made! ')
print('\nUse this to get your finding chart on whatever computer you want!\n')
print(scpcommand)
print('#################################')
return None
####################### BODY OF PROGRAM STARTS HERE ########################
if __name__ == "__main__":
# Read in arguments
arguments = docopt.docopt(__doc__)
# Mandatory arguments
RADEC = arguments['<RADEC>']
ra = float(RADEC.split(',')[0])
dec = float(RADEC.split(',')[1])
f_6arcmin = arguments['<6arcminfits>']
f_2arcmin = arguments['<2arcminfits>']
f_catgaia = arguments['<catgaia>']
# Optional arguments
verbose = arguments['--verbose']
debugmode = arguments['--debug']
saveloc = arguments['--out']
pa = float(arguments['--pa'])
number_refstars = int(arguments['--norefstars'])
PI = arguments['--PI']
RunID = arguments['--RunID']
OBname = arguments['--OBname']
fitsband = arguments['--fitswaveband']
if debugmode:
print(arguments)
create_VLTfindingchart(ra,dec,f_6arcmin,f_2arcmin,f_catgaia,pa=pa,number_refstars=number_refstars,
PI=PI,RunID=RunID,OBname=OBname,fitsband=fitsband,
saveloc=saveloc,
verbose=verbose)
``` |
{
"source": "jielinxu/pymilvus",
"score": 3
} |
#### File: pymilvus/examples/example.py
```python
from milvus import Milvus, Prepare, IndexType, Status
import time
# Milvus server IP address and port.
# You may need to change _HOST and _PORT accordingly.
_HOST = '127.0.0.1'
_PORT = '19530'
def main():
milvus = Milvus()
# Connect to Milvus server
# You may need to change _HOST and _PORT accordingly
param = {'host': _HOST, 'port': _PORT}
status = milvus.connect(**param)
# Create table demo_table if it dosen't exist.
table_name = 'demo_table'
if not milvus.has_table(table_name):
param = {
'table_name': table_name,
'dimension': 16,
'index_type': IndexType.FLAT,
'store_raw_vector': False
}
milvus.create_table(Prepare.table_schema(**param))
# Show tables in Milvus server
_, tables = milvus.show_tables()
# Describe demo_table
_, table = milvus.describe_table(table_name)
# create 10 vectors with 16 dimension
vector_list = [
[0.66, 0.01, 0.29, 0.64, 0.75, 0.94, 0.26, 0.79, 0.61, 0.11, 0.25, 0.50, 0.74, 0.37, 0.28, 0.63],
[0.77, 0.65, 0.57, 0.68, 0.29, 0.93, 0.17, 0.15, 0.95, 0.09, 0.78, 0.37, 0.76, 0.21, 0.42, 0.15],
[0.61, 0.38, 0.32, 0.39, 0.54, 0.93, 0.09, 0.81, 0.52, 0.30, 0.20, 0.59, 0.15, 0.27, 0.04, 0.37],
[0.33, 0.03, 0.87, 0.47, 0.79, 0.61, 0.46, 0.77, 0.62, 0.70, 0.85, 0.01, 0.30, 0.41, 0.74, 0.98],
[0.19, 0.80, 0.03, 0.75, 0.22, 0.49, 0.52, 0.91, 0.40, 0.91, 0.79, 0.08, 0.27, 0.16, 0.07, 0.24],
[0.44, 0.36, 0.16, 0.88, 0.30, 0.79, 0.45, 0.31, 0.45, 0.99, 0.15, 0.93, 0.37, 0.25, 0.78, 0.84],
[0.33, 0.37, 0.59, 0.66, 0.76, 0.11, 0.19, 0.38, 0.14, 0.37, 0.97, 0.50, 0.08, 0.69, 0.16, 0.67],
[0.68, 0.97, 0.20, 0.13, 0.30, 0.16, 0.85, 0.21, 0.26, 0.17, 0.81, 0.96, 0.18, 0.40, 0.13, 0.74],
[0.11, 0.26, 0.44, 0.91, 0.89, 0.79, 0.98, 0.91, 0.09, 0.45, 0.07, 0.88, 0.71, 0.35, 0.97, 0.41],
[0.17, 0.54, 0.61, 0.58, 0.25, 0.63, 0.65, 0.71, 0.26, 0.80, 0.28, 0.77, 0.69, 0.02, 0.63, 0.60],
]
vectors = Prepare.records(vector_list)
# Insert vectors into demo_table
status, ids = milvus.add_vectors(table_name=table_name, records=vectors)
# Get demo_table row count
status, result = milvus.get_table_row_count(table_name)
# Wait for 6 seconds, since Milvus server persist vector data every 5 seconds by default.
# You can set data persist interval in Milvus config file.
time.sleep(6)
# Use the 3rd vector for similarity search
query_list = [
vector_list[3]
]
query_vectors = Prepare.records(query_list)
# execute vector similarity search
param = {
'table_name': table_name,
'query_records': query_vectors,
'top_k': 1,
}
status, results = milvus.search_vectors(**param)
    if results[0][0].score == 100.0 or results[0][0].id == ids[3]:
print('Query result is correct')
else:
print('Query result isn\'t correct')
# Delete demo_table
status = milvus.delete_table(table_name)
# Disconnect from Milvus
status = milvus.disconnect()
if __name__ == '__main__':
main()
``` |
{
"source": "JIElite/RL-gridworld",
"score": 2
} |
#### File: RL-gridworld/agent/utils.py
```python
def soft_update_network(target, source, tau):
for target_param, source_param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(
target_param.data * (1 - tau) + source_param.data * tau
)
def hard_update_network(target, source):
target.load_state_dict(source.state_dict())
```
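A minimal usage sketch for the update helpers above, assuming they are importable and PyTorch is installed; `soft_update_network` is the usual Polyak averaging applied to target networks:
```python
import torch.nn as nn

source = nn.Linear(4, 2)
target = nn.Linear(4, 2)
hard_update_network(target, source)            # copy the weights exactly
soft_update_network(target, source, tau=0.01)  # target <- 0.99 * target + 0.01 * source
```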
#### File: RL-gridworld/env/env.py
```python
from abc import abstractmethod
import tkinter as tk
from .agent import GridAgent
from .grid import EmptyGrid
class Environment:
def __init__(self, render=False):
self.renderable = render
self.renderer = None
self.action_space = None
self.state_space = None
self.agent = None
@abstractmethod
def __compute_reward(self, state, action):
pass
@abstractmethod
def __is_terminal(self):
pass
@abstractmethod
def __init_render(self):
pass
@abstractmethod
def reset(self):
pass
@abstractmethod
def step(self, action):
pass
@abstractmethod
def get_state(self):
pass
def set_action_space(self, action_space):
self.action_space = action_space
def get_action_space(self):
return self.action_space, self.action_space.shape
def set_state_space(self, state_space):
self.state_space = state_space
def get_state_space(self):
return self.state_space, self.state_space.shape
def render(self):
if not self.renderer:
raise ValueError
else:
self.renderer.update()
class GridWorld(Environment):
def __init__(self, grid_size=(10, 10), unit_size=40, render=False):
super(GridWorld, self).__init__(render=render)
self.maze_grids = []
self.start_pos = (0, 0)
self.goal_pos_list = []
self.grid_size = grid_size
self.unit_size = unit_size
self.canvas = None
self.__init__render()
@abstractmethod
def init_start_pos(self):
pass
@abstractmethod
def init_goal_pos(self):
pass
@abstractmethod
def init_grid_world(self):
pass
def __init__render(self):
if self.renderable:
self.renderer = tk.Tk()
self.renderer.title(self.__class__.__name__)
self.renderer.geometry('{0}x{1}'.format(
self.grid_size[0]*self.unit_size,
self.grid_size[1]*self.unit_size,
))
self.canvas = tk.Canvas(self.renderer, bg='white',
height=self.grid_size[1]*self.unit_size,
width=self.grid_size[0]*self.unit_size
)
self.canvas.pack()
def draw_grid_worlds(self):
# draw grid line
for c in range(0, self.grid_size[0] * self.unit_size, self.unit_size):
x0, y0, x1, y1 = c, 0, c, self.grid_size[1] * self.unit_size
self.canvas.create_line(x0, y0, x1, y1)
for r in range(0, self.grid_size[1] * self.unit_size, self.unit_size):
x0, y0, x1, y1 = 0, r, self.grid_size[0] * self.unit_size, r
self.canvas.create_line(x0, y0, x1, y1)
# fill differernt grids
for y in range(self.grid_size[1]):
for x in range(self.grid_size[0]):
self.maze_grids[y][x].render(self.canvas, self.unit_size)
def init_empty_grids(self):
for y in range(self.grid_size[1]):
self.maze_grids.append([])
for x in range(self.grid_size[0]):
self.maze_grids[y].append(EmptyGrid(x, y))
def set_grid_size(self, grid_size):
self.grid_size = grid_size
def get_grid_size(self):
return self.grid_size
def set_unit_size(self, unit_size):
self.unit_size = unit_size
def get_unit_size(self):
return self.unit_size
def get_state(self):
if isinstance(self.agent, GridAgent):
return self.agent.get_current_state()
else:
            raise ValueError('Attribute self.agent is not a GridAgent.')
```
#### File: RL-gridworld/maze/demo_maze.py
```python
from .simple_maze import SimpleMaze
class Maze2(SimpleMaze):
def __init__(self, grid_size=(10, 10), unit_size=40, render=False):
super(Maze2, self).__init__(grid_size, unit_size, render)
def init_start_pos(self):
self.start_pos = (0, 0)
def init_wall_pos(self):
self.wall_pos_list = [(1, 1), (1, 2), (2, 1), (6, 6), (7, 7), (8, 8), (1, 8)]
def init_goal_pos(self):
self.goal_pos_list = [(5, 7), (9, 1)]
``` |
{
"source": "jielness/motor_lib",
"score": 3
} |
#### File: motor_lib/motor_lib/motor_lib.py
```python
from serial import Serial
from struct import pack
from time import sleep
class Motor:
def __init__(self, serial='COM4', type1='CEM-FPM', type2='CEM-FPM'):
self.mtype = ('CEM-FPM', 'EM-FPM')
self.PSClib = [24153.6, 37781.28]
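        # Assumed: steps per full 360-degree revolution for the two supported
        # motor types; used in run() to convert a requested angle into steps.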
self.state = 0
self.motor1_pos = 0
self.motor2_pos = 0
self.motor1_angle = 0
self.motor2_angle = 0
if type1 == self.mtype[0]:
self.SPC1 = self.PSClib[0]
elif type1 == self.mtype[1]:
self.SPC1 = self.PSClib[1]
else:
self.SPC1 = self.PSClib[0]
if type2 == self.mtype[0]:
self.SPC2 = self.PSClib[0]
elif type2 == self.mtype[1]:
self.SPC2 = self.PSClib[1]
else:
self.SPC2 = self.PSClib[0]
self.motor = Serial(serial, 9600, timeout=0.5)
self.state = 1
temp_str = ''
while temp_str != 'END\r\n':
temp_str = self.motor.readline().decode('ascii')
sleep(0.5)
self.get_pos(1)
self.get_pos(2)
print('Please connect Mr.Liu if you meet problem or want to buy it.')
print('Wechat: jilness, Tel: 18868112367')
print('Motor Connected!')
def run(self, motor_num=1, angle=0, ifprint=False, ):
if motor_num == 1:
steps = int(angle * self.SPC1 / 360)
cmd = 'a'.encode('ascii') + pack('i', steps)
elif motor_num == 2:
steps = int(angle * self.SPC2 / 360)
cmd = 'b'.encode('ascii') + pack('i', steps)
else:
return 0
self.sendcmd(cmd)
self.get_pos(motor_num, ifprint)
if ifprint:
print('Motor Rotation finished!')
return 1
def run_continue(self, motor_num=1, state=1,):
if state == 1:
if motor_num == 1:
cmd = ('e'.encode('ascii') + pack('i', 0))
elif motor_num == 2:
cmd = ('h'.encode('ascii') + pack('i', 0))
else:
return 0
self.sendcmd(cmd)
return 1
elif state == -1:
if motor_num == 1:
cmd = ('f'.encode('ascii') + pack('i', 0))
elif motor_num == 2:
cmd = ('i'.encode('ascii') + pack('i', 0))
else:
return 0
self.sendcmd(cmd)
return 1
elif state == 0:
if motor_num == 1:
cmd = ('g'.encode('ascii') + pack('i', 0))
elif motor_num == 2:
cmd = ('j'.encode('ascii') + pack('i', 0))
else:
return 0
self.sendcmd(cmd)
if motor_num == 1:
self.get_pos(1)
elif motor_num == 2:
self.get_pos(2)
else:
return 0
return 1
else:
return 0
def reset(self, motor_num=1, ifprint=False, ):
if motor_num == 1:
self.run(motor_num, -self.motor1_angle, ifprint)
elif motor_num == 2:
self.run(motor_num, -self.motor2_angle, ifprint)
def get_pos(self, motor_num=1, ifprint=False, ):
if motor_num == 1:
cmd = 'c'.encode('ascii') + pack('i', 0)
elif motor_num == 2:
cmd = 'd'.encode('ascii') + pack('i', 0)
else:
return 0
value = self.sendcmd(cmd)
pos = value[0]
if len(pos):
if motor_num == 1:
self.motor1_pos = int(pos)
self.motor1_angle = 360 * self.motor1_pos / self.SPC1
elif motor_num == 2:
self.motor2_pos = int(pos)
self.motor2_angle = 360 * self.motor2_pos / self.SPC2
if ifprint:
if motor_num == 1:
print(f'Motor 1 position: {self.motor1_angle}')
elif motor_num == 2:
print(f'Motor 2 position: {self.motor2_angle}')
return 1
def save_pos(self, ifprint=False, ):
cmd = 'k'.encode('ascii') + pack('i', 0)
self.sendcmd(cmd)
if ifprint:
print(f'Save Position to EEPROM!')
def clear_pos(self, motor_num=1, ifprint=False, ):
if motor_num == 1:
cmd = 'l'.encode('ascii') + pack('i', 0)
elif motor_num == 2:
cmd = 'm'.encode('ascii') + pack('i', 0)
else:
return 0
self.sendcmd(cmd)
if ifprint:
print(f'Clear Motor {motor_num} Data!')
return 1
def sendcmd(self, cmd):
self.motor.write(cmd)
value = list()
temp_str = ''
while temp_str != 'END\r\n':
temp_str = self.motor.readline().decode('ascii')
if len(temp_str) != 0 and temp_str != 'END\r\n':
value.append(temp_str.split()[0])
return value
def disconnect(self, ):
if self.state == 1:
self.save_pos(True)
self.motor.close()
self.state = 0
def __del__(self):
if self.state == 1:
self.save_pos(True)
self.motor.close()
self.state = 0
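# Minimal usage sketch (illustrative, not part of the original module); the
# serial port name and the 90-degree rotation below are hypothetical values and
# assume the firmware handshake/command protocol implemented by the class above.
if __name__ == '__main__':
    m = Motor(serial='COM4', type1='CEM-FPM', type2='EM-FPM')
    m.run(motor_num=1, angle=90, ifprint=True)   # rotate motor 1 by 90 degrees
    m.get_pos(motor_num=1, ifprint=True)         # query and print the current angle
    m.reset(motor_num=1, ifprint=True)           # rotate back to the recorded zero
    m.disconnect()                               # save positions to EEPROM and close the port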
``` |
{
"source": "JielongZ/3D-UNet-PyTorch-Implementation",
"score": 3
} |
#### File: JielongZ/3D-UNet-PyTorch-Implementation/get_image_patches.py
```python
import os
import glob
import json
import SimpleITK as sitk
from utils import label_converter
def fetch_data_path(data_dir):
"""
Fetch all data path
:param data_dir: the root folder where data stored
:return: data path (pet_path, ct_path, mask_path), dtype: tuple
"""
data_paths = list()
paths_list = glob.glob(os.path.join(os.path.dirname(__file__), data_dir, "*"))
for i, subject_dir in enumerate(paths_list):
if i < 10:
pet_path = os.path.join(subject_dir, "STS_00" + str(i + 1) + "_PT_COR_16.tiff")
ct_path = os.path.join(subject_dir, "STS_00" + str(i + 1) + "_CT_COR_16.tiff")
mask_path = os.path.join(subject_dir, "STS_00" + str(i + 1) + "_MASS_PET_COR_16.tiff")
data_paths.append((pet_path, ct_path, mask_path))
else:
pet_path = os.path.join(subject_dir, "STS_0" + str(i + 1) + "_PT_COR_16.tiff")
ct_path = os.path.join(subject_dir, "STS_0" + str(i + 1) + "_CT_COR_16.tiff")
mask_path = os.path.join(subject_dir, "STS_0" + str(i + 1) + "_MASS_PET_COR_16.tiff")
data_paths.append((pet_path, ct_path, mask_path))
return data_paths
def get_data(data_path):
"""
use (pet_path, ct_path, mask_path) to get corresponding image array
:param data_path: path consists of (pet_path, ct_path, mask_path)
:return: a list of corresponding image path
"""
pet_path, ct_path, mask_path = data_path
pet_img = sitk.GetArrayFromImage(sitk.ReadImage(pet_path))
ct_img = sitk.GetArrayFromImage(sitk.ReadImage(ct_path))
mask = sitk.GetArrayFromImage(sitk.ReadImage(mask_path))
mask = label_converter(mask)
return [pet_img, ct_img, mask]
def get_img_patch_idxs(img, overlap_stepsize):
"""
This function is used to get patch indices of a single image
The patch indices generated here are used to crop one image into patches
:param img: the single image
:param overlap_stepsize: the overlap step size to generate patches
:return: patch indices
"""
patch_idxs = []
depth, height, width = img.shape
patch_depth, patch_height, patch_width = 128, 128, 128
depth_range = list(range(0, depth - patch_depth + 1, overlap_stepsize))
height_range = list(range(0, height - patch_height + 1, overlap_stepsize))
width_range = list(range(0, width - patch_width + 1, overlap_stepsize))
if (depth - patch_depth) % overlap_stepsize != 0:
depth_range.append(depth - patch_depth)
if (height - patch_height) % overlap_stepsize != 0:
height_range.append(height - patch_height)
if (width - patch_width) % overlap_stepsize != 0:
width_range.append(width - patch_width)
for d in depth_range:
for h in height_range:
for w in width_range:
patch_idxs.append((d, h, w))
return patch_idxs
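# Worked example (illustrative, not from the original file): for a hypothetical
# volume of shape (depth, height, width) = (200, 300, 300) with patch size 128
# and overlap_stepsize=64, the ranges above become
#   depth_range  = [0, 64, 72]           # 72 appended since (200 - 128) % 64 != 0
#   height_range = [0, 64, 128, 172]
#   width_range  = [0, 64, 128, 172]
# so patch_idxs is their Cartesian product with 3 * 4 * 4 = 48 start positions.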
def crop_image(data_path, output_path, overlap_stepsize):
"""
Cropping volumetric images into various patches with fixed size, e.g. (96, 96, 96)
:param data_path: the complete data path for original data
:param output_path: the output root folder for cropped images
:param overlap_stepsize: the overlap step size used for cropping images
:return: data paths and patches dict that
stores number of patches for a single volumetric image and indices for that image
"""
patch_dict = dict()
patch_depth, patch_height, patch_width = 128, 128, 128
no_sample = len(data_path)
for i in range(no_sample):
pet_path, ct_path, mask_path = data_path[i]
subject = os.path.basename(os.path.dirname(pet_path))
print("Start Processing subject {}".format(subject))
pet, ct, mask = get_data(data_path=data_path[i])
patch_idxs = get_img_patch_idxs(img=pet, overlap_stepsize=overlap_stepsize)
total_no_of_patch_idxs = len(patch_idxs)
patch_dict[subject] = (total_no_of_patch_idxs, mask_path, patch_idxs)
for j in range(len(patch_idxs)):
d, h, w = patch_idxs[j]
cropped_pet = sitk.GetImageFromArray(pet[d: d + patch_depth, h: h + patch_height, w: w + patch_width])
cropped_ct = sitk.GetImageFromArray(ct[d: d + patch_depth, h: h + patch_height, w: w + patch_width])
cropped_mask = sitk.GetImageFromArray(mask[d: d + patch_depth, h: h + patch_height, w: w + patch_width])
subject_dir = os.path.basename(os.path.dirname(pet_path))
if not os.path.exists(os.path.join(output_path, subject_dir, "PET")) or \
not os.path.exists(os.path.join(output_path, subject_dir, "CT")) or \
not os.path.exists(os.path.join(output_path, subject_dir, "MASK")):
os.makedirs(os.path.join(output_path, subject_dir, "PET"))
os.makedirs(os.path.join(output_path, subject_dir, "CT"))
os.makedirs(os.path.join(output_path, subject_dir, "MASK"))
sitk.WriteImage(cropped_pet, os.path.join(os.path.join(output_path,
os.path.basename(os.path.dirname(pet_path)),
"PET"),
os.path.splitext(os.path.basename(pet_path))[0]
+ "_" + str(j + 1)
+ os.path.splitext(os.path.basename(pet_path))[1]))
sitk.WriteImage(cropped_ct, os.path.join(os.path.join(output_path,
os.path.basename(os.path.dirname(ct_path)),
"CT"),
os.path.splitext(os.path.basename(ct_path))[0]
+ "_" + str(j + 1)
+ os.path.splitext(os.path.basename(ct_path))[1]))
sitk.WriteImage(cropped_mask, os.path.join(os.path.join(output_path,
os.path.basename(os.path.dirname(mask_path)),
"MASK"),
os.path.splitext(os.path.basename(mask_path))[0]
+ "_" + str(j + 1)
+ os.path.splitext(os.path.basename(mask_path))[1]))
with open("patches_dict.json", "w") as f:
json.dump(patch_dict, f)
return data_path, patch_dict
if __name__ == "__main__":
ps = fetch_data_path("data")
dp, pd = crop_image(ps, "./processed", overlap_stepsize=64)
```
#### File: JielongZ/3D-UNet-PyTorch-Implementation/train.py
```python
import torch
from torch.nn import CrossEntropyLoss
from unet3d_model.unet3d import UnetModel, Trainer
from unet3d_model.tmp import UNet
from unet3d_model.loss import DiceLoss
from data_gen import get_data_paths, data_gen, batch_data_gen
def train_main(data_folder, in_channels, out_channels, learning_rate, no_epochs):
"""
Train module
:param data_folder: data folder
:param in_channels: the input channel of input images
:param out_channels: the final output channel
:param learning_rate: set learning rate for training
:param no_epochs: number of epochs to train model
:return: None
"""
model = UNet(in_dim=in_channels, out_dim=out_channels, num_filters=16)
optim = torch.optim.Adam(params=model.parameters(), lr=learning_rate)
criterion = DiceLoss()
trainer = Trainer(data_dir=data_folder, net=model, optimizer=optim, criterion=criterion, no_epochs=no_epochs)
trainer.train(data_paths_loader=get_data_paths, dataset_loader=data_gen, batch_data_loader=batch_data_gen)
if __name__ == "__main__":
data_dir = "./processed"
train_main(data_folder=data_dir, in_channels=1, out_channels=1, learning_rate=0.0001, no_epochs=10)
``` |
{
"source": "jielyu/animations",
"score": 3
} |
#### File: leetcode/e001_100/q1338.py
```python
from manimlib.imports import *
from itertools import chain
class BasicScene(Scene):
pass
class Problem(BasicScene):
def construct(self):
t = TextMobject('Problem')
self.play(Write(t))
class Solution01(BasicScene):
def construct(self):
t = TextMobject('Solution01')
self.play(Write(t))
```
#### File: src/test/test_mobject.py
```python
from manimlib.imports import *
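# These scenes target the old 3b1b manimlib API (imported above). Depending on
# the installed release they are typically rendered from the command line with
# something like `manim test_mobject.py TextExample -pl` (preview, low quality);
# exact flags vary between manim versions, so treat this invocation as a sketch.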
class TextExample(Scene):
"""文本对象测试实例"""
def construct(self):
title1 = TextMobject("Hello, world")
title2 = TextMobject("你好,中国")
title1.move_to(UL)
title2.move_to(DR)
self.play(Write(title1))
self.wait()
self.play(Write(title2))
self.wait()
self.remove(title1)
self.wait()
class PositionExample(Scene):
"""对象位置操作实例"""
def construct(self):
# to_edge
t1 = TextMobject("to\_edge(UP)")
t1.to_edge(UP)
self.play(Write(t1))
t2 = TextMobject("to\_edge(DOWN)")
t2.to_edge(DOWN)
self.play(Write(t2))
t3 = TextMobject("to\_edge(LEFT)")
t3.to_edge(LEFT)
self.play(Write(t3))
t4 = TextMobject("to\_edge(RIGHT)")
t4.to_edge(RIGHT)
self.play(Write(t4))
self.remove(t1, t2, t3, t4)
self.wait()
# to_corner
t1 = TextMobject("to\_corner(UL)")
t1.to_corner(UL)
self.play(Write(t1))
t2 = TextMobject("to\_corner(UR)")
t2.to_corner(UR)
self.play(Write(t2))
t3 = TextMobject("to\_corner(DR)")
t3.to_corner(DR)
self.play(Write(t3))
t4 = TextMobject("to\_corner(DL)")
t4.to_corner(DL)
self.play(Write(t4))
self.remove(t1, t2, t3, t4)
self.wait()
# move_to
t1 = TextMobject("move\_to()")
t1.move_to(UL)
self.play(Write(t1))
t1.move_to(UR)
self.play(Write(t1))
t1.move_to(DR)
self.play(Write(t1))
t1.move_to(DL)
self.play(Write(t1))
t1.move_to(UL)
self.play(Write(t1))
self.wait()
self.remove(t1)
self.wait()
# next_to
t1 = TextMobject("Text")
t2 = TexMobject("next\_to(Text)")
t2.next_to(3*RIGHT)
self.play(Write(t1))
self.play(Write(t2))
self.wait()
self.remove(t1, t2)
self.wait()
# shift
t1 = TextMobject("shift()")
t1.shift(3*LEFT)
self.play(Write(t1))
t1.shift(3*RIGHT)
self.play(Write(t1))
self.wait()
self.remove(t1)
self.wait()
# rotate
t1 = TextMobject('rotate()')
for i in range(8):
t1.rotate(PI/4)
self.play(Write(t1))
self.wait()
self.wait()
self.remove(t1)
self.wait()
# flip
t1 = TextMobject('flip')
t1.flip(UP)
self.play(Write(t1))
t1.flip(LEFT)
self.play(Write(t1))
t1.flip(RIGHT)
self.play(Write(t1))
t1.flip(DOWN)
self.play(Write(t1))
self.wait()
self.remove(t1)
self.wait()
class SizeTextExample(Scene):
"""改变文本大小测试实例"""
def construct(self):
textHuge = TextMobject("{\\Huge Huge Text 012.\\#!?} Text")
texthuge = TextMobject("{\\huge huge Text 012.\\#!?} Text")
textLARGE = TextMobject("{\\LARGE LARGE Text 012.\\#!?} Text")
textLarge = TextMobject("{\\Large Large Text 012.\\#!?} Text")
textlarge = TextMobject("{\\large large Text 012.\\#!?} Text")
textNormal = TextMobject("{\\normalsize normal Text 012.\\#!?} Text")
        textsmall = TextMobject("{\\small small Text 012.\\#!?} Text normal")
textfootnotesize = TextMobject("{\\footnotesize footnotesize Text 012.\\#!?} Text")
textscriptsize = TextMobject("{\\scriptsize scriptsize Text 012.\\#!?} Text")
        texttiny = TextMobject("{\\tiny tiny Text 012.\\#!?} Text normal")
textHuge.to_edge(UP)
texthuge.next_to(textHuge,DOWN,buff=0.1)
textLARGE.next_to(texthuge,DOWN,buff=0.1)
textLarge.next_to(textLARGE,DOWN,buff=0.1)
textlarge.next_to(textLarge,DOWN,buff=0.1)
textNormal.next_to(textlarge,DOWN,buff=0.1)
textsmall.next_to(textNormal,DOWN,buff=0.1)
textfootnotesize.next_to(textsmall,DOWN,buff=0.1)
textscriptsize.next_to(textfootnotesize,DOWN,buff=0.1)
texttiny.next_to(textscriptsize,DOWN,buff=0.1)
self.add(textHuge, texthuge, textLARGE, textLarge, textlarge,
textNormal, textsmall, textfootnotesize, textscriptsize, texttiny)
self.wait(3)
class TextArrayExample(Scene):
"""文本数组测试实例"""
def construct(self):
t = TexMobject("A", "{B", "\\over", "C}", "D", "E")
t[0].set_color(RED)
t[1].set_color(ORANGE)
t[2].set_color(YELLOW)
t[3].set_color(GREEN)
t[4].set_color(BLUE)
t[5].set_color(BLUE)
self.play(Write(t))
self.wait(2)
t.shift(LEFT*2)
self.play(Write(t))
self.wait()
class VGroupExample(Scene):
"""向量组测试实例"""
def construct(self):
text1 = TextMobject("text1")
text2 = TextMobject("text2 text2")
text3 = TextMobject("text3 text3 text3")
textgroup = VGroup(text1,text2,text3)
textgroup.arrange(
UP,
aligned_edge = LEFT,
buff=0.4
)
self.add(textgroup)
self.wait()
class ShapeExample(Scene):
"""常用几何形状实例"""
def construct(self):
# Dot
dot = Dot(radius=1, color=YELLOW)
dot.move_to(LEFT*3)
self.play(Write(dot))
self.wait()
self.remove(dot)
# Circle
c = Circle(color=BLUE)
c.move_to(RIGHT*3)
self.play(Write(c))
self.wait()
self.remove(c)
# Annulus
a = Annulus()
a.move_to(UP*3)
self.play(Write(a))
self.wait()
self.remove(a)
# Rectangle
rect = Rectangle()
rect.move_to(DOWN*3)
self.play(Write(rect))
self.wait()
self.remove(rect)
# Square
rect = Square()
rect.move_to(UL*3)
self.play(Write(rect))
self.wait()
self.remove(rect)
# Ellipse
e = Ellipse()
e.move_to(UR*3)
self.play(Write(e))
self.wait()
self.remove(e)
# Arc
arc = Arc()
arc.move_to(DR*3)
self.play(Write(arc))
self.wait()
self.remove(arc)
# Line
l = Line()
self.play(Write(l))
self.wait()
self.remove(l)
class Shape3DExample(ThreeDScene):
"""三维对象例程"""
def construct(self):
# Sphere
s = Sphere()
s.move_to(LEFT*3)
self.play(Write(s))
self.wait()
self.remove(s)
# Cube
c = Cube()
c.move_to(RIGHT*3)
self.play(Write(c))
self.wait()
self.remove(c)
# Prism
p = Prism()
p.move_to(UP)
self.play(Write(p))
self.wait()
self.remove(p)
# ParametricSurface
axes = ThreeDAxes()
cylinder = ParametricSurface(
lambda u, v: np.array([
np.cos(TAU * v),
np.sin(TAU * v),
2 * (1 - u)
]),
resolution=(6, 32)).fade(0.5) #Resolution of the surfaces
paraboloid = ParametricSurface(
lambda u, v: np.array([
np.cos(v)*u,
np.sin(v)*u,
u**2
]),v_max=TAU,
checkerboard_colors=[PURPLE_D, PURPLE_E],
resolution=(10, 32)).scale(2)
para_hyp = ParametricSurface(
lambda u, v: np.array([
u,
v,
u**2-v**2
]),v_min=-2,v_max=2,u_min=-2,u_max=2,checkerboard_colors=[BLUE_D, BLUE_E],
resolution=(15, 32)).scale(1)
cone = ParametricSurface(
lambda u, v: np.array([
u*np.cos(v),
u*np.sin(v),
u
]),v_min=0,v_max=TAU,u_min=-2,u_max=2,checkerboard_colors=[GREEN_D, GREEN_E],
resolution=(15, 32)).scale(1)
hip_one_side = ParametricSurface(
lambda u, v: np.array([
np.cosh(u)*np.cos(v),
np.cosh(u)*np.sin(v),
np.sinh(u)
]),v_min=0,v_max=TAU,u_min=-2,u_max=2,checkerboard_colors=[YELLOW_D, YELLOW_E],
resolution=(15, 32))
ellipsoid=ParametricSurface(
lambda u, v: np.array([
1*np.cos(u)*np.cos(v),
2*np.cos(u)*np.sin(v),
0.5*np.sin(u)
]),v_min=0,v_max=TAU,u_min=-PI/2,u_max=PI/2,checkerboard_colors=[TEAL_D, TEAL_E],
resolution=(15, 32)).scale(2)
sphere = ParametricSurface(
lambda u, v: np.array([
1.5*np.cos(u)*np.cos(v),
1.5*np.cos(u)*np.sin(v),
1.5*np.sin(u)
]),v_min=0,v_max=TAU,u_min=-PI/2,u_max=PI/2,checkerboard_colors=[RED_D, RED_E],
resolution=(15, 32)).scale(2)
self.set_camera_orientation(phi=75 * DEGREES)
self.begin_ambient_camera_rotation(rate=0.2)
self.add(axes)
self.play(Write(sphere))
self.wait()
self.play(ReplacementTransform(sphere,ellipsoid))
self.wait()
self.play(ReplacementTransform(ellipsoid,cone))
self.wait()
self.play(ReplacementTransform(cone,hip_one_side))
self.wait()
self.play(ReplacementTransform(hip_one_side,para_hyp))
self.wait()
self.play(ReplacementTransform(para_hyp,paraboloid))
self.wait()
self.play(ReplacementTransform(paraboloid,cylinder))
self.wait()
self.play(FadeOut(cylinder))
self.remove(axes, sphere, ellipsoid, cone, hip_one_side,
para_hyp, paraboloid, cylinder)
# ParametricFunction
curve1=ParametricFunction(
lambda u : np.array([
1.2*np.cos(u),
1.2*np.sin(u),
u/2
]),color=RED,t_min=-TAU,t_max=TAU,
)
curve2=ParametricFunction(
lambda u : np.array([
1.2*np.cos(u),
1.2*np.sin(u),
u
]),color=RED,t_min=-TAU,t_max=TAU,
)
curve1.set_shade_in_3d(True)
curve2.set_shade_in_3d(True)
axes = ThreeDAxes()
self.add(axes)
self.set_camera_orientation(phi=80 * DEGREES,theta=-60*DEGREES)
self.begin_ambient_camera_rotation(rate=0.1)
self.play(ShowCreation(curve1))
self.wait()
self.play(Transform(curve1,curve2),
rate_func=there_and_back,run_time=3)
self.wait()
``` |
{
"source": "jielyu/fresh_attempt",
"score": 2
} |
#### File: fresh_attempt/test_python/test_vispy.py
```python
import numpy as np
from vispy import gloo
from vispy import app
from vispy.util.transforms import perspective, translate, rotate
from vispy.gloo.util import _screenshot
# Create vertices
n, p = 100, 150
data = np.zeros(p * n, [('a_position', np.float32, 2),
('a_color', np.float32, 4),
('a_rotation', np.float32, 4)])
trail = .5 * np.pi
data['a_position'][:, 0] = np.resize(np.linspace(0, trail, n), p * n)
data['a_position'][:, 0] += np.repeat(np.random.uniform(0, 2 * np.pi, p), n)
data['a_position'][:, 1] = np.repeat(np.linspace(0, 2 * np.pi, p), n)
data['a_color'] = 1, 1, 1, 1
data['a_color'] = np.repeat(
np.random.uniform(0.75, 1.00, (p, 4)).astype(np.float32), n, axis=0)
data['a_color'][:, 3] = np.resize(np.linspace(0, 1, n), p * n)
data['a_rotation'] = np.repeat(
np.random.uniform(0, 2 * np.pi, (p, 4)).astype(np.float32), n, axis=0)
vert = """
#version 120
uniform mat4 u_model;
uniform mat4 u_view;
uniform mat4 u_projection;
uniform float u_size;
uniform float u_clock;
attribute vec2 a_position;
attribute vec4 a_color;
attribute vec4 a_rotation;
varying vec4 v_color;
mat4 build_rotation(vec3 axis, float angle)
{
axis = normalize(axis);
float s = sin(angle);
float c = cos(angle);
float oc = 1.0 - c;
return mat4(oc * axis.x * axis.x + c,
oc * axis.x * axis.y - axis.z * s,
oc * axis.z * axis.x + axis.y * s,
0.0,
oc * axis.x * axis.y + axis.z * s,
oc * axis.y * axis.y + c,
oc * axis.y * axis.z - axis.x * s,
0.0,
oc * axis.z * axis.x - axis.y * s,
oc * axis.y * axis.z + axis.x * s,
oc * axis.z * axis.z + c,
0.0,
0.0, 0.0, 0.0, 1.0);
}
void main (void) {
v_color = a_color;
float x0 = 1.5;
float z0 = 0.0;
float theta = a_position.x + u_clock;
float x1 = x0*cos(theta) + z0*sin(theta);
float y1 = 0.0;
float z1 = (z0*cos(theta) - x0*sin(theta))/2.0;
mat4 R = build_rotation(a_rotation.xyz, a_rotation.w);
gl_Position = u_projection * u_view * u_model * R * vec4(x1,y1,z1,1);
gl_PointSize = 8.0 * u_size * sqrt(v_color.a);
}
"""
frag = """
#version 120
varying vec4 v_color;
varying float v_size;
void main()
{
float d = 2*(length(gl_PointCoord.xy - vec2(0.5,0.5)));
gl_FragColor = vec4(v_color.rgb, v_color.a*(1-d));
}
"""
class Canvas(app.Canvas):
def __init__(self):
app.Canvas.__init__(self, keys='interactive')
self.size = 800, 800
self.program = gloo.Program(vert, frag)
self.view = np.eye(4, dtype=np.float32)
self.model = np.eye(4, dtype=np.float32)
self.projection = np.eye(4, dtype=np.float32)
self.translate = 4.5
#translate(self.view, 0, 0, -self.translate)
self.view = translate([0, 0, -self.translate])
self.program.bind(gloo.VertexBuffer(data))
self.program['u_model'] = self.model
self.program['u_view'] = self.view
self.program['u_size'] = 5 / self.translate
gloo.set_state('translucent', depth_test=False)
self.program['u_clock'] = 0.0
def on_resize(self, event):
width, height = event.size
gloo.set_viewport(0, 0, width, height)
self.projection = perspective(45.0, width / float(height), 1.0, 1000.0)
self.program['u_projection'] = self.projection
def animation(self, t):
""" Added for animation with MoviePy """
self.program['u_clock'] = 2*t
gloo.clear('red')
self.program.draw('points')
return _screenshot((0, 0, self.size[0], self.size[1]))[:,:,:3]
if __name__ == '__main__':
from moviepy.editor import VideoClip
canvas = Canvas()
canvas.show()
clip = VideoClip(canvas.animation, duration=np.pi).resize(0.3)
clip.write_videofile('atom3.mp4', fps=20)
#clip.write_gif('atom3.gif', fps=20, opt='OptimizePlus')
``` |
{
"source": "jielyugt/calibration",
"score": 3
} |
#### File: scripts/calibration/cal_methods.py
```python
import numpy as np
from scipy.optimize import minimize
from sklearn.metrics import log_loss
import pandas as pd
import time
from sklearn.metrics import log_loss, brier_score_loss
from tensorflow.keras.losses import categorical_crossentropy
from os.path import join
import sklearn.metrics as metrics
# Imports to get "utility" package
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath("utility") ) ) )
from utility.unpickle_probs import unpickle_probs
from utility.evaluation import ECE, MCE
def softmax(x):
"""
Compute softmax values for each sets of scores in x.
Parameters:
x (numpy.ndarray): array containing m samples with n-dimensions (m,n)
Returns:
x_softmax (numpy.ndarray) softmaxed values for initial (m,n) array
"""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=1, keepdims=1)
class HistogramBinning():
"""
Histogram Binning as a calibration method. The bins are divided into equal lengths.
The class contains two methods:
- fit(probs, true), that should be used with validation data to train the calibration model.
- predict(probs), this method is used to calibrate the confidences.
"""
def __init__(self, M=15):
"""
M (int): the number of equal-length bins used
"""
self.bin_size = 1./M # Calculate bin size
self.conf = [] # Initiate confidence list
self.upper_bounds = np.arange(self.bin_size, 1+self.bin_size, self.bin_size) # Set bin bounds for intervals
def _get_conf(self, conf_thresh_lower, conf_thresh_upper, probs, true):
"""
Inner method to calculate optimal confidence for certain probability range
Params:
- conf_thresh_lower (float): start of the interval (not included)
- conf_thresh_upper (float): end of the interval (included)
- probs : list of probabilities.
- true : list with true labels, where 1 is positive class and 0 is negative).
"""
# Filter labels within probability range
filtered = [x[0] for x in zip(true, probs) if x[1] > conf_thresh_lower and x[1] <= conf_thresh_upper]
nr_elems = len(filtered) # Number of elements in the list.
if nr_elems < 1:
return 0
else:
            # In essence the confidence equals the average accuracy of a bin
conf = sum(filtered)/nr_elems # Sums positive classes
return conf
def fit(self, probs, true):
"""
Fit the calibration model, finding optimal confidences for all the bins.
Params:
probs: probabilities of data
true: true labels of data
"""
conf = []
        # Go through the intervals and add a confidence value to the list
for conf_thresh in self.upper_bounds:
temp_conf = self._get_conf((conf_thresh - self.bin_size), conf_thresh, probs = probs, true = true)
conf.append(temp_conf)
self.conf = conf
# Fit based on predicted confidence
def predict(self, probs):
"""
Calibrate the confidences
Param:
probs: probabilities of the data (shape [samples, classes])
Returns:
Calibrated probabilities (shape [samples, classes])
"""
# Go through all the probs and check what confidence is suitable for it.
for i, prob in enumerate(probs):
idx = np.searchsorted(self.upper_bounds, prob)
probs[i] = self.conf[idx]
return probs
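# Illustrative usage sketch (not part of the original module); the toy arrays
# below are made up. In the 1-vs-K setting used by cal_results() further down,
# `probs` is a 1-D array of confidences for one class and `true` holds 0/1 labels:
#   hb = HistogramBinning(M=15)
#   hb.fit(probs=np.array([0.1, 0.8, 0.9, 0.4]), true=np.array([0, 1, 1, 0]))
#   calibrated = hb.predict(np.array([0.85, 0.2]))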
class TemperatureScaling():
def __init__(self, temp = 1, maxiter = 50, solver = "BFGS"):
"""
Initialize class
Params:
temp (float): starting temperature, default 1
            maxiter (int): maximum number of iterations for the optimizer; in practice no more than 8 iterations have been needed.
"""
self.temp = temp
self.maxiter = maxiter
self.solver = solver
def _loss_fun(self, x, probs, true):
# Calculates the loss using log-loss (cross-entropy loss)
scaled_probs = self.predict(probs, x)
loss = log_loss(y_true=true, y_pred=scaled_probs)
return loss
# Find the temperature
def fit(self, logits, true):
"""
Trains the model and finds optimal temperature
Params:
logits: the output from neural network for each class (shape [samples, classes])
true: one-hot-encoding of true labels.
Returns:
the results of optimizer after minimizing is finished.
"""
true = true.flatten() # Flatten y_val
opt = minimize(self._loss_fun, x0 = 1, args=(logits, true), options={'maxiter':self.maxiter}, method = self.solver)
self.temp = opt.x[0]
return opt
def predict(self, logits, temp = None):
"""
Scales logits based on the temperature and returns calibrated probabilities
Params:
logits: logits values of data (output from neural network) for each class (shape [samples, classes])
            temp: if not set, use the temperature found by the model or previously set.
Returns:
calibrated probabilities (nd.array with shape [samples, classes])
"""
if not temp:
return softmax(logits/self.temp)
else:
return softmax(logits/temp)
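# Illustrative usage sketch (not part of the original module); `logits_val`,
# `y_val` and `logits_test` stand for arrays loaded elsewhere (e.g. via
# unpickle_probs). fit() is given raw logits and the flattened label vector,
# as in cal_results() below; predict() returns temperature-scaled softmax probabilities:
#   ts = TemperatureScaling(maxiter=50, solver="BFGS")
#   ts.fit(logits_val, y_val.flatten())
#   probs_test = ts.predict(logits_test)
#   print("learned temperature:", ts.temp)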
def evaluate(probs, y_true, verbose = False, normalize = False, bins = 15):
"""
Evaluate model using various scoring measures: Error Rate, ECE, MCE, NLL, Brier Score
Params:
probs: a list containing probabilities for all the classes with a shape of (samples, classes)
y_true: a list containing the actual class labels
verbose: (bool) are the scores printed out. (default = False)
normalize: (bool) in case of 1-vs-K calibration, the probabilities need to be normalized.
bins: (int) - into how many bins are probabilities divided (default = 15)
Returns:
(error, ece, mce, loss, brier), returns various scoring measures
"""
preds = np.argmax(probs, axis=1) # Take maximum confidence as prediction
if normalize:
confs = np.max(probs, axis=1)/np.sum(probs, axis=1)
# Check if everything below or equal to 1?
else:
confs = np.max(probs, axis=1) # Take only maximum confidence
accuracy = metrics.accuracy_score(y_true, preds) * 100
error = 100 - accuracy
# Calculate ECE
ece = ECE(confs, preds, y_true, bin_size = 1/bins)
# Calculate MCE
mce = MCE(confs, preds, y_true, bin_size = 1/bins)
loss = log_loss(y_true=y_true, y_pred=probs)
y_prob_true = np.array([probs[i, idx] for i, idx in enumerate(y_true)]) # Probability of positive class
brier = brier_score_loss(y_true=y_true, y_prob=y_prob_true) # Brier Score (MSE)
if verbose:
print("Accuracy:", accuracy)
print("Error:", error)
print("ECE:", ece)
print("MCE:", mce)
print("Loss:", loss)
print("brier:", brier)
return (error, ece, mce, loss, brier)
def cal_results(fn, path, files, m_kwargs = {}, approach = "all"):
"""
    Calibrate model scores, using the output from the logits files and the given function (fn).
    Two different approaches, "all" and "1-vs-K", are implemented for calibration;
    the chosen approach should match the function used for calibration.
TODO: split calibration of single and all into separate functions for more use cases.
Params:
fn (class): class of the calibration method used. It must contain methods "fit" and "predict",
                    where the first fits the model and the second outputs calibrated probabilities.
path (string): path to the folder with logits files
files (list of strings): pickled logits files ((logits_val, y_val), (logits_test, y_test))
m_kwargs (dictionary): keyword arguments for the calibration class initialization
approach (string): "all" for multiclass calibration and "1-vs-K" for 1-vs-K approach.
Returns:
df (pandas.DataFrame): dataframe with calibrated and uncalibrated results for all the input files.
"""
df = pd.DataFrame(columns=["Name", "Error", "ECE", "MCE", "Loss", "Brier"])
total_t1 = time.time()
for i, f in enumerate(files):
name = "_".join(f.split("_")[1:-1])
print(name)
t1 = time.time()
FILE_PATH = join(path, f)
(logits_val, y_val), (logits_test, y_test) = unpickle_probs(FILE_PATH)
if approach == "all":
y_val = y_val.flatten()
model = fn(**m_kwargs)
model.fit(logits_val, y_val)
probs_val = model.predict(logits_val)
probs_test = model.predict(logits_test)
error, ece, mce, loss, brier = evaluate(softmax(logits_test), y_test, verbose=True) # Test before scaling
error2, ece2, mce2, loss2, brier2 = evaluate(probs_test, y_test, verbose=False)
print("Error %f; ece %f; mce %f; loss %f, brier %f" % evaluate(probs_val, y_val, verbose=False, normalize=True))
else: # 1-vs-k models
probs_val = softmax(logits_val) # Softmax logits
probs_test = softmax(logits_test)
K = probs_test.shape[1]
# Go through all the classes
for k in range(K):
# Prep class labels (1 fixed true class, 0 other classes)
y_cal = np.array(y_val == k, dtype="int")[:, 0]
# Train model
model = fn(**m_kwargs)
model.fit(probs_val[:, k], y_cal) # Get only one column with probs for given class "k"
                probs_val[:, k] = model.predict(probs_val[:, k]) # Predict new values based on the fitting
probs_test[:, k] = model.predict(probs_test[:, k])
# Replace NaN with 0, as it should be close to zero # TODO is it needed?
idx_nan = np.where(np.isnan(probs_test))
probs_test[idx_nan] = 0
idx_nan = np.where(np.isnan(probs_val))
probs_val[idx_nan] = 0
# Get results for test set
error, ece, mce, loss, brier = evaluate(softmax(logits_test), y_test, verbose=True, normalize=False)
error2, ece2, mce2, loss2, brier2 = evaluate(probs_test, y_test, verbose=False, normalize=True)
print("Error %f; ece %f; mce %f; loss %f, brier %f" % evaluate(probs_val, y_val, verbose=False, normalize=True))
df.loc[i*2] = [name, error, ece, mce, loss, brier]
df.loc[i*2+1] = [(name + "_calib"), error2, ece2, mce2, loss2, brier2]
t2 = time.time()
print("Time taken:", (t2-t1), "\n")
total_t2 = time.time()
print("Total time taken:", (total_t2-total_t1))
return df
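# Example invocation sketch (illustrative; not in the original file). The pickle
# file names below are hypothetical logits files in the format expected by
# unpickle_probs ((logits_val, y_val), (logits_test, y_test)):
#   files = ["probs_resnet_wide32_c10_logits.p", "probs_resnet_wide32_c100_logits.p"]
#   df_temp = cal_results(TemperatureScaling, "../../logits", files, approach="all")
#   df_hist = cal_results(HistogramBinning, "../../logits", files,
#                         m_kwargs={"M": 15}, approach="1-vs-K")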
```
#### File: scripts/dan_nlp/preprocess.py
```python
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
import pickle as pkl
import numpy as np
class PreProcessor:
def __init__(self,INPUT_FILE,WE_FILE):
self.input_file = INPUT_FILE
self.we_file = WE_FILE
def tokenize(self):
data = pkl.load(open(self.input_file,"rb"))
self.train_data = np.array(data["train"])
self.val_data = np.array(data["dev"])
questions = np.array(["".join(self.train_data[i,0]) for i in range(self.train_data.shape[0])])
questions_val = np.array(["".join(self.val_data[i,0]) for i in range(self.val_data.shape[0])])
print(questions[0])
#questions = np.array([self.train_data[i,0] for i in range(self.train_data.shape[0])])
#questions_val = np.array([self.train_data[i,0] for i in range(self.val_data.shape[0])])
tokenizer = Tokenizer()
tokenizer.fit_on_texts(questions)
self.sequences = tokenizer.texts_to_sequences(questions)
self.sequences_val = tokenizer.texts_to_sequences(questions_val)
self.word_index = tokenizer.word_index
print("Found %s unique tokens" %(len(self.word_index)))
def make_data(self):
self.MAX_SEQUENCE_LENGTH = max([len(self.sequences[i]) for i in range(len(self.sequences))])
data = pad_sequences(self.sequences,maxlen=self.MAX_SEQUENCE_LENGTH)
data_val = pad_sequences(self.sequences_val,maxlen=self.MAX_SEQUENCE_LENGTH)
answers_train = set(self.train_data[:,1])
answers_val = set(self.val_data[:,1])
answers = answers_train.union(answers_val)
labels_index = {} # labels_index["Henry IV of France"]
answers_index = {} # answers_index[0]
for i,j in enumerate(answers):
labels_index[j] = i
answers_index[i] = j
labels = np.zeros((len(self.sequences),1))
labels_val = np.zeros((len(self.sequences_val),1))
for i in range(len(self.sequences)):
labels[i] = labels_index[self.train_data[i,1]]
for i in range(len(self.sequences_val)):
labels_val[i] = labels_index[self.val_data[i,1]]
labels = to_categorical(labels,num_classes=len(answers))
labels_val = to_categorical(labels_val,num_classes=len(answers))
print("Shape of data tensor: " +str(data.shape))
print("Shape of label tensor: " +str(labels.shape))
return data, labels, data_val, labels_val
def get_word_embedding_matrix(self,EMBEDDING_DIM=100):
embeddings_index = {}
if self.we_file == "rand":
return None
f = open(self.we_file)
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
self.embedding_matrix = np.zeros((len(self.word_index)+1, EMBEDDING_DIM))
for word, i in self.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
self.embedding_matrix[i] = embedding_vector
return self.embedding_matrix
if __name__ == "__main__":
pass
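    # Illustrative usage sketch (not from the original script); the input pickle
    # and GloVe embedding paths below are hypothetical:
    #   pp = PreProcessor(INPUT_FILE="qb_data.pkl", WE_FILE="glove.6B.100d.txt")
    #   pp.tokenize()
    #   data, labels, data_val, labels_val = pp.make_data()
    #   embedding_matrix = pp.get_word_embedding_matrix(EMBEDDING_DIM=100)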
```
#### File: scripts/lenet/eval_model_c100.py
```python
import keras
import numpy as np
from keras import optimizers
from keras.datasets import cifar10, cifar100
from keras.models import Sequential
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.callbacks import LearningRateScheduler, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from sklearn.model_selection import train_test_split
import pickle
# Imports to get "utility" package
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath("utility") ) ) )
from utility.evaluation import evaluate_model
batch_size = 128
epochs = 200
iterations = 45000 // batch_size
num_classes10 = 10
num_classes100 = 100
weight_decay = 0.0001
seed = 333
N = 1
weights_file_10 = "../../models/lenet_5_c10.h5"
weights_file_100 = "../../models/lenet_5_c100.h5"
# Model based on LeNet article.
def build_model(n=1, num_classes = 10):
"""
parameters:
n: (int) scaling for model (n times filters in Conv2D and nodes in Dense)
"""
model = Sequential()
model.add(Conv2D(n*6, (5, 5), padding='valid', activation = 'relu', kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay), input_shape=(32,32,3)))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Conv2D(n*16, (5, 5), padding='valid', activation = 'relu', kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay)))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(n*120, activation = 'relu', kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay) ))
model.add(Dense(n*84, activation = 'relu', kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay) ))
model.add(Dense(num_classes, activation = 'softmax', kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay) ))
sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
return model
def color_preprocessing(x_train,x_test):
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
mean = [125.307, 122.95, 113.865]
std = [62.9932, 62.0887, 66.7048]
for i in range(3):
x_train[:,:,:,i] = (x_train[:,:,:,i] - mean[i]) / std[i]
x_test[:,:,:,i] = (x_test[:,:,:,i] - mean[i]) / std[i]
return x_train, x_test
if __name__ == '__main__':
print("Evaluate CIFAR.100 - LeNet")
# load data
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
x_train, x_test = color_preprocessing(x_train, x_test)
x_train45, x_val, y_train45, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=seed) # random_state = seed
y_train45 = keras.utils.to_categorical(y_train45, num_classes100)
y_val = keras.utils.to_categorical(y_val, num_classes100)
y_test = keras.utils.to_categorical(y_test, num_classes100)
# build network
model = build_model(n = N, num_classes = num_classes100)
evaluate_model(model, weights_file_100, x_test, y_test, bins = 15, verbose = True,
pickle_file = "probs_lenet5_c100", x_val = x_val, y_val = y_val)
```
#### File: scripts/resnet_imgnet/image_gen_extended.py
```python
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import sys
import threading
import copy
import inspect
import types
import keras.backend as K
from keras.utils.generic_utils import Progbar
def random_rotation(x, rg, row_index=1, col_index=2, channel_index=0,
fill_mode='nearest', cval=0.):
theta = np.pi / 180 * np.random.uniform(-rg, rg)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_index], x.shape[col_index]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
return x
def random_shift(x, wrg, hrg, row_index=1, col_index=2, channel_index=0,
fill_mode='nearest', cval=0.):
h, w = x.shape[row_index], x.shape[col_index]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
return x
def random_shear(x, intensity, row_index=1, col_index=2, channel_index=0,
fill_mode='nearest', cval=0.):
shear = np.random.uniform(-intensity, intensity)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_index], x.shape[col_index]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_index=1, col_index=2, channel_index=0,
fill_mode='nearest', cval=0.):
if len(zoom_range) != 2:
raise Exception('zoom_range should be a tuple or list of two floats. '
'Received arg: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_index], x.shape[col_index]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
return x
def random_barrel_transform(x, intensity):
# TODO
pass
def random_channel_shift(x, intensity, channel_index=0):
x = np.rollaxis(x, channel_index, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_index+1)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x, transform_matrix, channel_index=0, fill_mode='nearest', cval=0.):
x = np.rollaxis(x, channel_index, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(x_channel, final_affine_matrix,
final_offset, order=0, mode=fill_mode, cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_index+1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, dim_ordering=K.image_dim_ordering(), mode=None, scale=True):
from PIL import Image
x = x.copy()
if dim_ordering == 'th':
x = x.transpose(1, 2, 0)
if scale:
x += max(-np.min(x), 0)
x /= np.max(x)
x *= 255
if x.shape[2] == 3 and mode == 'RGB':
return Image.fromarray(x.astype('uint8'), mode)
elif x.shape[2] == 1 and mode == 'L':
return Image.fromarray(x[:, :, 0].astype('uint8'), mode)
elif mode:
return Image.fromarray(x, mode)
else:
raise Exception('Unsupported array shape: ', x.shape)
def img_to_array(img, dim_ordering=K.image_dim_ordering()):
if dim_ordering not in ['th', 'tf']:
raise Exception('Unknown dim_ordering: ', dim_ordering)
# image has dim_ordering (height, width, channel)
x = np.asarray(img, dtype='float32')
if len(x.shape) == 3:
if dim_ordering == 'th':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if dim_ordering == 'th':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise Exception('Unsupported image shape: ', x.shape)
return x
def load_img(path, target_mode=None, target_size=None):
from PIL import Image
img = Image.open(path)
if target_mode:
img = img.convert(target_mode)
if target_size:
img = img.resize((target_size[1], target_size[0]))
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png'):
return [os.path.join(directory, f) for f in os.listdir(directory)
if os.path.isfile(os.path.join(directory, f)) and re.match('([\w]+\.(?:' + ext + '))', f)]
def pil_image_reader(filepath, target_mode=None, target_size=None, dim_ordering=K.image_dim_ordering(), **kwargs):
img = load_img(filepath, target_mode=target_mode, target_size=target_size)
return img_to_array(img, dim_ordering=dim_ordering)
def standardize(x,
dim_ordering='th',
rescale=False,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
mean=None, std=None,
samplewise_std_normalization=False,
zca_whitening=False, principal_components=None,
featurewise_standardize_axis=None,
samplewise_standardize_axis=None,
fitting=False,
verbose=0,
config={},
**kwargs):
'''
# Arguments
featurewise_center: set input mean to 0 over the dataset.
samplewise_center: set each sample mean to 0.
featurewise_std_normalization: divide inputs by std of the dataset.
samplewise_std_normalization: divide each input by its std.
featurewise_standardize_axis: axis along which to perform feature-wise center and std normalization.
        samplewise_standardize_axis: axis along which to perform sample-wise center and std normalization.
zca_whitening: apply ZCA whitening.
'''
if fitting:
        if '_X' in config:
# add data to _X array
config['_X'][config['_iX']] = x
config['_iX'] +=1
            if verbose and '_fit_progressbar' in config:
config['_fit_progressbar'].update(config['_iX'], force=(config['_iX']==fitting))
# the array (_X) is ready to fit
if config['_iX'] >= fitting:
X = config['_X'].astype('float32')
del config['_X']
del config['_iX']
if featurewise_center or featurewise_std_normalization:
featurewise_standardize_axis = featurewise_standardize_axis or 0
if type(featurewise_standardize_axis) is int:
featurewise_standardize_axis = (featurewise_standardize_axis, )
assert 0 in featurewise_standardize_axis, 'feature-wise standardize axis should include 0'
if featurewise_center:
mean = np.mean(X, axis=featurewise_standardize_axis, keepdims=True)
config['mean'] = np.squeeze(mean, axis=0)
X -= mean
if featurewise_std_normalization:
std = np.std(X, axis=featurewise_standardize_axis, keepdims=True)
config['std'] = np.squeeze(std, axis=0)
X /= (std + 1e-7)
if zca_whitening:
flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3]))
sigma = np.dot(flatX.T, flatX) / flatX.shape[1]
U, S, V = linalg.svd(sigma)
config['principal_components'] = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T)
if verbose:
del config['_fit_progressbar']
else:
# start a new fitting, fitting = total sample number
config['_X'] = np.zeros((fitting,)+x.shape)
config['_iX'] = 0
config['_X'][config['_iX']] = x
config['_iX'] +=1
if verbose:
config['_fit_progressbar'] = Progbar(target=fitting, verbose=verbose)
return x
if rescale:
x *= rescale
# x is a single image, so it doesn't have image number at index 0
if dim_ordering == 'th':
channel_index = 0
if dim_ordering == 'tf':
channel_index = 2
samplewise_standardize_axis = samplewise_standardize_axis or channel_index
if type(samplewise_standardize_axis) is int:
samplewise_standardize_axis = (samplewise_standardize_axis, )
if samplewise_center:
x -= np.mean(x, axis=samplewise_standardize_axis, keepdims=True)
if samplewise_std_normalization:
x /= (np.std(x, axis=samplewise_standardize_axis, keepdims=True) + 1e-7)
if verbose:
if (featurewise_center and mean is None) or (featurewise_std_normalization and std is None) or (zca_whitening and principal_components is None):
print('WARNING: feature-wise standardization and zca whitening will be disabled, please run "fit" first.')
if featurewise_center:
if mean is not None:
x -= mean
if featurewise_std_normalization:
if std is not None:
x /= (std + 1e-7)
if zca_whitening:
if principal_components is not None:
flatx = np.reshape(x, (x.size))
whitex = np.dot(flatx, principal_components)
x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2]))
return x
def center_crop(x, center_crop_size, **kwargs):
centerw, centerh = x.shape[1]//2, x.shape[2]//2
halfw, halfh = center_crop_size[0]//2, center_crop_size[1]//2
return x[:, centerw-halfw:centerw+halfw,centerh-halfh:centerh+halfh]
def random_crop(x, random_crop_size, sync_seed=None, **kwargs):
np.random.seed(sync_seed)
w, h = x.shape[1], x.shape[2]
rangew = (w - random_crop_size[0]) // 2
rangeh = (h - random_crop_size[1]) // 2
offsetw = 0 if rangew == 0 else np.random.randint(rangew)
offseth = 0 if rangeh == 0 else np.random.randint(rangeh)
return x[:, offsetw:offsetw+random_crop_size[0], offseth:offseth+random_crop_size[1]]
def random_transform(x,
dim_ordering='th',
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
sync_seed=None,
**kwargs):
'''
# Arguments
rotation_range: degrees (0 to 180).
width_shift_range: fraction of total width.
height_shift_range: fraction of total height.
shear_range: shear intensity (shear angle in radians).
zoom_range: amount of zoom. if scalar z, zoom will be randomly picked
in the range [1-z, 1+z]. A sequence of two can be passed instead
to select this range.
channel_shift_range: shift range for each channels.
fill_mode: points outside the boundaries are filled according to the
given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default
is 'nearest'.
cval: value used for points outside the boundaries when fill_mode is
'constant'. Default is 0.
horizontal_flip: whether to randomly flip images horizontally.
vertical_flip: whether to randomly flip images vertically.
rescale: rescaling factor. If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided (before applying
any other transformation).
'''
np.random.seed(sync_seed)
x = x.astype('float32')
# x is a single image, so it doesn't have image number at index 0
if dim_ordering == 'th':
img_channel_index = 0
img_row_index = 1
img_col_index = 2
if dim_ordering == 'tf':
img_channel_index = 2
img_row_index = 0
img_col_index = 1
# use composition of homographies to generate final transform that needs to be applied
if rotation_range:
theta = np.pi / 180 * np.random.uniform(-rotation_range, rotation_range)
else:
theta = 0
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
if height_shift_range:
tx = np.random.uniform(-height_shift_range, height_shift_range) * x.shape[img_row_index]
else:
tx = 0
if width_shift_range:
ty = np.random.uniform(-width_shift_range, width_shift_range) * x.shape[img_col_index]
else:
ty = 0
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
if shear_range:
shear = np.random.uniform(-shear_range, shear_range)
else:
shear = 0
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
if np.isscalar(zoom_range):
zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise Exception('zoom_range should be a float or '
'a tuple or list of two floats. '
'Received arg: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = np.dot(np.dot(np.dot(rotation_matrix, translation_matrix), shear_matrix), zoom_matrix)
h, w = x.shape[img_row_index], x.shape[img_col_index]
transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_index,
fill_mode=fill_mode, cval=cval)
if channel_shift_range != 0:
x = random_channel_shift(x, channel_shift_range, img_channel_index)
if horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_index)
if vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_index)
# TODO:
# barrel/fisheye
np.random.seed()
return x
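# Illustrative sketch (not part of the original module): augmenting a single
# image `x` of shape (3, 32, 32) under 'th' ordering with made-up parameters:
#   x_aug = random_transform(x, dim_ordering='th', rotation_range=15.,
#                            width_shift_range=0.1, height_shift_range=0.1,
#                            zoom_range=0.1, horizontal_flip=True)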
class ImageDataGenerator(object):
'''Generate minibatches with
real-time data augmentation.
# Arguments
featurewise_center: set input mean to 0 over the dataset.
samplewise_center: set each sample mean to 0.
featurewise_std_normalization: divide inputs by std of the dataset.
samplewise_std_normalization: divide each input by its std.
featurewise_standardize_axis: axis along which to perform feature-wise center and std normalization.
        samplewise_standardize_axis: axis along which to perform sample-wise center and std normalization.
zca_whitening: apply ZCA whitening.
rotation_range: degrees (0 to 180).
width_shift_range: fraction of total width.
height_shift_range: fraction of total height.
shear_range: shear intensity (shear angle in radians).
zoom_range: amount of zoom. if scalar z, zoom will be randomly picked
in the range [1-z, 1+z]. A sequence of two can be passed instead
to select this range.
channel_shift_range: shift range for each channels.
fill_mode: points outside the boundaries are filled according to the
given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default
is 'nearest'.
cval: value used for points outside the boundaries when fill_mode is
'constant'. Default is 0.
horizontal_flip: whether to randomly flip images horizontally.
vertical_flip: whether to randomly flip images vertically.
rescale: rescaling factor. If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided (before applying
any other transformation).
dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
(the depth) is at index 1, in 'tf' mode it is at index 3.
It defaults to the `image_dim_ordering` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "th".
seed: random seed for reproducible pipeline processing. If not None, it will also be used by `flow` or
            `flow_from_directory` to generate the shuffle index in case no seed is set there.
'''
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
featurewise_standardize_axis=None,
samplewise_standardize_axis=None,
zca_whitening=False,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
dim_ordering=K.image_dim_ordering(),
seed=None,
verbose=1):
self.config = copy.deepcopy(locals())
self.config['config'] = self.config
self.config['mean'] = None
self.config['std'] = None
self.config['principal_components'] = None
self.config['rescale'] = rescale
if dim_ordering not in {'tf', 'th'}:
raise Exception('dim_ordering should be "tf" (channel after row and '
'column) or "th" (channel before row and column). '
'Received arg: ', dim_ordering)
self.__sync_seed = self.config['seed'] or np.random.randint(0, np.iinfo(np.int32).max)
self.default_pipeline = []
self.default_pipeline.append(random_transform)
self.default_pipeline.append(standardize)
self.set_pipeline(self.default_pipeline)
self.__fitting = False
self.fit_lock = threading.Lock()
@property
def sync_seed(self):
return self.__sync_seed
@property
def fitting(self):
return self.__fitting
@property
def pipeline(self):
return self.__pipeline
def sync(self, image_data_generator):
self.__sync_seed = image_data_generator.sync_seed
return (self, image_data_generator)
def set_pipeline(self, p):
if p is None:
self.__pipeline = self.default_pipeline
elif type(p) is list:
self.__pipeline = p
else:
raise Exception('invalid pipeline.')
def flow(self, X, y=None, batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='', save_mode=None, save_format='jpeg'):
return NumpyArrayIterator(
X, y, self,
batch_size=batch_size, shuffle=shuffle, seed=seed,
dim_ordering=self.config['dim_ordering'],
save_to_dir=save_to_dir, save_prefix=save_prefix,
save_mode=save_mode, save_format=save_format)
def flow_from_directory(self, directory,
color_mode=None, target_size=None,
image_reader='pil', reader_config=None,
read_formats=None,
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='',
save_mode=None, save_format='jpeg'):
if reader_config is None:
reader_config={'target_mode':'RGB', 'target_size':(256,256)}
if read_formats is None:
read_formats={'png','jpg','jpeg','bmp'}
return DirectoryIterator(
directory, self,
color_mode=color_mode, target_size=target_size,
image_reader=image_reader, reader_config=reader_config,
read_formats=read_formats,
classes=classes, class_mode=class_mode,
dim_ordering=self.config['dim_ordering'],
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir, save_prefix=save_prefix,
save_mode=save_mode, save_format=save_format)
def process(self, x):
# get next sync_seed
np.random.seed(self.__sync_seed)
self.__sync_seed = np.random.randint(0, np.iinfo(np.int32).max)
self.config['fitting'] = self.__fitting
self.config['sync_seed'] = self.__sync_seed
for p in self.__pipeline:
x = p(x, **self.config)
return x
def fit_generator(self, generator, nb_iter):
'''Fit a generator
# Arguments
generator: Iterator, generate data for fitting.
nb_iter: Int, number of iteration to fit.
'''
with self.fit_lock:
try:
self.__fitting = nb_iter*generator.batch_size
for i in range(nb_iter):
next(generator)
finally:
self.__fitting = False
def fit(self, X, rounds=1):
'''Fit the pipeline on a numpy array
# Arguments
X: Numpy array, the data to fit on.
rounds: how many rounds of fit to do over the data
'''
X = np.copy(X)
with self.fit_lock:
try:
self.__fitting = rounds*X.shape[0]
for r in range(rounds):
for i in range(X.shape[0]):
self.process(X[i])
finally:
self.__fitting = False
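# Illustrative usage sketch (not part of the original module); X_train / Y_train
# are placeholder numpy arrays in 'tf' ordering, i.e. shaped (N, rows, cols, channels):
#   datagen = ImageDataGenerator(featurewise_center=True,
#                                featurewise_std_normalization=True,
#                                horizontal_flip=True, dim_ordering='tf')
#   datagen.fit(X_train, rounds=1)                 # estimates mean/std through the pipeline
#   flow = datagen.flow(X_train, Y_train, batch_size=32)
#   x_batch, y_batch = next(flow)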
class Iterator(object):
def __init__(self, N, batch_size, shuffle, seed):
self.N = N
self.batch_size = batch_size
self.shuffle = shuffle
self.seed = seed
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_generator = self._flow_index(N, batch_size, shuffle, seed)
def reset(self):
self.batch_index = 0
def _flow_index(self, N, batch_size=32, shuffle=False, seed=None):
# ensure self.batch_index is 0
self.reset()
while 1:
if self.batch_index == 0:
self.index_array = np.arange(N)
if shuffle:
if seed is not None:
np.random.seed(seed + self.total_batches_seen)
self.index_array = np.random.permutation(N)
if seed is not None:
np.random.seed()
current_index = (self.batch_index * batch_size) % N
if N >= current_index + batch_size:
current_batch_size = batch_size
self.batch_index += 1
else:
current_batch_size = N - current_index
self.batch_index = 0
self.total_batches_seen += 1
yield (self.index_array[current_index: current_index + current_batch_size],
current_index, current_batch_size)
def __add__(self, it):
assert self.N == it.N
assert self.batch_size == it.batch_size
assert self.shuffle == it.shuffle
seed = self.seed or np.random.randint(0, np.iinfo(np.int32).max)
it.total_batches_seen = self.total_batches_seen
self.index_generator = self._flow_index(self.N, self.batch_size, self.shuffle, seed)
it.index_generator = it._flow_index(it.N, it.batch_size, it.shuffle, seed)
if (sys.version_info > (3, 0)):
iter_zip = zip
else:
from itertools import izip
iter_zip = izip
return iter_zip(self, it)
def __iter__(self):
# needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
class NumpyArrayIterator(Iterator):
def __init__(self, X, y, image_data_generator,
batch_size=32, shuffle=False, seed=None,
dim_ordering=K.image_dim_ordering(),
save_to_dir=None, save_prefix='',
save_mode=None, save_format='jpeg'):
if y is not None and len(X) != len(y):
raise Exception('X (images tensor) and y (labels) '
'should have the same length. '
'Found: X.shape = %s, y.shape = %s' % (np.asarray(X).shape, np.asarray(y).shape))
self.X = X
self.y = y
self.image_data_generator = image_data_generator
self.dim_ordering = dim_ordering
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_mode = save_mode
self.save_format = save_format
seed = seed or image_data_generator.config['seed']
super(NumpyArrayIterator, self).__init__(X.shape[0], batch_size, shuffle, seed)
def __add__(self, it):
if isinstance(it, NumpyArrayIterator):
assert self.X.shape[0] == it.X.shape[0]
if isinstance(it, DirectoryIterator):
assert self.X.shape[0] == it.nb_sample
it.image_data_generator.sync(self.image_data_generator)
return super(NumpyArrayIterator, self).__add__(it)
def next(self):
# for python 2.x.
# Keeps under lock only the mechanism which advances
# the indexing of each batch
# see http://anandology.com/blog/using-iterators-and-generators/
with self.lock:
index_array, current_index, current_batch_size = next(self.index_generator)
# The transformation of images is not under thread lock so it can be done in parallel
batch_x = None
for i, j in enumerate(index_array):
x = self.X[j]
x = self.image_data_generator.process(x)
if i == 0:
batch_x = np.zeros((current_batch_size,) + x.shape)
batch_x[i] = x
if self.save_to_dir:
for i in range(current_batch_size):
img = array_to_img(batch_x[i], self.dim_ordering, mode=self.save_mode, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
index=current_index + i,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
if self.y is None:
return batch_x
batch_y = self.y[index_array]
return batch_x, batch_y
class DirectoryIterator(Iterator):
def __init__(self, directory, image_data_generator,
color_mode=None, target_size=None,
image_reader="pil", read_formats=None,
reader_config=None,
                 dim_ordering=K.image_dim_ordering(),
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='',
save_mode=None, save_format='jpeg'):
self.directory = directory
self.image_data_generator = image_data_generator
self.image_reader = image_reader
if self.image_reader == 'pil':
self.image_reader = pil_image_reader
if read_formats is None:
read_formats = {'png','jpg','jpeg','bmp'}
if reader_config is None:
reader_config = {'target_mode': 'RGB', 'target_size':None}
self.reader_config = reader_config
# TODO: move color_mode and target_size to reader_config
if color_mode == 'rgb':
self.reader_config['target_mode'] = 'RGB'
elif color_mode == 'grayscale':
self.reader_config['target_mode'] = 'L'
if target_size:
self.reader_config['target_size'] = target_size
self.dim_ordering = dim_ordering
self.reader_config['dim_ordering'] = dim_ordering
if class_mode not in {'categorical', 'binary', 'sparse', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_mode = save_mode
self.save_format = save_format
seed = seed or image_data_generator.config['seed']
# first, count the number of samples and classes
self.nb_sample = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
# if no class is found, add '' for scanning the root folder
if class_mode is None and len(classes) == 0:
classes.append('')
self.nb_class = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
for subdir in classes:
subpath = os.path.join(directory, subdir)
for fname in os.listdir(subpath):
is_valid = False
for extension in read_formats:
if fname.lower().endswith('.' + extension):
is_valid = True
break
if is_valid:
self.nb_sample += 1
print('Found %d images belonging to %d classes.' % (self.nb_sample, self.nb_class))
# second, build an index of the images in the different class subfolders
self.filenames = []
self.classes = np.zeros((self.nb_sample,), dtype='int32')
i = 0
for subdir in classes:
subpath = os.path.join(directory, subdir)
for fname in os.listdir(subpath):
is_valid = False
for extension in read_formats:
if fname.lower().endswith('.' + extension):
is_valid = True
break
if is_valid:
self.classes[i] = self.class_indices[subdir]
self.filenames.append(os.path.join(subdir, fname))
i += 1
assert len(self.filenames)>0, 'No valid file is found in the target directory.'
self.reader_config['class_mode'] = self.class_mode
self.reader_config['classes'] = self.classes
self.reader_config['filenames'] = self.filenames
self.reader_config['directory'] = self.directory
self.reader_config['nb_sample'] = self.nb_sample
self.reader_config['seed'] = seed
self.reader_config['sync_seed'] = self.image_data_generator.sync_seed
super(DirectoryIterator, self).__init__(self.nb_sample, batch_size, shuffle, seed)
if inspect.isgeneratorfunction(self.image_reader):
self._reader_generator_mode = True
self._reader_generator = []
# set index batch_size to 1
self.index_generator = self._flow_index(self.N, 1 , self.shuffle, seed)
else:
self._reader_generator_mode = False
def __add__(self, it):
if isinstance(it, DirectoryIterator):
assert self.nb_sample == it.nb_sample
assert len(self.filenames) == len(it.filenames)
assert np.alltrue(self.classes == it.classes)
assert self.image_reader == it.image_reader
if inspect.isgeneratorfunction(self.image_reader):
self._reader_generator = []
it._reader_generator = []
if isinstance(it, NumpyArrayIterator):
            assert self.nb_sample == it.X.shape[0]
it.image_data_generator.sync(self.image_data_generator)
return super(DirectoryIterator, self).__add__(it)
def next(self):
self.reader_config['sync_seed'] = self.image_data_generator.sync_seed
if self._reader_generator_mode:
sampleCount = 0
batch_x = None
_new_generator_flag = False
while sampleCount<self.batch_size:
for x in self._reader_generator:
_new_generator_flag = False
if x.ndim == 2:
x = np.expand_dims(x, axis=0)
x = self.image_data_generator.process(x)
self.reader_config['sync_seed'] = self.image_data_generator.sync_seed
if sampleCount == 0:
batch_x = np.zeros((self.batch_size,) + x.shape)
batch_x[sampleCount] = x
sampleCount +=1
if sampleCount >= self.batch_size:
break
if sampleCount >= self.batch_size or _new_generator_flag:
break
with self.lock:
index_array, _, _ = next(self.index_generator)
fname = self.filenames[index_array[0]]
self._reader_generator = self.image_reader(os.path.join(self.directory, fname), **self.reader_config)
assert isinstance(self._reader_generator, types.GeneratorType)
_new_generator_flag = True
else:
with self.lock:
index_array, current_index, current_batch_size = next(self.index_generator)
# The transformation of images is not under thread lock so it can be done in parallel
batch_x = None
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
x = self.image_reader(os.path.join(self.directory, fname), **self.reader_config)
if x.ndim == 2:
x = np.expand_dims(x, axis=0)
x = self.image_data_generator.process(x)
if i == 0:
batch_x = np.zeros((current_batch_size,) + x.shape)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i in range(current_batch_size):
img = array_to_img(batch_x[i], self.dim_ordering, mode=self.save_mode, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
index=current_index + i,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype('float32')
elif self.class_mode == 'categorical':
batch_y = np.zeros((len(batch_x), self.nb_class), dtype='float32')
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
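if __name__ == '__main__':
    # Hedged smoke test of the index-batching logic in Iterator._flow_index above.
    # It assumes the module-level imports the classes already rely on (numpy as np,
    # threading); the subclass name is purely illustrative.
    class _IndexOnlyIterator(Iterator):
        """Toy subclass that yields only the index batches."""
        def next(self):
            with self.lock:
                index_array, current_index, current_batch_size = next(self.index_generator)
            return index_array
    it = _IndexOnlyIterator(N=10, batch_size=4, shuffle=True, seed=0)
    for _ in range(3):
        print(next(it))  # three index batches covering the 10 samples (sizes 4, 4, 2)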
```
#### File: scripts/resnet_sd/eval_model_c10.py
```python
import numpy as np
import collections
import pickle
from resnet_sd import resnet_sd_model
from sklearn.model_selection import train_test_split
from keras.models import Model
from keras.optimizers import SGD
from keras.datasets import cifar10
from keras.utils import np_utils
# Imports to get "utility" package
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath("utility") ) ) )
from utility.evaluation import evaluate_model
# Per channel mean and std normalization
def color_preprocessing(x_train, x_val, x_test):
x_train = x_train.astype('float32')
x_val = x_val.astype('float32')
x_test = x_test.astype('float32')
mean = np.mean(x_train, axis=(0,1,2)) # Per channel mean
std = np.std(x_train, axis=(0,1,2))
x_train = (x_train - mean) / std
x_val = (x_val - mean) / std
x_test = (x_test - mean) / std
return x_train, x_val, x_test
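# Tiny numeric sketch of the per-channel standardisation above: the mean/std are taken
# over (samples, height, width), leaving one scalar pair per colour channel.
# >>> x = np.random.rand(8, 32, 32, 3)
# >>> mean, std = np.mean(x, axis=(0, 1, 2)), np.std(x, axis=(0, 1, 2))  # both shape (3,)
# >>> ((x - mean) / std).mean(axis=(0, 1, 2))                            # roughly [0, 0, 0]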
if __name__ == '__main__':
# constants
img_rows, img_cols = 32, 32
img_channels = 3
nb_epochs = 500
batch_size = 128
nb_classes = 10
seed = 333
weights_file = "../../models/resnet_110_SD_c10.hdf5"
# data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Data splitting (get additional 5k validation set)
# Sklearn to split
x_train45, x_val, y_train45, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=seed) # random_state = seed
x_train45, x_val, x_test = color_preprocessing(x_train45, x_val, x_test) # Mean per channel
y_train45 = np_utils.to_categorical(y_train45, nb_classes) # 1-hot vector
y_val = np_utils.to_categorical(y_val, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
# building and training net
model = resnet_sd_model(img_shape = (32,32), img_channels = 3,
layers = 110, nb_classes = nb_classes, verbose = True)
evaluate_model(model, weights_file, x_test, y_test, bins = 15, verbose = True,
pickle_file = "probs_resnet110_SD_c10", x_val = x_val, y_val = y_val)
``` |
{
"source": "jielyu/ml-models",
"score": 2
} |
#### File: src/dataset/mscoco.py
```python
import os
import io
import itertools
import json
import tempfile
import time
import cv2
import numpy as np
import torch
from torch.utils.data.dataloader import default_collate
import torchvision
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from loguru import logger
import contextlib
from loguru import logger
from tqdm import tqdm
import matplotlib.pyplot as plt
from dataset.datasets_wrapper import Dataset
class COCODataset(Dataset):
"""
COCO dataset class.
"""
COCO_CLASSES = (
"person",
"bicycle",
"car",
"motorcycle",
"airplane",
"bus",
"train",
"truck",
"boat",
"traffic light",
"fire hydrant",
"stop sign",
"parking meter",
"bench",
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
"backpack",
"umbrella",
"handbag",
"tie",
"suitcase",
"frisbee",
"skis",
"snowboard",
"sports ball",
"kite",
"baseball bat",
"baseball glove",
"skateboard",
"surfboard",
"tennis racket",
"bottle",
"wine glass",
"cup",
"fork",
"knife",
"spoon",
"bowl",
"banana",
"apple",
"sandwich",
"orange",
"broccoli",
"carrot",
"hot dog",
"pizza",
"donut",
"cake",
"chair",
"couch",
"potted plant",
"bed",
"dining table",
"toilet",
"tv",
"laptop",
"mouse",
"remote",
"keyboard",
"cell phone",
"microwave",
"oven",
"toaster",
"sink",
"refrigerator",
"book",
"clock",
"vase",
"scissors",
"teddy bear",
"hair drier",
"toothbrush",
)
def __init__(
self,
data_dir=None,
json_file="instances_train2017.json",
name="train2017",
img_size=(416, 416),
preproc=None,
cache=False,
dataset_name="MSCOCO-dataset",
):
"""
COCO dataset initialization. Annotation data are read into memory by COCO API.
Args:
data_dir (str): dataset root directory
json_file (str): COCO json file name
name (str): COCO data name (e.g. 'train2017' or 'val2017')
img_size (int): target image size after pre-processing
preproc: data augmentation strategy
dataset_name: name of dataset folder
"""
super().__init__(img_size)
if data_dir is None:
data_dir = os.path.join("dataset", dataset_name)
else:
data_dir = os.path.join(data_dir, dataset_name)
self.data_dir = data_dir
self.json_file = json_file
self.coco = COCO(os.path.join(self.data_dir, "annotations", self.json_file))
self.ids = self.coco.getImgIds()
self.class_ids = sorted(self.coco.getCatIds())
cats = self.coco.loadCats(self.coco.getCatIds())
self._classes = tuple([c["name"] for c in cats])
self.imgs = None
self.name = name
self.img_size = img_size
self.preproc = preproc
self.annotations = self._load_coco_annotations()
if cache:
self._cache_images()
def __len__(self):
return len(self.ids)
def __del__(self):
del self.imgs
def _load_coco_annotations(self):
return [self.load_anno_from_ids(_ids) for _ids in self.ids]
def _cache_images(self):
logger.warning(
"\n********************************************************************************\n"
"You are using cached images in RAM to accelerate training.\n"
"This requires large system RAM.\n"
"Make sure you have 200G+ RAM and 136G available disk space for training COCO.\n"
"********************************************************************************\n"
)
max_h = self.img_size[0]
max_w = self.img_size[1]
cache_file = self.data_dir + "/img_resized_cache_" + self.name + ".array"
if not os.path.exists(cache_file):
logger.info(
"Caching images for the first time. This might take about 20 minutes for COCO"
)
self.imgs = np.memmap(
cache_file,
shape=(len(self.ids), max_h, max_w, 3),
dtype=np.uint8,
mode="w+",
)
from tqdm import tqdm
from multiprocessing.pool import ThreadPool
NUM_THREADs = min(8, os.cpu_count())
loaded_images = ThreadPool(NUM_THREADs).imap(
lambda x: self.load_resized_img(x),
range(len(self.annotations)),
)
pbar = tqdm(enumerate(loaded_images), total=len(self.annotations))
for k, out in pbar:
self.imgs[k][: out.shape[0], : out.shape[1], :] = out.copy()
self.imgs.flush()
pbar.close()
else:
logger.warning(
"You are using cached imgs! Make sure your dataset is not changed!!\n"
"Everytime the self.input_size is changed in your exp file, you need to delete\n"
"the cached data and re-generate them.\n"
)
logger.info("Loading cached imgs...")
self.imgs = np.memmap(
cache_file,
shape=(len(self.ids), max_h, max_w, 3),
dtype=np.uint8,
mode="r+",
)
def load_anno_from_ids(self, id_):
im_ann = self.coco.loadImgs(id_)[0]
width = im_ann["width"]
height = im_ann["height"]
anno_ids = self.coco.getAnnIds(imgIds=[int(id_)], iscrowd=False)
annotations = self.coco.loadAnns(anno_ids)
objs = []
for obj in annotations:
x1 = np.max((0, obj["bbox"][0]))
y1 = np.max((0, obj["bbox"][1]))
x2 = np.min((width, x1 + np.max((0, obj["bbox"][2]))))
y2 = np.min((height, y1 + np.max((0, obj["bbox"][3]))))
if obj["area"] > 0 and x2 >= x1 and y2 >= y1:
obj["clean_bbox"] = [x1, y1, x2, y2]
objs.append(obj)
num_objs = len(objs)
res = np.zeros((num_objs, 5))
for ix, obj in enumerate(objs):
cls = self.class_ids.index(obj["category_id"])
res[ix, 0:4] = obj["clean_bbox"]
res[ix, 4] = cls
r = min(self.img_size[0] / height, self.img_size[1] / width)
res[:, :4] *= r
img_info = (height, width)
resized_info = (int(height * r), int(width * r))
file_name = (
im_ann["file_name"]
if "file_name" in im_ann
else "{:012}".format(id_) + ".jpg"
)
return (res, img_info, resized_info, file_name)
def load_anno(self, index):
return self.annotations[index][0]
def load_resized_img(self, index):
img = self.load_image(index)
r = min(self.img_size[0] / img.shape[0], self.img_size[1] / img.shape[1])
resized_img = cv2.resize(
img,
(int(img.shape[1] * r), int(img.shape[0] * r)),
interpolation=cv2.INTER_LINEAR,
).astype(np.uint8)
return resized_img
def load_image(self, index):
file_name = self.annotations[index][3]
img_file = os.path.join(self.data_dir, self.name, file_name)
img = cv2.imread(img_file)
assert img is not None
return img
def pull_item(self, index):
id_ = self.ids[index]
res, img_info, resized_info, _ = self.annotations[index]
if self.imgs is not None:
pad_img = self.imgs[index]
img = pad_img[: resized_info[0], : resized_info[1], :].copy()
else:
img = self.load_resized_img(index)
return img, res.copy(), img_info, np.array([id_])
@Dataset.mosaic_getitem
def __getitem__(self, index):
"""
One image / label pair for the given index is picked up and pre-processed.
Args:
index (int): data index
Returns:
img (numpy.ndarray): pre-processed image
padded_labels (torch.Tensor): pre-processed label data.
The shape is :math:`[max_labels, 5]`.
each label consists of [class, xc, yc, w, h]:
class (float): class index.
xc, yc (float) : center of bbox whose values range from 0 to 1.
w, h (float) : size of bbox whose values range from 0 to 1.
info_img : tuple of h, w.
h, w (int): original shape of the image
img_id (int): same as the input index. Used for evaluation.
"""
img, target, img_info, img_id = self.pull_item(index)
if self.preproc is not None:
img, target = self.preproc(img, target, self.input_dim)
return img, target, img_info, img_id
def time_synchronized():
"""pytorch-accurate time"""
if torch.cuda.is_available():
torch.cuda.synchronize()
return time.time()
def postprocess(
prediction, num_classes, conf_thre=0.7, nms_thre=0.45, class_agnostic=False
):
box_corner = prediction.new(prediction.shape)
box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2
box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2
box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2
box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2
prediction[:, :, :4] = box_corner[:, :, :4]
output = [None for _ in range(len(prediction))]
for i, image_pred in enumerate(prediction):
# If none are remaining => process next image
if not image_pred.size(0):
continue
# Get score and class with highest confidence
class_conf, class_pred = torch.max(
image_pred[:, 5 : 5 + num_classes], 1, keepdim=True
)
conf_mask = (image_pred[:, 4] * class_conf.squeeze() >= conf_thre).squeeze()
# Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
detections = torch.cat((image_pred[:, :5], class_conf, class_pred.float()), 1)
detections = detections[conf_mask]
if not detections.size(0):
continue
if class_agnostic:
nms_out_index = torchvision.ops.nms(
detections[:, :4],
detections[:, 4] * detections[:, 5],
nms_thre,
)
else:
nms_out_index = torchvision.ops.batched_nms(
detections[:, :4],
detections[:, 4] * detections[:, 5],
detections[:, 6],
nms_thre,
)
detections = detections[nms_out_index]
if output[i] is None:
output[i] = detections
else:
output[i] = torch.cat((output[i], detections))
return output
def xyxy2xywh(bboxes):
bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 0]
bboxes[:, 3] = bboxes[:, 3] - bboxes[:, 1]
return bboxes
class COCOEvaluator:
"""
COCO AP Evaluation class. All the data in the val2017 dataset are processed
and evaluated by COCO API.
"""
def __init__(
self, dataloader, img_size, confthre, nmsthre, num_classes, testdev=False
):
"""
Args:
dataloader (Dataloader): evaluate dataloader.
img_size (int): image size after preprocess. images are resized
to squares whose shape is (img_size, img_size).
confthre (float): confidence threshold ranging from 0 to 1, which
is defined in the config file.
            nmsthre (float): IoU threshold of non-max suppression ranging from 0 to 1.
"""
self.dataloader = dataloader
self.img_size = img_size
self.confthre = confthre
self.nmsthre = nmsthre
self.num_classes = num_classes
self.testdev = testdev
def evaluate(
self,
model,
distributed=False,
half=False,
trt_file=None,
decoder=None,
test_size=None,
):
"""
COCO average precision (AP) Evaluation. Iterate inference on the test dataset
and the results are evaluated by COCO API.
NOTE: This function will change training mode to False, please save states if needed.
Args:
model : model to evaluate.
Returns:
ap50_95 (float) : COCO AP of IoU=50:95
ap50 (float) : COCO AP of IoU=50
            summary (str): summary info of evaluation.
"""
# TODO half to amp_test
tensor_type = torch.FloatTensor
if torch.cuda.is_available():
tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor
model = model.eval()
if half:
model = model.half()
ids = []
data_list = []
progress_bar = tqdm
inference_time = 0
nms_time = 0
n_samples = max(len(self.dataloader) - 1, 1)
for cur_iter, (imgs, _, info_imgs, ids) in enumerate(
progress_bar(self.dataloader)
):
with torch.no_grad():
imgs = imgs.type(tensor_type)
                # skip the last iters since the batch size might not be enough for batch inference
is_time_record = cur_iter < len(self.dataloader) - 1
if is_time_record:
start = time.time()
outputs = model(imgs)[0]
if decoder is not None:
outputs = decoder(outputs, dtype=outputs.type())
if is_time_record:
infer_end = time_synchronized()
inference_time += infer_end - start
outputs = postprocess(
outputs, self.num_classes, self.confthre, self.nmsthre
)
if is_time_record:
nms_end = time_synchronized()
nms_time += nms_end - infer_end
data_list.extend(self.convert_to_coco_format(outputs, info_imgs, ids))
statistics = torch.FloatTensor([inference_time, nms_time, n_samples])
if torch.cuda.is_available():
statistics = torch.cuda.FloatTensor([inference_time, nms_time, n_samples])
eval_results = self.evaluate_prediction(data_list, statistics)
return eval_results
def convert_to_coco_format(self, outputs, info_imgs, ids):
data_list = []
for (output, img_h, img_w, img_id) in zip(
outputs, info_imgs[0], info_imgs[1], ids
):
if output is None:
continue
output = output.cpu()
bboxes = output[:, 0:4]
# preprocessing: resize
scale = min(
self.img_size[0] / float(img_h), self.img_size[1] / float(img_w)
)
bboxes /= scale
bboxes = xyxy2xywh(bboxes)
cls = output[:, 6]
scores = output[:, 4] * output[:, 5]
for ind in range(bboxes.shape[0]):
label = self.dataloader.dataset.class_ids[int(cls[ind])]
pred_data = {
"image_id": int(img_id),
"category_id": label,
"bbox": bboxes[ind].numpy().tolist(),
"score": scores[ind].numpy().item(),
"segmentation": [],
} # COCO json format
data_list.append(pred_data)
return data_list
def evaluate_prediction(self, data_dict, statistics):
logger.info("Evaluate in main process...")
annType = ["segm", "bbox", "keypoints"]
inference_time = statistics[0].item()
nms_time = statistics[1].item()
n_samples = statistics[2].item()
a_infer_time = 1000 * inference_time / (n_samples * self.dataloader.batch_size)
a_nms_time = 1000 * nms_time / (n_samples * self.dataloader.batch_size)
time_info = ", ".join(
[
"Average {} time: {:.2f} ms".format(k, v)
for k, v in zip(
["forward", "NMS", "inference"],
[a_infer_time, a_nms_time, (a_infer_time + a_nms_time)],
)
]
)
info = time_info + "\n"
# Evaluate the Dt (detection) json comparing with the ground truth
if len(data_dict) > 0:
cocoGt = self.dataloader.dataset.coco
# TODO: since pycocotools can't process dict in py36, write data to json file.
if self.testdev:
json.dump(data_dict, open("./yolox_testdev_2017.json", "w"))
cocoDt = cocoGt.loadRes("./yolox_testdev_2017.json")
else:
_, tmp = tempfile.mkstemp()
json.dump(data_dict, open(tmp, "w"))
cocoDt = cocoGt.loadRes(tmp)
cocoEval = COCOeval(cocoGt, cocoDt, annType[1])
cocoEval.evaluate()
cocoEval.accumulate()
redirect_string = io.StringIO()
with contextlib.redirect_stdout(redirect_string):
cocoEval.summarize()
info += redirect_string.getvalue()
return cocoEval.stats[0], cocoEval.stats[1], info
else:
return 0, 0, info
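if __name__ == "__main__":
    # Hedged sketch (runs only if the module's imports resolve): check the
    # xyxy -> COCO-style (x1, y1, w, h) conversion used by convert_to_coco_format.
    boxes = torch.tensor([[10.0, 20.0, 50.0, 80.0]])  # x1, y1, x2, y2
    print(xyxy2xywh(boxes.clone()))                   # tensor([[10., 20., 40., 60.]])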
``` |
{
"source": "jielyu/notebook",
"score": 3
} |
#### File: code/bg28/bg28_cover.py
```python
from manimlib.imports import *
class Cover(Scene):
def construct(self):
t = TextMobject(*[c for c in 'KERAS'])
t.scale(3)
for idx, c in enumerate(t):
if idx % 2 == 0:
c.set_color(BLUE)
else:
c.set_color(GREEN)
self.play(Write(t))
```
#### File: code/bg44/bg44_cover.py
```python
from manimlib.imports import *
class Cover(Scene):
def construct(self):
tf_text = TextMobject('TF', '2.0')
tf_text.shift(UP)
tf_text[0].set_color(BLUE)
tf_text[1].set_color(YELLOW)
tf_text.scale(3.0)
np_text = TextMobject(*[c for c in 'IMAGE'])
for idx, c in enumerate(np_text):
if idx % 2 == 0:
c.set_color(BLUE)
else:
c.set_color(GREEN)
np_text.shift(0.5*DOWN+1*LEFT)
np_text.scale(0.6)
ft_text = TextMobject(*[c for c in 'FineTune'])
for idx, c in enumerate(ft_text):
if idx % 2 == 0:
c.set_color(BLUE)
else:
c.set_color(GREEN)
ft_text.shift(0.5*DOWN+1*RIGHT)
ft_text.scale(0.6)
self.play(Write(tf_text))
self.play(Write(np_text))
self.play(Write(ft_text))
c = Circle()
c.shift(0.5*DOWN+1*LEFT)
c.scale(0.8)
self.play(Write(c))
c2 = Circle()
c2.shift(0.5*DOWN+1*RIGHT)
c2.scale(0.8)
self.play(Write(c2))
```
#### File: code/bg55/bg55_cover.py
```python
from manimlib.imports import *
class Cover(Scene):
def construct(self):
t = TextMobject('分词工具')
t.scale(2)
#t.shift(UP)
t.set_color(BLUE)
self.play(Write(t))
name = TextMobject('jieba')
name.shift(2*UL)
name.scale(2)
name.set_color(GREEN)
c = Circle()
c.move_to(name)
self.play(Write(name),Write(c))
name = TextMobject('SnowNLP')
name.shift(2*UR)
name.scale(2)
name.set_color(GREEN)
c = Circle()
c.move_to(name)
self.play(Write(name),Write(c))
name = TextMobject('thulac')
name.shift(2*DL)
name.scale(2)
name.set_color(GREEN)
c = Circle()
c.move_to(name)
self.play(Write(name),Write(c))
name = TextMobject('pyltp')
name.shift(2*DR)
name.scale(2)
name.set_color(GREEN)
c = Circle()
c.move_to(name)
self.play(Write(name),Write(c))
```
#### File: code/bg76/bg76_cover.py
```python
from manimlib.imports import *
class Cover(Scene):
CONFIG={
"camera_config": {
"background_color": "#F6F6F6",
},
}
def construct(self):
t = TextMobject(*[c for c in [r'Github\\', 'Action']])
t.scale(3)
for idx, c in enumerate(t):
if idx % 2 == 0:
c.set_color(BLUE)
else:
c.set_color(GREEN)
t.shift(UP)
t[0].shift(LEFT)
t[1].shift(RIGHT)
self.play(Write(t))
rect = Rectangle(height=1.5, width=5)
rect.move_to(t[1].get_center())
rect.set_color(RED)
self.play(Write(rect))
rect = Rectangle(height=1.7, width=5.2)
rect.move_to(t[1].get_center())
rect.set_color(BLACK)
self.play(Write(rect))
icon_path = '~/VideoStudio/CNBlueGeek/notebook/src/images/bg76/github-icon.jpg'
img = Image.open(os.path.expanduser(icon_path))
img = np.asarray(img)
img_obj = ImageMobject(img)
img_obj.move_to(t.get_center()+DOWN*3+LEFT)
#img_obj.scale(0.5)
#img_obj.shift(2*DOWN)
self.play(Write(t))
self.play(Animation(img_obj))
```
#### File: code/bg78/bg78_cover.py
```python
from manimlib.imports import *
class Cover(Scene):
def construct(self):
t = TextMobject('?', r' \% 3 = 0')
t[0].set_color(RED)
t.scale(3)
t.shift(UP*2.0)
self.play(Write(t))
ct = []
for i in range(3):
c1 = Circle()
t1 = TextMobject(str(i))
t1.move_to(c1.get_center())
ct1 = VGroup(c1, t1)
ct1.shift(DOWN*0.3)
ct.append(ct1)
ct[0].shift((LEFT+DOWN)*1.414)
ct[2].shift((RIGHT+DOWN)*1.414)
ct[1].set_color(GREEN)
ct[2].set_color(BLUE)
self.play(*[Write(x) for x in ct])
``` |
{
"source": "jie-ma-ai/FedBase",
"score": 2
} |
#### File: jie-ma-ai/FedBase/result_analysis.py
```python
import glob
import pickle
from fedbase.utils.visualize import *
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path
import os
import numpy as np
import warnings
import json
warnings.filterwarnings("ignore")
from platform import system
def plt_maximize():
# See discussion: https://stackoverflow.com/questions/12439588/how-to-maximize-a-plt-show-window-using-python
backend = plt.get_backend()
cfm = plt.get_current_fig_manager()
if backend == "wxAgg":
cfm.frame.Maximize(True)
elif backend == "TkAgg":
if system() == "Windows":
cfm.window.state("zoomed") # This is windows only
else:
cfm.resize(*cfm.window.maxsize())
elif backend == "QT4Agg":
cfm.window.showMaximized()
elif callable(getattr(cfm, "full_screen_toggle", None)):
if not getattr(cfm, "flag_is_max", None):
cfm.full_screen_toggle()
cfm.flag_is_max = True
else:
raise RuntimeError("plt_maximize() is not implemented for current backend:", backend)
# # change name
# # Function to rename multiple files
# def main():
# folder = "./log/log_groupwise/"
# for count, filename in enumerate(os.listdir(folder)):
# # dst = f"Hostel {str(count)}.jpg"
# src =f"{folder}/{filename}" # foldername/filename, if .py file is outside folder
# # dst =f"{folder}/{dst}"
# # rename() function will
# # rename all the files
# try:
# os.rename(src, src.replace('cfl_fashion_mnist_5', 'WCFL_5_fashion_mnist_5'))
# except:
# pass
# # Driver Code
# main()
# print(a)
# folder = './log/log_groupwise/'
folder = './log/'
for dataset in ['fashion_mnist', 'cifar10']:
# for noniid in ['class_2','dirichlet_0.1','dirichlet_0.5','dirichlet_1']:
for noniid in ['2_class','0.1_dirichlet','10_dirichlet','3_class']:
# for noniid in ['0.1','6']:
# central
method = 'central'
file_list = glob.glob(folder+ method + '_' +'*'+dataset+'*')
if len(file_list)>0:
acc= []
for i in file_list:
with open(i, 'rb') as f:
log = json.load(f)
# print(len(log['server']))
acc.append(log['node']['0'])
# if len(log['server'])<=101:
# acc.append(log['server']
acc_df = pd.DataFrame(acc)
# print(acc_df)
acc_df_n = pd.DataFrame()
for i in acc_df.columns:
acc_df_tmp = acc_df[[i]]
acc_df_tmp.loc[:,'round'] = i+1
acc_df_tmp['test acc'] = acc_df_tmp[i].apply(lambda x: x[0])
acc_df_tmp['test macro f1'] = acc_df_tmp[i].apply(lambda x: x[1])
acc_df_n = pd.concat([acc_df_n, acc_df_tmp], axis=0)
# print(acc_df_n)
print(method, dataset, round(np.mean(acc_df_n[acc_df_n['round'] >=98]['test acc'])*100,2), round(np.std(acc_df_n[acc_df_n['round'] >=98]['test acc'])*100,2)\
, round(np.mean(acc_df_n[acc_df_n['round'] >=98]['test macro f1'])*100,2), round(np.std(acc_df_n[acc_df_n['round'] >=98]['test macro f1'])*100,2))
sns.lineplot(x=acc_df_n["round"], y=acc_df_n["test acc"], label = method)
# others
# for method in ['Local', 'Fedavg', 'Ditto', 'WCFL_3', 'WCFL_5', 'WCFL_10']:
# for method in ['local', 'fedavg', 'fedavg_finetune', 'ditto', 'fedprox', 'wecfl_3', 'wecfl_5', 'wecfl_10', 'ifca_3', 'ifca_5', 'ifca_10', 'fesem_3', 'fesem_5', 'fesem_10',\
# 'wecfl_3_0.95', 'wecfl_5_0.95', 'wecfl_10_0.95', 'ifca_3_0.95', 'ifca_5_0.95', 'ifca_10_0.95', 'fesem_3_0.95', 'fesem_5_0.95', 'fesem_10_0.95']:
for method in [ 'fedavg', 'ifca_3', 'ifca_5', 'ifca_10','fesem_3', 'fesem_5', 'fesem_10', 'wecfl_3', 'wecfl_5', 'wecfl_10']:
# for method in ['Fedavg', 'Ditto', 'Local', 'WCFL_5', 'WCFL_10']:
# for method in ['fedavg', 'ditto', 'local', 'cfl']:
file_list = glob.glob(folder+ method + '_' +dataset+'*'+ noniid +'*')
if len(file_list)>0:
acc= []
for i in file_list:
with open(i, 'rb') as f:
log = json.load(f)
# print(len(log['server']))
# acc.append(log['node']['0'])
if len(log['server'])<=101:
acc.append(log['server'])
acc_df = pd.DataFrame(acc)
# print(acc_df)
acc_df_n = pd.DataFrame()
for i in acc_df.columns:
acc_df_tmp = acc_df[[i]]
acc_df_tmp.loc[:,'round'] = i+1
acc_df_tmp['test acc'] = acc_df_tmp[i].apply(lambda x: x[0])
acc_df_tmp['test macro f1'] = acc_df_tmp[i].apply(lambda x: x[1])
# print(acc_df_tmp)
# acc_df_tmp = acc_df_tmp.rename(columns={i : 'test acc'})
acc_df_n = pd.concat([acc_df_n, acc_df_tmp], axis=0)
# print(acc_df_n)
print(method, dataset, noniid, round(np.mean(acc_df_n[acc_df_n['round'] >=98]['test acc'])*100,2), round(np.std(acc_df_n[acc_df_n['round'] >=98]['test acc'])*100,2)\
, round(np.mean(acc_df_n[acc_df_n['round'] >=98]['test macro f1'])*100,2), round(np.std(acc_df_n[acc_df_n['round'] >=98]['test macro f1'])*100,2))
sns.lineplot(x=acc_df_n["round"], y=acc_df_n["test acc"], label = method)
# sns.lineplot(x=acc_df_n["round"], y=acc_df_n["test macro f1"], label = method)
sns.set_theme(style="darkgrid")
plt.title(dataset+'_'+noniid)
# plt.show()
local_file = './vis/' + dataset+'_'+noniid +'.png'
Path(local_file).parent.mkdir(parents=True, exist_ok=True)
plt_maximize()
plt.savefig(local_file)
plt.close()
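# Hedged single-file sketch (commented so the script's behaviour is unchanged). The
# parsing above implies each log is a JSON dict whose 'server' entry is a list of
# [test_acc, test_macro_f1] pairs, one per communication round:
# files = glob.glob(folder + 'fedavg_*')
# if files:
#     with open(files[0]) as f:
#         log = json.load(f)
#     curve = pd.DataFrame(log['server'], columns=['test acc', 'test macro f1'])
#     curve['round'] = range(1, len(curve) + 1)
#     print(curve.tail())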
``` |
{
"source": "jie-ma-ai/TSC_jie",
"score": 3
} |
#### File: TSC_jie/utils/data_clean_up.py
```python
import numpy as np
import torch
def data_loader(input_path, dataset_name):
train_dataset = np.loadtxt(f'{input_path}/{dataset_name}/{dataset_name}_TRAIN.txt')
test_dataset = np.loadtxt(f'{input_path}/{dataset_name}/{dataset_name}_TEST.txt')
# train dataset clean up
x_train, y_train = train_dataset[:, 1:], train_dataset[:, 0]
x_train = fill_nan_0_min(x_train)
# input_size = x_train.shape[1]
# test dataset clean up
x_test, y_test = test_dataset[:, 1:], test_dataset[:, 0]
x_test = fill_nan_0_min(x_test)
# category y
y = np.concatenate((y_train, y_test))
y = one_hot(y)
y_train = y[: y_train.shape[0], :]
y_test = y[-y_test.shape[0]:, :]
# output_size = y_train.shape[1]
# numpy to tensor to torch dataloarder
x_train = torch.from_numpy(x_train)
y_train = torch.from_numpy(y_train)
# training_set = TensorDataset(x_train, y_train)
# train_loader = DataLoader(training_set, batch_size, shuffle=True)
x_test = torch.from_numpy(x_test)
y_test = torch.from_numpy(y_test)
# test_set = TensorDataset(x_test, y_test)
# test_loader = DataLoader(test_set, batch_size, shuffle=False)
print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
return x_train, y_train, x_test, y_test
def fill_nan_0_min(arr):
mask = np.isnan(arr)
print('#NA: ', len(mask[mask == True]))
# arr = np.where(~mask, arr, [0])
# arr = np.where(~mask, arr, min(arr.min(), [0]))
arr[mask] = 0
arr[mask] = min(arr.min(), 0)
return arr
def one_hot(arr):
uni = np.unique(arr)
out = np.zeros((arr.shape[0], uni.shape[0]))
for i in range(uni.shape[0]):
out[arr == uni[i], i] = 1
return out
def one_hot_reverse(arr):
out = torch.zeros(arr.shape[0])
for i in range(arr.shape[0]):
out[i] = torch.argmax(arr[i, :])
return out
# a = np.array([[np.nan, -1, 2, np.nan, 3, np.nan], [np.nan, -1, 2, np.nan, 3, np.nan]])
# print(fill_nan_0_min(a))
# b = np.array([0,1, 2, 2,4, 4])
# print(one_hot(b))
# print(data_loader(r'd:/project_git/TSC_jie/data/Univariate_arff', 'Car', 10))
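if __name__ == '__main__':
    # Hedged round-trip check of the label helpers above. one_hot maps the *sorted
    # unique values* to columns, so one_hot_reverse returns column indices rather
    # than the original label values (label 4 comes back as column index 3 here).
    labels = np.array([0, 1, 2, 2, 4, 4])
    encoded = one_hot(labels)                            # shape (6, 4)
    decoded = one_hot_reverse(torch.from_numpy(encoded))
    print(encoded.shape, decoded)                        # (6, 4) tensor([0., 1., 2., 2., 3., 3.])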
``` |
{
"source": "JieMEI1994/machine-learning-from-scratches",
"score": 4
} |
#### File: deep learning/function/activation_function.py
```python
import numpy as np
class sigmoid:
def forward(self, Z):
A = 1.0 / (1.0 + np.exp(-Z))
return A
def backward(self, dA, A):
dZ = A * (1 - A) * dA
return dZ
class tanh:
def forward(self, Z):
A = np.tanh(Z)
return A
def backward(self, dA, A):
dZ = (1.0 - np.square(A)) * dA
return dZ
class relu:
def forward(self, Z):
A = np.maximum(Z, 0)
return A
def backward(self, dA, Z):
dZ = dA
dZ[Z < 0] = 0
return dZ
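if __name__ == '__main__':
    # Hedged sketch of the forward/backward contracts above: sigmoid/tanh backward
    # expect the activation A, while relu backward expects the pre-activation Z.
    Z = np.array([[-1.0, 0.0, 2.0]])
    dA = np.ones_like(Z)                    # pretend upstream gradient
    act = sigmoid()
    A = act.forward(Z)
    print(A, act.backward(dA, A))           # elementwise A * (1 - A)
    print(relu().backward(dA.copy(), Z))    # gradient zeroed where Z < 0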
```
#### File: deep learning/function/linear_function.py
```python
import numpy as np
class linear:
def forward(self, X, W, b):
Z = np.dot(X, W) + b
return Z
def backward(self, dZ, X, W, b, m):
dW = (1.0 / m) * np.dot(X.T, dZ)
db = (1.0 / m) * np.sum(dZ, axis=0, keepdims=True)
dX = np.dot(dZ, W.T)
return dX, dW, db
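if __name__ == '__main__':
    # Hedged shape check for the affine layer above: X is (m, in_dim), W is (in_dim, out_dim).
    m, in_dim, out_dim = 4, 3, 2
    X = np.random.randn(m, in_dim)
    W = np.random.randn(in_dim, out_dim)
    b = np.zeros((1, out_dim))
    lin = linear()
    Z = lin.forward(X, W, b)
    dX, dW, db = lin.backward(np.ones_like(Z), X, W, b, m)
    print(Z.shape, dX.shape, dW.shape, db.shape)  # (4, 2) (4, 3) (3, 2) (1, 2)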
```
#### File: deep learning/function/model.py
```python
import numpy as np
from initialization_function import initialization
from linear_function import linear
from activation_function import relu
from dropout_function import dropout
from output_function import softmax
from regularization_function import l2
from optimization_function import adam
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import sys
sys.path.append("C:\\Users\\jmei\\Documents\\Code\\machine learning from scratch\\utili")
from preprocess import one_hot_vector
class vanilla_nural_network:
def __init__(self, hidden_layer_dims):
self.layer_dims = hidden_layer_dims
self.W = []
self.b = []
self.loss = []
self.accuracy = []
def train(self, X, Y, iteration, learning_rate, lambd = 0, keep_prob = 1, interrupt_threshold = 0.1, print_loss = True):
# import function
init = initialization(self.layer_dims)
lin = linear()
act = relu()
drop = dropout(keep_prob)
classifier = softmax()
regulator = l2(lambd)
optimizer = adam(learning_rate)
# initialization
counter = 0
train_X, validation_X, train_Y, validation_Y = train_test_split(X, Y, test_size=0.2, shuffle=True)
self.W, self.b = init.he()
# iteration
for i in range(iteration):
m = Y.shape[0]
# forward
A = train_X
cache = [[None, A, None]]
for l in range(len(self.layer_dims)-1):
Z = lin.forward(A, self.W[l], self.b[l])
A = act.forward(Z)
A, D = drop.forward(A)
cache.append([Z, A, D])
# loss
prob = classifier.forward(A)
loss_tmp1 = classifier.loss(train_Y, prob)
loss_tmp2 = regulator.loss(self.W, m)
loss_tmp = loss_tmp1 + loss_tmp2
self.loss.append(loss_tmp)
# validation accuracy
pred = self.predict(validation_X)
pred = one_hot_vector.decoder(pred)
acc_tmp = np.mean(validation_Y == pred)
self.accuracy.append(acc_tmp)
# print
if print_loss and i % 1000 == 0:
print("Iteration %i, Loss: %f, Accuracy: %.f%%" %(i, loss_tmp, acc_tmp*100))
if loss_tmp <= interrupt_threshold:
print("Iteration %i, Loss: %f <= Threshold: %f" %(i, loss_tmp, interrupt_threshold))
return
# backward
dA = classifier.backward(train_Y, prob)
for l in range(len(self.layer_dims)-1, 1, -1):
dA = drop.backward(dA, cache[l][2])
dZ = act.backward(dA, cache[l][0])
dA, dW, db = lin.backward(dZ, cache[l-1][1], self.W[l-1], self.b[l-1], m)
dW += regulator.backward(self.W[l-1], m)
# update
counter += 1
self.W[l-1], self.b[l-1] = optimizer.update([self.W[l-1], self.b[l-1]], [dW, db], counter)
def predict(self, X):
lin = linear()
act = relu()
classifier = softmax()
A = X
for l in range(len(self.layer_dims)-1):
Z = lin.forward(A, self.W[l], self.b[l])
A = act.forward(Z)
Y_het = classifier.forward(A)
return Y_het
def plot(self):
plt.figure()
plt.grid()
plt.plot(self.loss)
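# Hedged smoke test (commented because it depends on the sibling modules imported
# above resolving on the local path). Y holds integer class labels, which is what
# softmax.loss expects; the layer sizes are illustrative only.
# if __name__ == '__main__':
#     X = np.random.randn(200, 20)
#     Y = np.random.randint(0, 3, size=200)
#     net = vanilla_nural_network([20, 16, 3])   # [input_dim, hidden_dim, class_num]
#     net.train(X, Y, iteration=500, learning_rate=0.001, keep_prob=0.9, print_loss=False)
#     print('train accuracy:', np.mean(np.argmax(net.predict(X), axis=1) == Y))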
```
#### File: deep learning/function/output_function.py
```python
import numpy as np
class softmax:
def forward(self, A):
A = np.exp(A - np.max(A, axis=1, keepdims=True))
Y_het = A / np.sum(A, axis=1, keepdims=True)
return Y_het
def loss(self, Y, Y_het):
m = Y.shape[0]
        Y_het = np.clip(Y_het, 1e-100, 1 - 1e-100)  # np.clip returns a copy; keep it to avoid log(0)
log_likelihood = -np.log(Y_het[range(m), Y])
loss = (1.0 / m) * np.sum(log_likelihood)
loss = np.squeeze(loss)
return loss
def backward(self, Y, Y_het):
m = Y.shape[0]
Y_het[range(m), Y] -= 1
dA = (1.0 / m) * Y_het
return dA
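if __name__ == '__main__':
    # Hedged sketch: loss/backward above take integer class labels and index Y_het
    # with them; backward modifies Y_het in place, hence the .copy().
    logits = np.array([[2.0, 0.5, -1.0], [0.1, 0.2, 0.3]])
    Y = np.array([0, 2])
    clf = softmax()
    probs = clf.forward(logits)
    print(clf.loss(Y, probs.copy()))        # mean negative log-likelihood
    print(clf.backward(Y, probs.copy()))    # gradient w.r.t. the logits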
```
#### File: deep learning/function/padding_function.py
```python
import numpy as np
class padding:
def __init__(self):
pass
def zero(self, X, pad):
"""
:return: numpy array of shape (m, n_H+2*pad, n_W+2*pad, n_C)
"""
X_pad = np.pad(X, ((0,0),(pad,pad),(pad,pad),(0,0)), 'constant', constant_values = 0)
return X_pad
def constant(self, X, pad,const):
"""
:param const: integer of the value of pad
:return: numpy array of shape (m, n_H+2*pad, n_W+2*pad, n_C)
"""
X_pad = np.pad(X, ((0, 0), (pad, pad), (pad, pad), (0, 0)), 'constant', constant_values=const)
return X_pad
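if __name__ == '__main__':
    # Hedged shape check for the padders above (NHWC layout).
    X = np.ones((2, 4, 4, 3))
    print(padding().zero(X, pad=1).shape)               # (2, 6, 6, 3)
    print(padding().constant(X, pad=2, const=7).shape)  # (2, 8, 8, 3)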
```
#### File: machine-learning-from-scratches/reinfoecement learning (to do)/MontaCarlo.py
```python
import numpy as np
import pandas as pd
class MontaCarloAgent:
def __init__(self, actions,
learning_rate = 0.01,
discount_rate = 0.9,
greedy_rate = 0.9):
# actions is a set of action
self.actions = actions
self.alpha = learning_rate
self.gamma = discount_rate
self.epsilon = greedy_rate
self.q_table = pd.DataFrame(columns=self.actions)
def check_state_exist(self, state):
if state not in self.q_table.index:
# append new state to q table
self.q_table = self.q_table.append(pd.Series(
[0]*len(self.actions),
index=self.q_table.columns,
name=state,))
def pick_action(self, observation):
self.check_state_exist(observation)
# e-greedy selection
if np.random.uniform() < self.epsilon:
state_action = self.q_table.ix[observation, :]
# some actions have same value
            # randomly permute a sequence, or return a permuted range
state_action = state_action.reindex(np.random.permutation(state_action.index))
action = state_action.argmax()
else:
action = np.random.choice(self.actions)
return action
def learn(self, state, action, reward, next_state):
self.check_state_exist(next_state)
q_predict = self.q_table.ix[state, action]
if next_state != 'terminal':
q_target = reward + self.gamma * self.q_table.ix[next_state, :].max()
else:
q_target = reward
error = q_target - q_predict
self.q_table.ix[state, action] += self.alpha * error
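# Hedged interaction sketch (commented; the class relies on DataFrame.ix and
# DataFrame.append, so it needs an older pandas). A toy one-state task where only
# the 'right' action is rewarded:
# agent = MontaCarloAgent(actions=['left', 'right'])
# for _ in range(20):
#     a = agent.pick_action('s0')
#     r, s_next = (1, 'terminal') if a == 'right' else (0, 's0')
#     agent.learn('s0', a, r, s_next)
# print(agent.q_table)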
```
#### File: machine-learning-from-scratches/utili/loss_function.py
```python
import numpy as np
class mse:
def forward(self, A):
Y_het = A
return Y_het
def loss(self, Y, Y_het):
loss = np.mean(np.power((Y - Y_het), 2))
loss = np.squeeze(loss)
return loss
def backward(self, Y, Y_het):
dA = Y - Y_het
return dA
class rmse:
def forward(self, A):
Y_het = A
return Y_het
def loss(self, Y, Y_het):
loss = np.sqrt(np.mean(np.power((Y - Y_het), 2)))
loss = np.squeeze(loss)
return loss
def backward(self, Y, Y_het):
dA = Y - Y_het
return dA
class cross_entropy:
def forward(self, A):
Y_het = A
return Y_het
def loss(self, Y, Y_het):
m = Y.shape[0]
log_likelihood = np.multiply(np.log(Y_het), Y) + np.multiply((1 - Y), np.log(1 - Y_het))
        loss = - np.sum(log_likelihood) / m
loss = np.squeeze(loss)
return loss
def backward(self, Y, Y_het):
dA = - (np.divide(Y, Y_het) - np.divide(1 - Y, 1 - Y_het))
return dA
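if __name__ == '__main__':
    # Hedged sketch of the regression losses above; backward returns the residual Y - Y_het.
    Y = np.array([[1.0], [2.0], [3.0]])
    Y_het = np.array([[1.5], [2.0], [2.0]])
    print(mse().loss(Y, Y_het))              # mean squared error, ~0.4167
    print(rmse().loss(Y, Y_het))             # its square root
    print(mse().backward(Y, Y_het).ravel())  # [-0.5  0.   1. ]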
``` |
{
"source": "jie-mei/NLI",
"score": 3
} |
#### File: NLI/src/embed.py
```python
from abc import ABC, abstractmethod
import os
import typing as t
import gensim
from gensim.scripts.glove2word2vec import glove2word2vec
import numpy as np
import spacy
from util.log import exec_log as log
class OOVError(Exception):
""" This error indicates the querying word is out-of-vocabulary. """
pass
class WordEmbedding(ABC):
""" An abstract collection of embeded words.
Attributes:
dim: The number of dimensions for each embedding vector.
"""
def __init__(self, dim: int, vocab: t.Set[str] = set()) -> None:
self.dim = dim
@abstractmethod
def get(self, word: str) -> np.ndarray:
""" Get the embedding of the given word. """
pass
class IndexedWordEmbedding(WordEmbedding):
""" A collection of embedding words with unique IDs for looking up.
Attributes:
ids (Dict[str, int]): A mapping from word to id.
embeds (np.array): A two dimentional array, where `embed[i]` stores the
embedding for the word with id `i`.
"""
def __init__(self,
vocab: t.List[str],
embedding: WordEmbedding,
oov_embed_fn: t.Callable[[str], np.ndarray],
) -> None:
super(IndexedWordEmbedding, self).__init__(embedding.dim)
# Copy embeddings from the given word embedding object.
self.__ids, self.__embeds = {}, None # type: t.Dict[str, int], np.ndarray
self.__is_oov = [False] * len(vocab)
if vocab:
embeds = [] # type: t.List[str]
for wid, w in enumerate(vocab):
try:
embed = embedding.get(w)
except OOVError:
embed = oov_embed_fn(w)
self.__is_oov[wid] = True
self.__ids[w] = wid
embeds.append(embed)
self.__embeds = np.vstack(embeds)
def get_embeddings(self) -> np.ndarray:
return self.__embeds
def get_id(self, word: str) -> int:
try:
return self.__ids[word]
except KeyError as e:
raise type(e)(str(e) +
' This embedding object is incompetible with the query.')
def __get_word_id(self, word: t.Union[str, int]) -> int:
return word if isinstance(word, int) else self.get_id(word)
def is_oov(self, word: t.Union[str, int]) -> bool:
return self.__is_oov[self.__get_word_id(word)]
def get(self, word: t.Union[str, int]) -> np.array:
return self.__embeds[self.__get_word_id(word)]
class PretrainedEmbedding(WordEmbedding):
""" Pretrained word embeddings. """
def __init__(self, path, dim, binary):
super(PretrainedEmbedding, self).__init__(dim)
self.path = path
self.__binary = binary
self.__model = None
log.info('Read pretrained %s embedding from file: %s' %
(self.__class__.__name__, self.path))
self.__model = gensim.models.KeyedVectors.load_word2vec_format(
self.path, binary=self.__binary)
def get(self, word):
if not self.__model:
self.__load_model()
if word in self.__model.vocab:
return self.__model.word_vec(word)
else:
raise OOVError
class Word2Vec(PretrainedEmbedding):
def __init__(self,
path='/home/jmei/data/GoogleNews-vectors-negative300.bin',
dim=300,
binary=True,
**kwargs):
super(Word2Vec, self).__init__(path, dim, binary, **kwargs)
class GloVe(PretrainedEmbedding):
def __init__(self,
path='/home/jmei/data/glove.840B.300d.txt',
dim=300,
binary=False,
**kwargs):
# Preprocess the original GloVe data to allow using the gensim API.
gensim_path = '{}.gensim.txt'.format(path[:-4])
if not os.path.exists(gensim_path):
glove2word2vec(path, gensim_path)
super(GloVe, self).__init__(gensim_path, dim, binary, **kwargs)
class FastText(PretrainedEmbedding):
def __init__(self,
path='/home/xjiang/data/crawl-300d-2M.vec',
dim=300,
binary=False,
**kwargs):
super(FastText, self).__init__(path, dim, binary, **kwargs)
class GloVeNorm(GloVe):
def __init__(self,
glove_path='/home/jmei/data/glove.840B.300d.txt',
path='/home/jmei/data/glove.840B.300d.norm.txt',
dim=300,
binary=False,
**kwargs):
if not os.path.exists(path):
# Preprocess the original GloVe data to allow using the gensim API.
log.info('Generate normalized GloVe embeddings to file: %s' % path)
with open(glove_path, 'r') as in_file:
with open(path, 'w') as out_file:
for l in in_file:
fields = l.split(' ')
name = fields[:-300]
embed = list(map(float, fields[-300:]))
embed_norm = embed / np.linalg.norm(embed)
out_file.write(' '.join(name) + ' ' +
' '.join(map(str, embed_norm)) + '\n')
super(GloVeNorm, self).__init__(path, dim, binary, **kwargs)
class SpacyGloVe(WordEmbedding):
def __init__(self):
        super(SpacyGloVe, self).__init__(300)
self.nlp = spacy.load('en_vectors_web_lg')
def get(self, word):
return self.nlp.vocab.get_vector(word)
def init(embedding_name: str, *args, **kwargs) -> PretrainedEmbedding:
""" Construct a pretrained word embedding object given its name. """
return globals()[embedding_name](*args, **kwargs)
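if __name__ == '__main__':
    # Hedged sketch that exercises IndexedWordEmbedding with a toy embedding instead
    # of a pretrained file, so no download is needed; the toy class and the OOV token
    # are illustrative only.
    class _ToyEmbedding(WordEmbedding):
        def get(self, word: str) -> np.ndarray:
            if word == 'zzz-unknown':
                raise OOVError
            rng = np.random.RandomState(abs(hash(word)) % (2 ** 31))
            return rng.uniform(-0.1, 0.1, self.dim)
    vocab = ['the', 'premise', 'zzz-unknown']
    indexed = IndexedWordEmbedding(vocab, _ToyEmbedding(dim=8),
                                   oov_embed_fn=lambda w: np.zeros(8))
    print(indexed.get_id('premise'), indexed.is_oov('zzz-unknown'))  # 1 True
    print(indexed.get('the').shape)                                  # (8,)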
```
#### File: NLI/src/_eval_data.py
```python
import data
import numpy as np
LIMIT = 20
def _print_word_embed(dataset, word):
embed = dataset.word_embedding
wid = embed.get_id(word)
wemb = embed.get(word)
is_oov = embed.is_oov(word)
size = np.linalg.norm(wemb)
print('%10d %20s %s %4.2f %s' % (wid, word, 'OOV' if is_oov else ' ', size, wemb[:3]))
def print_embed(data_cls, mode, embed_cls, seed):
dataset = data.load_dataset(data_cls, mode, embed_cls, seed)
print(dataset.x1_words[0])
embed = dataset.word_embedding
for i, (word, wid) in enumerate(embed._IndexedWordEmbedding__ids.items()):
if i < LIMIT:
wemb = embed.get(word)
is_oov = embed.is_oov(word)
size = np.linalg.norm(wemb)
print('%10d %20s %s %4.2f %s' % (wid, word, 'OOV' if is_oov else ' ', size, wemb[:3]))
if '<EOS>' in embed._IndexedWordEmbedding__ids:
_print_word_embed(dataset, '<EOS>')
if '<BOS>' in embed._IndexedWordEmbedding__ids:
_print_word_embed(dataset, '<BOS>')
def print_tag(data_cls, mode, embed_cls, seed):
dataset = data.load_dataset(data_cls, mode, embed_cls, seed)
for i in range(3):
words = dataset.x2_words[i]
tags = dataset.x2_feats[i][1]
print([(w, t) for w, t in zip(words, tags)])
if __name__ == '__main__':
# Print
print_tag('SNLI', 'test', 'GloVe', 6523)
print()
print_tag('SNLI', 'test', 'GloVeNorm', 6523)
print()
print_tag('SNLI', 'train', 'GloVeNorm', 6523)
print()
print()
print()
```
#### File: src/nn/base.py
```python
from abc import ABC
import tensorflow as tf
import numpy as np
import data
from util.display import ReprMixin
from util.log import exec_log as log
WORD_SEQ_LEN = 16
class Model(ReprMixin, ABC):
""" A base NN model to conduct pairwised text analysis. """
def __init__(self):
with tf.name_scope('input'):
self.handle = tf.placeholder(tf.string, shape=[], name='handle')
self.data_iterator = tf.data.Iterator.from_string_handle(
string_handle=self.handle,
output_types=(tf.int32,) * 11,
output_shapes=(tf.TensorShape([None, None]),
tf.TensorShape([None, None]),
tf.TensorShape([None]),
tf.TensorShape([None]),
tf.TensorShape([None]),
tf.TensorShape([None, None, WORD_SEQ_LEN]),
tf.TensorShape([None, None, WORD_SEQ_LEN]),
tf.TensorShape([None, None, 4]),
tf.TensorShape([None, None, 4]),
tf.TensorShape([None, None]),
tf.TensorShape([None, None])))
iter_next = self.data_iterator.get_next()
self.x1 = tf.identity(iter_next[0], name='id1')
self.x2 = tf.identity(iter_next[1], name='id2')
self.y = tf.identity(iter_next[2], name='y')
self.len1 = tf.identity(iter_next[3], name='len1')
self.len2 = tf.identity(iter_next[4], name='len2')
self.char1 = tf.identity(iter_next[5], name='char1')
self.char2 = tf.identity(iter_next[6], name='char2')
self.temp1 = tf.identity(iter_next[7], name='temp1')
self.temp2 = tf.identity(iter_next[8], name='temp2')
self.tag1 = tf.identity(iter_next[9], name='tag1')
self.tag2 = tf.identity(iter_next[10], name='tag2')
self.keep_prob = tf.placeholder(tf.float32, shape=[])
self.is_training = tf.placeholder(tf.bool, shape=[])
def count_parameters(self):
total = 0
for var in tf.trainable_variables():
num = 1
for dim in var.get_shape():
num *= int(dim)
total += num
return total
class SoftmaxCrossEntropyMixin(ABC):
def evaluate_and_loss(self, y_hat):
with tf.name_scope('predict'):
probs = tf.nn.softmax(y_hat)
self.prediction = tf.argmax(probs, axis=1, output_type=tf.int32)
with tf.name_scope('accuracy'):
self.performance = tf.reduce_mean(
tf.cast(tf.equal(self.prediction, self.y), tf.float32))
with tf.name_scope('loss'):
#labels = tf.one_hot(self.y, self._class_num, dtype=tf.float32)
ce_loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=y_hat, labels=self.y))
rglz_loss = tf.reduce_sum(tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES))
self.loss = tf.add(ce_loss, rglz_loss)
class WeightedSoftmaxCrossEntropyMixin(ABC):
def evaluate_and_loss(self, y_hat, y_weights):
with tf.name_scope('predict'):
probs = tf.nn.softmax(y_hat)
self.prediction = tf.argmax(probs, axis=1, output_type=tf.int32)
with tf.name_scope('accuracy'):
self.performance = tf.reduce_mean(
tf.cast(tf.equal(self.prediction, self.y), tf.float32))
with tf.name_scope('loss'):
y_w = tf.constant(y_weights, dtype=tf.float32, name='class_weights')
#labels = tf.one_hot(self.y, self._class_num, dtype=tf.float32)
ce_loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=y_hat * y_w, labels=self.y))
rglz_loss = tf.reduce_sum(tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES))
self.loss = tf.add(ce_loss, rglz_loss)
```
#### File: src/nn/decomposable.py
```python
import typing as t
import numpy as np
import tensorflow as tf
import embed
import op
import data
from nn.base import Model, SoftmaxCrossEntropyMixin
from util.log import exec_log as log
class Decomposable(SoftmaxCrossEntropyMixin, Model):
def __init__(self,
embeddings: embed.IndexedWordEmbedding,
class_num: int,
project_dim: int = 200,
intra_attention: bool = False,
bias_init: float = 0,
) -> None:
super(Decomposable, self).__init__()
self.project_dim = project_dim
self.intra_attention = intra_attention
self.bias_init = bias_init
self._class_num = class_num
self.keep_prob = tf.placeholder(tf.float32, shape=[])
with tf.variable_scope('embed') as s:
embed = tf.constant(embeddings.get_embeddings(),
dtype=tf.float32,
name='embeddings')
x1, x2 = map(lambda x: tf.gather(embed, x), [self.x1, self.x2])
project = lambda x: self.linear(x, self.project_dim, bias=False)
x1, x2 = map(project, [x1, x2])
x1, x2 = self.intra(x1, x2) if intra_attention else (x1, x2)
with tf.variable_scope('attent') as s:
sim = self.attention(x1, x2)
self.attent_map = sim # for visualization
alpha = tf.matmul(tf.nn.softmax(tf.matrix_transpose(sim)), x1)
beta = tf.matmul(tf.nn.softmax(sim), x2)
with tf.variable_scope('compare') as s:
v1 = self.forward(tf.concat([x1, beta ], 2))
v2 = self.forward(tf.concat([x2, alpha], 2))
with tf.variable_scope('aggregate') as s:
v1 = tf.reduce_sum(v1, axis=1)
v2 = tf.reduce_sum(v2, axis=1)
y_hat = self.forward(tf.concat([v1, v2], 1))
y_hat = self.linear(y_hat, dim=self._class_num)
self.evaluate_and_loss(y_hat)
def linear(self, inputs: tf.Tensor, dim: int, bias=True):
return op.linear(inputs, dim,
keep_prob=1.0,
activation_fn=None,
bias=bias)
def forward(self,
inputs: tf.Tensor,
scope: t.Union[str, tf.VariableScope] = None,
):
scope = scope if scope else 'forward'
op_kwargs = {
'keep_prob': self.keep_prob,
'activation_fn': tf.nn.relu
}
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
t = op.linear(inputs,
dim=self.project_dim,
scope='linear-1',
**op_kwargs)
t = op.linear(t, scope='linear-2', **op_kwargs)
return t
def post_project(self, x1, x2):
return x1, x2
def attention(self,
x1: tf.Tensor,
x2: tf.Tensor,
scope: t.Union[str, tf.VariableScope] = None,
):
"""
Inputs: [batch, seq_len_1, embed_dim]
[batch, seq_len_2, embed_dim]
Returns: [batch, seq_len_1, seq_len_2]
"""
with tf.name_scope('attention') as s:
x1 = self.forward(x1)
x2 = self.forward(x2)
return tf.matmul(x1, tf.matrix_transpose(x2))
def intra(self, x1, x2):
""" Intra-attention layer. """
with tf.variable_scope('intra') as s:
def attent(x):
with tf.variable_scope('distance_bias', reuse=tf.AUTO_REUSE):
idx = tf.range(0, tf.shape(x)[1], 1)
dist = tf.abs(tf.expand_dims(idx, 0) - tf.expand_dims(idx, 1))
bias = tf.get_variable('bias', [1])
bias *= tf.cast(dist >= 10, tf.float32)
bias = tf.expand_dims(bias, 0)
att = self.attention(x, x)
pan = tf.nn.softmax(att + bias)
xp = tf.einsum('bik,bkj->bij', pan, x)
return tf.concat([x, xp], 2)
return map(attent, [x1, x2])
class DecomposableMod(SoftmaxCrossEntropyMixin, Model):
def __init__(self,
embeddings: embed.IndexedWordEmbedding,
class_num: int,
project_dim: int = 200,
intra_attention: bool = False,
bias_init: float = 0,
char_filer_width: int = 5,
char_embed_dim: int = 8,
char_conv_dim: int = 100,
lstm_unit: int = 300,
) -> None:
super(DecomposableMod, self).__init__()
self._class_num = class_num
self.project_dim = project_dim
self.intra_attention = intra_attention
self.bias_init = bias_init
self.lstm_unit = lstm_unit
self.char_filter_width = char_filer_width
self.char_embed_dim = char_embed_dim
self.char_conv_dim = char_conv_dim
self.keep_prob = tf.placeholder(tf.float32, shape=[])
with tf.variable_scope('embed', reuse=tf.AUTO_REUSE) as s:
# Word pretrained embeddings (300D)
word_embed = tf.constant(embeddings.get_embeddings(),
dtype=tf.float32,
name='word_embed')
word_embed1, word_embed2 = map(lambda x: tf.gather(word_embed, x),
[self.x1, self.x2])
# Tag one-hot embeddings (72D)
#def embed_tags(x_ids, x_tags):
# tag_weight = op.get_variable('tag_weight',
# shape=(data.SNLI.TAGS, 1))
# x_tags = x_tags[:,:tf.shape(x_ids)[1]]
# return tf.gather(tag_weight, x_tags)
# # shape: [batch, seq_len, 1]
#tag_embed1, tag_embed2 = map(embed_tags,
# *zip((self.x1, self.tag1), (self.x2, self.tag2)))
def embed_tags(x_ids, x_tags, x_len):
x_tags *= tf.sequence_mask(x_len, tf.shape(x_tags)[1],
dtype=tf.int32)
# shape: [batch, seq_len]
tag_embed = tf.one_hot(x_tags, data.SNLI.TAGS,
dtype=tf.float32,
name='tag_embed')
return tag_embed[:,:tf.shape(x_ids)[1]]
tag_embed1, tag_embed2 = map(embed_tags,
*zip((self.x1, self.tag1, self.len1),
(self.x2, self.tag2, self.len2)))
# Merge embeddings
#x1 = tf.concat([word_embed1, char_embed1, tag_embed1], 2)
#x2 = tf.concat([word_embed2, char_embed2, tag_embed2], 2)
x1 = tf.concat([word_embed1, tag_embed1], 2)
x2 = tf.concat([word_embed2, tag_embed2], 2)
#x1 = word_embed1 * tag_embed1
#x2 = word_embed2 * tag_embed2
#import pdb; pdb.set_trace()
x1, x2 = self.intra(x1, x2) if intra_attention else (x1, x2)
with tf.variable_scope('encode', reuse=tf.AUTO_REUSE) as s:
def lstm_encode(x):
# shape: [batch, seq_len, embed_dim]
(outputs_fw, outputs_bw), (states_fw, states_bw) = \
tf.nn.bidirectional_dynamic_rnn(
cell_fw=tf.nn.rnn_cell.LSTMCell(self.lstm_unit),
cell_bw=tf.nn.rnn_cell.LSTMCell(self.lstm_unit),
inputs=x,
dtype=tf.float32)
outputs = tf.concat([outputs_fw, outputs_bw], 2)
return tf.nn.dropout(outputs, self.keep_prob)
# shape: [batch, seq_len, embed_dim * 2]
x1, x2 = map(lstm_encode, [x1, x2])
with tf.variable_scope('attent') as s:
#sim = self.attention(x1, x2)
sim = tf.matmul(x1, tf.matrix_transpose(x2))
alpha = tf.matmul(tf.nn.softmax(tf.matrix_transpose(sim)), x1)
beta = tf.matmul(tf.nn.softmax(sim), x2)
with tf.variable_scope('compare') as s:
v1 = self.forward(tf.concat([x1, beta ], 2))
v2 = self.forward(tf.concat([x2, alpha], 2))
with tf.variable_scope('aggregate') as s:
v1 = tf.reduce_sum(v1, axis=1)
v2 = tf.reduce_sum(v2, axis=1)
y_hat = self.forward(tf.concat([v1, v2], 1))
y_hat = self.linear(y_hat, dim=self._class_num)
self.evaluate_and_loss(y_hat)
def linear(self, inputs: tf.Tensor, dim: int, bias=True):
return op.linear(inputs, dim,
keep_prob=1.0,
activation_fn=None,
bias=bias)
def forward(self,
inputs: tf.Tensor,
scope: t.Union[str, tf.VariableScope] = None,
):
scope = scope if scope else 'forward'
op_kwargs = {
'keep_prob': self.keep_prob,
'activation_fn': tf.nn.relu
}
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
t = op.linear(inputs,
dim=self.project_dim,
scope='linear-1',
**op_kwargs)
t = op.linear(t, scope='linear-2', **op_kwargs)
return t
def post_project(self, x1, x2):
return x1, x2
def attention(self,
x1: tf.Tensor,
x2: tf.Tensor,
scope: t.Union[str, tf.VariableScope] = None,
):
"""
Inputs: [batch, seq_len_1, embed_dim]
[batch, seq_len_2, embed_dim]
Returns: [batch, seq_len_1, seq_len_2]
"""
with tf.name_scope('attention') as s:
x1 = self.forward(x1)
x2 = self.forward(x2)
return tf.matmul(x1, tf.matrix_transpose(x2))
def intra(self, x1, x2):
""" Intra-attention layer. """
with tf.variable_scope('intra') as s:
def attent(x):
with tf.variable_scope('distance_bias', reuse=tf.AUTO_REUSE):
idx = tf.range(0, tf.shape(x)[1], 1)
dist = tf.abs(tf.expand_dims(idx, 0) - tf.expand_dims(idx, 1))
bias = tf.get_variable('bias', [1])
bias *= tf.cast(dist >= 10, tf.float32)
bias = tf.expand_dims(bias, 0)
att = self.attention(x, x)
pan = tf.nn.softmax(att + bias)
xp = tf.einsum('bik,bkj->bij', pan, x)
return tf.concat([x, xp], 2)
return map(attent, [x1, x2])
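if __name__ == '__main__':
    # Hedged standalone sketch of the soft-alignment step used in the attend/compare
    # blocks above; assumes TensorFlow 1.x graph mode, as the model code does.
    a = tf.constant(np.random.randn(2, 5, 4), tf.float32)  # [batch, len1, dim]
    b = tf.constant(np.random.randn(2, 7, 4), tf.float32)  # [batch, len2, dim]
    sim = tf.matmul(a, tf.matrix_transpose(b))             # [batch, len1, len2]
    beta = tf.matmul(tf.nn.softmax(sim), b)                # b aligned to each token of a
    alpha = tf.matmul(tf.nn.softmax(tf.matrix_transpose(sim)), a)
    with tf.Session() as sess:
        print([v.shape for v in sess.run([beta, alpha])])  # [(2, 5, 4), (2, 7, 4)]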
```
#### File: src/nn/diin.py
```python
import typing as t
import numpy as np
import tensorflow as tf
import embed
import data
import op
import nn
from nn.base import Model, SoftmaxCrossEntropyMixin
from util.log import exec_log as log
from util.debug import *
class DIIN(SoftmaxCrossEntropyMixin, Model):
def __init__(self,
embeddings: embed.IndexedWordEmbedding,
class_num: int,
scale_l1: float = 0.0,
scale_l2: float = 0.000001,
encode_dim: int = 300,
fact_intr_dim: int = 10,
fact_proj_dim: int = -1,
char_filer_width: int = 5,
char_embed_dim: int = 8,
char_conv_dim: int = 100,
lstm_unit: int = 300,
) -> None:
super(DIIN, self).__init__()
self._class_num = class_num
self.scale_l1 = scale_l1
self.scale_l2 = scale_l2
self.encode_dim = encode_dim
self.fact_proj_dim = fact_proj_dim
self.fact_intr_dim = fact_intr_dim
self.char_filter_width = char_filer_width
self.char_embed_dim = char_embed_dim
self.char_conv_dim = char_conv_dim
self.lstm_unit = lstm_unit
self.keep_prob = tf.placeholder(tf.float32, shape=[])
batch_size = tf.shape(self.x1)[0]
padded_len1 = tf.shape(self.x1)[1]
padded_len2 = tf.shape(self.x2)[1]
op_kwargs = {'scale_l1': self.scale_l1,
'scale_l2': self.scale_l2,
'keep_prob': self.keep_prob}
with tf.variable_scope('embed') as s:
# Word pretrained embeddings (300D)
word_embed = tf.constant(embeddings.get_embeddings(),
dtype=tf.float32,
name='word_embed')
word_embed1, word_embed2 = map(lambda x: tf.gather(word_embed, x),
[self.x1, self.x2])
# Character convolutional embeddings (`char_conv_dim`D)
char_embed = op.get_variable('char_embed',
shape=(256, char_embed_dim))
char_filter = op.get_variable('char_filter',
shape=(1, self.char_filter_width, self.char_embed_dim,
self.char_conv_dim))
def embed_chars(x_char):
embed = tf.gather(char_embed, x_char)
# shape: [batch, seq_len, word_len, embed_dim]
conv = tf.nn.conv2d(embed, char_filter, [1, 1, 1, 1], 'VALID')
# shape: [batch, seq_len, word_len - filter_width + 1, conv_dim]
return tf.reduce_max(conv, 2)
# shape: [batch, seq_len, conv_dim]
char_embed1, char_embed2 = map(embed_chars, [self.char1, self.char2])
# Tag one-hot embeddings (72D)
def embed_tags(x_ids, x_tags, x_len):
x_tags *= tf.sequence_mask(x_len, tf.shape(x_tags)[1],
dtype=tf.int32)
# shape: [batch, seq_len]
tag_embed = tf.one_hot(x_tags, data.SNLI.TAGS,
dtype=tf.float32,
name='tag_embed')
return tag_embed[:,:tf.shape(x_ids)[1]]
tag_embed1, tag_embed2 = map(embed_tags,
*zip((self.x1, self.tag1, self.len1),
(self.x2, self.tag2, self.len2)))
# Merge embeddings
x1 = tf.concat([word_embed1, char_embed1, tag_embed1], 2)
x2 = tf.concat([word_embed2, char_embed2, tag_embed2], 2)
with tf.variable_scope('encode') as s:
# Highway encoding
def highway_encode(x):
x = op.highway(x, scope='hw-1', dim=self.encode_dim, **op_kwargs)
x = op.highway(x, scope='hw-2', dim=self.encode_dim, **op_kwargs)
return x
x1, x2 = map(highway_encode, [x1, x2])
# shape: [batch, seq_len, encode_dim]
# Self-attention
def self_attent(x, padded_len):
t1 = tf.tile(tf.expand_dims(x, 2), [1, 1, tf.shape(x)[1], 1])
t2 = tf.tile(tf.expand_dims(x, 1), [1, tf.shape(x)[1], 1, 1])
# shape: [batch, seq_len, seq_len, encode_dim]
t = tf.reshape(tf.concat([t1, t2, t1 * t2], 3),
[batch_size, padded_len ** 2, 3 * self.encode_dim])
# shape: [batch, seq_len^2, 3 * encode_dim]
att = op.linear(t, dim=1, bias=None, activation_fn=None)
# shape: [batch, seq_len^2, 1]
att = tf.reshape(att, [batch_size, padded_len, padded_len])
# shape: [batch, seq_len, seq_len]
soft_align = tf.einsum('bik,bkj->bij', tf.nn.softmax(att), x)
return op.gated_fuse(x, soft_align)
# shape: [batch, seq_len, encode_dim]
x1, x2 = map(lambda x, l: self_attent(x, l),
*zip((x1, padded_len1), (x2, padded_len2)))
with tf.variable_scope('interact') as s:
inter = tf.expand_dims(x1, 2) * tf.expand_dims(x2, 1)
# shape: [batch, seq_len1, seq_len2, encode_dim]
with tf.variable_scope('extract') as s:
# Dense Net
feats = op.conv2d(inter, int(self.encode_dim * 0.3), 1)
# shape: [batch, seq_len1, seq_len2, int(encode_dim * 0.3)]
feats = self.dense_block(feats, 'dense-block-1')
feats = self.dense_trans(feats, 'dense-trans-1')
feats = self.dense_block(feats, 'dense-block-2')
feats = self.dense_trans(feats, 'dense-trans-2')
feats = self.dense_block(feats, 'dense-block-3')
feats = self.dense_trans(feats, 'dense-trans-3')
shape = tf.shape(feats)
feats = tf.reshape(feats, [shape[0], shape[1] * shape[2] * shape[3]])
self.evaluate_and_loss(feats)
def dense_block(self,
feats,
scope,
num_layers: int = 8,
growth_rate: int = 20,
kernel_size: int = 3):
with tf.variable_scope(scope):
for i in range(num_layers):
new_feats = op.conv2d(feats, growth_rate,
(kernel_size, kernel_size), scope='conv2d-%d' % i)
feats = tf.concat([feats, new_feats], 3)
return feats
def dense_trans(self,
feats,
scope,
transition_rate: float = 0.5,):
with tf.variable_scope(scope):
out_dim = int(int(feats.shape[-1]) * transition_rate)
feats = op.conv2d(feats, out_dim, 1, activation_fn=None)
feats = tf.nn.max_pool(feats, [1,2,2,1], [1,2,2,1], 'VALID')
return feats
```
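For intuition about the `extract` stage, the channel width after each `dense_block`/`dense_trans` pair follows directly from the defaults above (`num_layers=8`, `growth_rate=20`, `transition_rate=0.5`) and the initial 1x1 conv of roughly `encode_dim * 0.3` channels. A small sketch that only tracks this arithmetic, assuming the default `encode_dim=300`:
```python
def dense_channels(c, num_layers=8, growth_rate=20, transition_rate=0.5):
    """Channels after one dense block followed by one transition layer."""
    c = c + num_layers * growth_rate   # each layer concatenates growth_rate new channels
    return int(c * transition_rate)    # transition conv shrinks the width, then 2x2 pool

c = int(300 * 0.3)                     # initial 1x1 conv output
for i in range(3):
    c = dense_channels(c)
    print('after block/trans %d: %d channels' % (i + 1, c))
# after block/trans 1: 125, 2: 142, 3: 151 channels
```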
#### File: src/nn/esim.py
```python
import typing as t
import numpy as np
import tensorflow as tf
import embed
import data
import op
import nn
from nn.base import Model, WeightedSoftmaxCrossEntropyMixin
from util.log import exec_log as log
from util.debug import *
class ESIM(WeightedSoftmaxCrossEntropyMixin, Model):
def __init__(self,
embeddings: embed.IndexedWordEmbedding,
class_num: int,
scale_l1: float = 0.0,
scale_l2: float = 0.0,
lstm_unit: int = 300,
seq_len: int = 0,
char_filer_width: int = 5,
char_embed_dim: int = 8,
char_conv_dim: int = 100,
class_weights: t.List[float] = [1.1, 1, 1],
) -> None:
super(ESIM, self).__init__()
self._class_num = class_num
self.class_weights = class_weights
self.scale_l1 = scale_l1
self.scale_l2 = scale_l2
self.lstm_unit = lstm_unit
self.seq_len = seq_len
self.char_filter_width = char_filer_width
self.char_embed_dim = char_embed_dim
self.char_conv_dim = char_conv_dim
op_kwargs = {'scale_l1': self.scale_l1,
'scale_l2': self.scale_l2,
'keep_prob': self.keep_prob,
'drop_after': False}
with tf.variable_scope('embed') as s:
def set_seq_len(x):
x_len = tf.shape(x)[1]
return tf.cond(
tf.less(self.seq_len, x_len),
lambda: x[:,:self.seq_len],
lambda: tf.pad(x, [[0, 0], [0, self.seq_len - x_len]]))
if self.seq_len > 0:
x1, x2 = map(set_seq_len, [self.x1, self.x2])
else:
x1, x2 = self.x1, self.x2
#embed_init_var = embeddings.get_embeddings()
#embed = op.get_variable('embeddings',
# shape=embed_init_var.shape,
# initializer=tf.constant_initializer(embed_init_var))
#embed = tf.constant(embeddings.get_embeddings(),
# dtype=tf.float32,
# name='embeddings')
#x1, x2 = map(lambda x: tf.gather(embed, x), [x1, x2])
# Word pretrained embeddings (300D)
word_embed = tf.constant(embeddings.get_embeddings(),
dtype=tf.float32,
name='word_embed')
word_embed1, word_embed2 = map(lambda x: tf.gather(word_embed, x),
[self.x1, self.x2])
embed_dim = word_embed.get_shape()[-1]
# Character convolutional embeddings (`char_conv_dim`D)
char_embed = op.get_variable('char_embed',
shape=(256, char_embed_dim))
char_filter = op.get_variable('char_filter',
shape=(1, self.char_filter_width, self.char_embed_dim,
self.char_conv_dim))
def embed_chars(x_char):
embed = tf.gather(char_embed, x_char)
# shape: [batch, seq_len, word_len, embed_dim]
conv = tf.nn.conv2d(embed, char_filter, [1, 1, 1, 1], 'VALID')
# shape: [batch, seq_len, word_len - filter_width + 1, conv_dim]
return tf.reduce_max(conv, 2)
# shape: [batch, seq_len, conv_dim]
char_embed1, char_embed2 = map(embed_chars, [self.char1, self.char2])
# Tag one-hot embeddings (72D)
def embed_tags(x_ids, x_tags, x_len):
x_tags *= tf.sequence_mask(x_len, tf.shape(x_tags)[1],
dtype=tf.int32)
# shape: [batch, seq_len]
tag_embed = tf.one_hot(x_tags, data.SNLI.TAGS,
dtype=tf.float32,
name='tag_embed')
return tag_embed[:,:tf.shape(x_ids)[1]]
tag_embed1, tag_embed2 = map(embed_tags,
*zip((self.x1, self.tag1, self.len1),
(self.x2, self.tag2, self.len2)))
# Merge embeddings
#x1 = tf.concat([word_embed1, char_embed1, tag_embed1], 2)
#x2 = tf.concat([word_embed2, char_embed2, tag_embed2], 2)
x1 = tf.concat([word_embed1, char_embed1], 2)
x2 = tf.concat([word_embed2, char_embed2], 2)
x1 = self.unfold_tree(x1, self.temp1, self.tag1, self.len1, 'x1')
x2 = self.unfold_tree(x2, self.temp2, self.tag2, self.len2, 'x2')
with tf.variable_scope('encode', reuse=tf.AUTO_REUSE) as s:
x1, x2 = map(lambda x: tf.nn.dropout(x, self.keep_prob), [x1, x2])
#import pdb; pdb.set_trace()
x1, x2 = map(self.bilstm, [x1, x2])
# shape: [batch, seq_len, embed_dim * 2]
with tf.variable_scope('attent') as s:
sim = tf.matmul(x1, tf.matrix_transpose(x2))
alpha = tf.matmul(tf.nn.softmax(tf.matrix_transpose(sim)), x1)
beta = tf.matmul(tf.nn.softmax(sim), x2)
x1 = tf.concat([x1, beta, x1 * beta, x1 - beta ], 2)
x2 = tf.concat([x2, alpha, x2 * alpha, x2 - alpha], 2)
# shape: [batch, seq_len, embed_dim * 8]
with tf.variable_scope('decode', reuse=tf.AUTO_REUSE) as s:
x1, x2 = map(lambda x: op.linear(x, dim=embed_dim, **op_kwargs),
[x1, x2])
# NOTE: dropout here in the author's code
# shape: [batch, seq_len, embed_dim]
x1, x2 = map(self.bilstm, [x1, x2])
# shape: [batch, seq_len, embed_dim * 2]
with tf.variable_scope('aggregate') as s:
def pool(x):
return tf.concat([
tf.reduce_sum(x, axis=1),
tf.reduce_max(x, axis=1)
], 1)
y_hat = op.linear(tf.concat([pool(x1), pool(x2)], 1),
dim=embed_dim,
activation_fn=tf.nn.tanh,
scope='linear-1',
**op_kwargs)
# shape: [batch, embed_dim * 8]
y_hat = op.linear(y_hat,
dim=self._class_num,
activation_fn=None,
scope='linear-2',
**op_kwargs)
# shape: [batch, class_num]
self.evaluate_and_loss(y_hat, self.class_weights)
def bilstm(self, x):
# shape: [batch, seq_len, embed_dim]
if self.seq_len > 0:
# Static RNN
#lstm = tf.contrib.cudnn_rnn.CudnnLSTM(1, self.lstm_unit,
# direction='bidirectional')
#return tf.transpose(lstm(tf.transpose(x, [1, 0, 2]))[0], [1, 0, 2])
x_seq = tf.unstack(
tf.reshape(x, [-1, self.seq_len, x.get_shape()[-1]]),
axis=1)
out, _, _ = tf.nn.static_bidirectional_rnn(
cell_fw=tf.contrib.rnn.LSTMBlockCell(self.lstm_unit),
cell_bw=tf.contrib.rnn.LSTMBlockCell(self.lstm_unit),
inputs=x_seq,
dtype=tf.float32)
return tf.stack(out, axis=1)
else:
# Dynamic RNN
(outputs_fw, outputs_bw), (states_fw, states_bw) = \
tf.nn.bidirectional_dynamic_rnn(
cell_fw=tf.contrib.rnn.LSTMBlockCell(self.lstm_unit),
cell_bw=tf.contrib.rnn.LSTMBlockCell(self.lstm_unit),
inputs=x,
dtype=tf.float32)
return tf.concat([outputs_fw, outputs_bw], 2)
# shape: [batch, seq_len, embed_dim * 2]
def unfold_tree(self,
embed: tf.Tensor, # 3D: [batch, seq_len, embed_dim]
temp: tf.Tensor, # 3D: [batch, temp_len, temp_size]
tag: tf.Tensor, # 2D: [batch, seq_len + temp_len]
len_: tf.Tensor, # 1D: [batch]
suffix: str):
with tf.name_scope('unfold_tree_%s' % suffix):
batch_size = tf.shape(embed)[0]
# Create a container of size (x.len + temp.len + 1) for the
# unfolded tree embeddings, with one zero embedding
# vector padded at the head.
tree = tf.pad(embed, [[0, 0], [1, tf.shape(temp)[1]], [0, 0]])
# NOTE: This is a trick to fix the embedding dimension at graph
# construction time. It is needed for initializing variables (e.g.
# in a linear transformation layer).
tree = tf.reshape(tree, [batch_size, -1, embed.get_shape()[-1]])
# shape: [batch, 1 + seq_len + temp_len, embed_dim]
# Add batch index to each template position.
temp = tf.expand_dims(temp, -1)
bidx = tf.tile(tf.reshape(tf.range(batch_size), [-1, 1, 1, 1]),
[1, tf.shape(temp)[1], tf.shape(temp)[2], 1])
temp = tf.concat([bidx, temp], axis=3)
# shape: [batch, temp_len, temp_size, 2]
temp = tf.cast(temp, tf.float32) # NOTE: register tf.gather in GPU.
# Pad a leading 0 to align with the unfolded tree
tag = tf.pad(tag, [[0, 0], [1, 0]])
tag = tf.cast(tag, tf.float32) # NOTE: register tf.gather in GPU.
# shape: [batch, 1 + tag_len]
# NOTE: tag_len <= seq_len + temp_len
# find the next available position (zero embedding)
top = tf.expand_dims(len_ + 1, -1)
# shape: [batch, 1]
i = tf.constant(1)
def loop_cond(i, tree, temp, tag, batch_size):
return tf.less(i, tf.shape(temp)[1])
def loop_body(i, tree, temp, tag, batch_size):
c_idx = tf.gather(temp, i, axis=1)
c_idx = tf.cast(c_idx, tf.int32) # NOTE: restore type
# shape: [batch, temp_size, 2]
p_idx = tf.concat(
[tf.expand_dims(tf.range(batch_size), -1), top + i],
axis=1)
# shape: [batch, 2]
p_tag = tf.gather_nd(tag, p_idx)
p_tag = tf.cast(p_tag, tf.int32) # NOTE: restore type
# shape: [batch]
c_embed = tf.gather_nd(tree, c_idx)
# shape: [batch, temp_size, embed_dim]
c_tag = tf.gather_nd(tag, c_idx)
c_tag = tf.cast(c_tag, tf.int32) # NOTE: restore type
# shape: [batch, temp_size]
p_embed = self.merge_fn(c_embed, c_tag, p_tag)
tree += tf.scatter_nd(
indices=p_idx,
updates=p_embed,
shape=tf.shape(tree))
i += 1
return [i, tree, temp, tag, batch_size]
_, x_loop, _, _, _ = tf.while_loop(loop_cond, loop_body,
[i, tree, temp, tag, batch_size],
parallel_iterations=1)
return x_loop
def merge_fn(self,
c_embeds: tf.Tensor, # 3D: [batch, temp_size, embed_dim]
c_tags: tf.Tensor, # 2D: [batch, temp_size]
p_tags: tf.Tensor # 1D: [batch]
)-> tf.Tensor: # 2D: [batch, embed_dim]
return tf.reduce_mean(c_embeds, axis=1)
```
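The `attent` scope implements the usual ESIM local-inference enhancement: each encoded sequence is concatenated with its aligned counterpart plus their element-wise product and difference, so the per-token width grows fourfold (the `embed_dim * 8` comment counts from the pre-BiLSTM width, which the encoder already doubled). A NumPy sketch of that composition with toy shapes:
```python
import numpy as np

batch, seq_len, dim = 2, 4, 6
x1 = np.random.randn(batch, seq_len, dim)     # BiLSTM output for sentence 1
beta = np.random.randn(batch, seq_len, dim)   # stand-in for the aligned summary of sentence 2

m1 = np.concatenate([x1, beta, x1 * beta, x1 - beta], axis=2)
print(m1.shape)                               # (2, 4, 24) -> 4x the encoded width
```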
#### File: src/nn/ngram.py
```python
from abc import ABC, abstractmethod
import typing as t
import numpy as np
import tensorflow as tf
import embed
from nn.decomposable import Decomposable
class Ngram(Decomposable, ABC):
def __init__(self,
ngram_size: t.Union[int, t.List[int]] = [2, 3],
**kwargs
)-> None:
self.ngram_size = ngram_size \
if isinstance(ngram_size, list) else [ngram_size]
super(Ngram, self).__init__(**kwargs)
def post_project(self, x1: tf.Tensor, x2: tf.Tensor):
for size in self.ngram_size:
x1_ngram, x2_ngram = self.ngram_embed(x1, x2, size)
x1 = tf.concat([x1, x1_ngram], 1)
x2 = tf.concat([x2, x2_ngram], 1)
return x1, x2
@abstractmethod
def ngram_embed(self,
x1: tf.Tensor,
x2: tf.Tensor,
ngram_size: int,
weight_stddev: float = 0.01
)-> tf.Tensor:
pass
class ConvNgram(Ngram):
def ngram_embed(self,
x1: tf.Tensor,
x2: tf.Tensor,
ngram_size: int,
weight_stddev: float = 0.01,
)-> tf.Tensor:
def ngram_embed_impl(x):
t = tf.expand_dims(x, -1)
t = tf.layers.conv2d(t,
filters=int(t.shape[2]),
kernel_size=(ngram_size, t.shape[2]),
kernel_initializer=tf.initializers.truncated_normal(
stddev=weight_stddev),
name='%d-gram-conv' % ngram_size,
reuse=tf.AUTO_REUSE)
t = tf.nn.relu(t)
t = tf.squeeze(t, [2])
return t
return map(ngram_embed_impl, [x1, x2])
class TagConvNgram(ConvNgram):
def __init__(self, tags_num: int = 45, **kwargs)-> None:
self.tags_num = tags_num
super(TagConvNgram, self).__init__(**kwargs)
def post_project(self, x1: tf.Tensor, x2: tf.Tensor):
tag_weight = tf.get_variable('tag_weight',
shape=(self.tags_num, 100),
dtype=tf.float32,
initializer=tf.initializers.truncated_normal(stddev=0.01))
#initializer=tf.initializers.constant(1))
x1 = tf.concat([x1, tf.gather(tag_weight, self.tag1)], 2)
x2 = tf.concat([x2, tf.gather(tag_weight, self.tag2)], 2)
x1_ngrams, x2_ngrams = [x1], [x2]
for size in self.ngram_size:
with tf.name_scope('%d-gram-embed' % size):
x1_ngram, x2_ngram = self.ngram_embed(x1, x2, size)
x1_ngrams.append(x1_ngram)
x2_ngrams.append(x2_ngram)
x1 = tf.concat(x1_ngrams, 1)
x2 = tf.concat(x2_ngrams, 1)
return x1, x2
class TagGatedConvNgram(ConvNgram):
def __init__(self, tags_num: int = 45, **kwargs)-> None:
self.tags_num = tags_num
super(TagGatedConvNgram, self).__init__(**kwargs)
def post_project(self, x1: tf.Tensor, x2: tf.Tensor):
tag_weight = tf.get_variable('tag_weight',
shape=(self.tags_num, 1),
dtype=tf.float32,
#initializer=tf.initializers.truncated_normal(stddev=0.01))
initializer=tf.initializers.constant(1))
x1 *= tf.gather(tag_weight, self.tag1)
x2 *= tf.gather(tag_weight, self.tag2)
for size in [n for n in self.ngram_size if n > 1]:
x1_ngram, x2_ngram = self.ngram_embed(x1, x2, size)
x1 = tf.concat([x1, x1_ngram], 1)
x2 = tf.concat([x2, x2_ngram], 1)
return x1, x2
```
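`ConvNgram.ngram_embed` treats every n-token window as a unit: a filter of height `ngram_size` spanning the full embedding width yields one vector per window, and `post_project` appends those vectors along the sequence axis. A NumPy sketch of the windowing and contraction only (the learned weights and the ReLU are stand-ins):
```python
import numpy as np

def ngram_windows(x, n):
    """Stack all length-n windows of a [seq_len, dim] matrix into [seq_len - n + 1, n, dim]."""
    return np.stack([x[i:i + n] for i in range(x.shape[0] - n + 1)])

x = np.random.randn(7, 5)      # seq_len=7, embed_dim=5
w = np.random.randn(3, 5, 5)   # 3-gram filter: [n, embed_dim, out_dim]

tri = np.einsum('snd,ndo->so', ngram_windows(x, 3), w)
print(tri.shape)               # (5, 5): one vector per 3-gram window
```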
#### File: NLI/src/op.py
```python
import typing as t
import tensorflow as tf
DEFAULT_VAR_INIT = tf.variance_scaling_initializer()
DEFAULT_SCALE_L1 = 0.0
DEFAULT_SCALE_L2 = 0.0
def get_variable(
name: str,
initializer: t.Callable = DEFAULT_VAR_INIT,
scale_l1: float = DEFAULT_SCALE_L1,
scale_l2: float = DEFAULT_SCALE_L2,
dtype: tf.DType = tf.float32,
**kwargs):
""" Get an existing tf.Variable with these parameters or create a new one.
This function calls `tf.get_variable` while overriding some default
parameters. All ops using variables MUST use this function for consistency.
"""
return tf.get_variable(name,
initializer=initializer,
regularizer=tf.contrib.layers.l1_l2_regularizer(scale_l1, scale_l2),
dtype=dtype,
**kwargs)
def conv2d(
inputs,
num_outputs,
kernel_size,
padding='SAME',
weight_initializer: t.Callable = DEFAULT_VAR_INIT,
biases_initializer: t.Callable = DEFAULT_VAR_INIT,
scale_l1: float = DEFAULT_SCALE_L1,
scale_l2: float = DEFAULT_SCALE_L2,
reuse=tf.AUTO_REUSE,
activation_fn=tf.nn.relu,
scope='conv2d'):
""" Conduct an N-D convolution (1 <= N <=3).
This function calls `tf.contrib.layers.conv2d` while overriding some default
parameters in order to be consistant with `get_variable`.
"""
return tf.contrib.layers.conv2d(inputs,
num_outputs,
kernel_size,
padding=padding,
weights_initializer=weight_initializer,
weights_regularizer=tf.contrib.layers.l1_l2_regularizer(
scale_l1, scale_l2),
biases_initializer=biases_initializer,
biases_regularizer=tf.contrib.layers.l1_l2_regularizer(
scale_l1, scale_l2),
reuse=reuse,
activation_fn=activation_fn,
scope=scope)
def linear(
inputs: tf.Tensor,
dim: int = -1,
activation_fn: t.Callable = tf.nn.relu,
bias: bool = True,
drop_after: bool = True,
keep_prob: float = 1.0,
scope: t.Union[str, tf.VariableScope] = None,
reuse: bool = tf.AUTO_REUSE,
**kwargs):
"""
Inputs: 3D-Tensor [batch, seq_len, input_dim], or
2D-Tensor [batch, input_dim]
Returns: 3D-Tensor [batch, seq_len, dim], or
2D-Tensor [batch, dim]
"""
dim = dim if dim > 0 else int(inputs.shape[-1])
t = tf.nn.dropout(inputs, keep_prob) if not drop_after else inputs
with tf.variable_scope(scope if scope else 'linear', reuse=reuse):
t_shape = tf.shape(t)
w = get_variable('weight', shape=[t.get_shape()[-1], dim], **kwargs)
output_rank = len(t.get_shape())
if output_rank == 3:
t = tf.reshape(t, [-1, t.shape[2]])
t = tf.matmul(t, w)
if bias:
b = get_variable('bias', shape=[dim], **kwargs)
t += b
t = activation_fn(t) if activation_fn else t
if output_rank == 3:
t = tf.reshape(t, [-1, t_shape[1], dim])
t = tf.nn.dropout(t, keep_prob) if drop_after else t
return t
def highway(
inputs: tf.Tensor,
scope: t.Union[str, tf.VariableScope] = None,
reuse: bool = tf.AUTO_REUSE,
**kwargs):
"""
Inputs: 3D-Tensor [batch, seq_len, input_dim], or
2D-Tensor [batch, input_dim]
Returns: 3D-Tensor [batch, seq_len, dim], or
2D-Tensor [batch, dim]
"""
with tf.variable_scope(scope if scope else 'highway', reuse=reuse):
trans = linear(inputs, scope='trans', **kwargs)
gate = linear(inputs, scope='gate', activation_fn=tf.sigmoid, **kwargs)
if inputs.shape[-1] != gate.shape[-1]:
# Additional affine transformation to project the input to the gate's shape
inputs = linear(inputs, scope='affine', **kwargs)
return gate * trans + (1 - gate) * inputs
def gated_fuse(
t1: tf.Tensor,
t2: tf.Tensor,
scope: t.Union[str, tf.VariableScope] = None,
reuse: bool = tf.AUTO_REUSE,
**kwargs
):
"""
Inputs: 3D-Tensor [batch, seq_len, t1_dim]
Returns: 3D-Tensor [batch, seq_len, t1_dim]
"""
with tf.variable_scope(scope if scope else 'gated-fuse', reuse=reuse):
proj = lambda inputs, scope: linear(inputs,
dim=t1.shape[-1],
scope=scope,
activation_fn=None,
**kwargs)
# NOTE: two tensors are concated in the paper
with tf.variable_scope('trans'):
trans = tf.nn.relu(proj(t1, 'linear1') + proj(t2, 'linear2'))
with tf.variable_scope('gate'):
gate = tf.sigmoid(proj(t1, 'linear1') + proj(t2, 'linear2'))
return gate * trans + (1 - gate) * t1
def lstm(
inputs: tf.Tensor,
seq_len: int,
hidden_size: int,
num_layers: int = 1,
cell_type: str = 'gru',
dynamic: bool = False,
bidirectional: bool = False,
scope: t.Union[str, tf.VariableScope] = None,
reuse: bool = tf.AUTO_REUSE
):
# type and shape check
if not isinstance(seq_len, int):
if len(seq_len.get_shape()) == 2:
seq_len = tf.squeeze(seq_len, axis=1)
if seq_len.dtype.is_floating:
seq_len = tf.cast(seq_len, tf.int32)
if len(inputs.get_shape()) == 4:
inputs = tf.squeeze(inputs, axis=3)
with tf.variable_scope(scope if scope else cell_type, reuse=reuse):
with tf.name_scope('multilayer-rnn'):
cell = _get_multi_rnn_cell(hidden_size,
num_layers=num_layers,
cell_type=cell_type,
bidirectional=bidirectional)
rnn_outputs = _get_rnn_outputs(cell, inputs, seq_len,
dynamic=dynamic,
bidirectional=bidirectional)
return rnn_outputs
def _get_multi_rnn_cell(hidden_size, num_layers=1, cell_type='gru',
bidirectional=False):
cell_types = {'lstm': tf.nn.rnn_cell.LSTMCell,
'gru': tf.nn.rnn_cell.GRUCell}
cell = cell_types.get(cell_type, None)
rnn_params = {'num_units': hidden_size,
'activation': tf.nn.relu}
if cell_type == 'lstm':
rnn_params['use_peepholes'] = True
cell_fw = tf.contrib.rnn.MultiRNNCell(
[cell(**rnn_params) for _ in range(num_layers)])
if bidirectional is False:
return cell_fw
else:
cell_bw = tf.contrib.rnn.MultiRNNCell(
[cell(**rnn_params) for _ in range(num_layers)])
return cell_fw, cell_bw
def _get_rnn_outputs(cell, inputs, seq_len, dynamic=True, bidirectional=False):
if dynamic is True:
if bidirectional is True:
(cell_fw, cell_bw) = cell
outputs, state = tf.nn.bidirectional_dynamic_rnn(
cell_fw, cell_bw, tf.cast(inputs, tf.float32),
sequence_length=seq_len,
time_major=False,
dtype='float32')
output_fw, output_bw = outputs
outputs = tf.concat(axis=2, values=[output_fw, output_bw])
else:
outputs, state = tf.nn.dynamic_rnn(cell, tf.cast(inputs, tf.float32),
sequence_length=seq_len,
time_major=False,
dtype='float32')
else:
# The static path receives a (cell_fw, cell_bw) tuple; unpack it first.
cell_fw, cell_bw = cell
inputs = tf.unstack(inputs, axis=1)
inputs = [tf.cast(i, tf.float32) for i in inputs]
outputs, output_state_fw, output_state_bw = \
tf.nn.static_bidirectional_rnn(
cell_fw, cell_bw, inputs,
sequence_length=seq_len,
dtype='float32')
outputs = tf.stack(outputs, axis=1)
return outputs
```
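A minimal usage sketch of `linear` and `highway` under TF 1.x, assuming this file is importable as `op` (the placeholder shapes and scope names are made up):
```python
import tensorflow as tf
import op

x = tf.placeholder(tf.float32, [None, 20, 300])   # [batch, seq_len, input_dim]
keep_prob = tf.placeholder(tf.float32, [])

h = op.linear(x, dim=200, keep_prob=keep_prob, scope='proj')   # [batch, 20, 200], ReLU + dropout
g = op.highway(h, dim=200, keep_prob=keep_prob, scope='hw')    # [batch, 20, 200], gated residual
```
Because both wrappers route variable creation through `get_variable`, the regularizer settings are applied uniformly to every weight they create.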
#### File: src/util/build.py
```python
import os
import glob
from typing import List, Dict, Union
BUILD_DIR = './build'
def parse_args(sys_argv: List[str]) -> Dict[str, Union[str, int, float, bool]]:
""" Parse command line arguments to dictionary. The numeric values are auto
convert to the according types. """
kwargs = {} # type: Dict[str, Union[str, int, float, bool]]
if len(sys_argv) > 1:
for arg in sys_argv[1:]:
k = arg.split("=")[0][2:]
v = arg.split("=")[1] # type: Union[str, int, float, bool]
if v == 'True':
v = True
elif v == 'False':
v = False
else:
try:
v = int(v)
except ValueError:
try:
v = float(v)
except ValueError:
pass
kwargs[k] = v
return kwargs
def get_model_path(model_name: str) -> str:
return './build/models/{}'.format(model_name)
def get_new_default_model_path() -> str:
""" Get a model name 'modeli' with the minimum positive i and do not exist
in the build directory.
"""
model_path = get_model_path('model')
if not os.path.isdir(model_path):
return model_path
i = 1
while True:
if not os.path.isdir(model_path + str(i)):
return model_path + str(i)
i += 1
def get_last_default_model_path() -> str:
""" Get a model path 'modeli' with the maximum i that exists in the build
directory.
"""
for p in reversed(sorted(glob.glob(get_model_path('*')))):
if os.path.isdir(p):
return p
return get_model_path('model')
def get_log_path(model_path: str) -> str:
""" Get the pathname to the log path.
"""
return os.path.join(model_path, 'log')
def get_save_path(model_path: str) -> str:
""" Get the pathname to the save file.
"""
return os.path.join(model_path, 'model')
def get_saved_model(model_path: str, step: int = None) -> str:
#print('{}/*.index'.format(model_path))
#print(glob.glob('{}/*.index'.format(model_path)))
saved_path = '{}/model-{}*.index'.format(model_path, step if step else '')
# load the latest trained model
offset = len(model_path) + 7
idx_file = sorted(glob.glob(saved_path),
key=lambda name: int(name[offset:name.rfind('.')]))[-1]
return os.path.splitext(idx_file)[0]
```
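`parse_args` expects `--key=value` tokens and infers bool/int/float types. A quick sketch of the round trip, assuming `src` is on the Python path (the argv contents are made up):
```python
from util.build import parse_args

argv = ['train.py', '--lr=0.001', '--batch_size=32', '--debug=False', '--name=esim']
print(parse_args(argv))
# {'lr': 0.001, 'batch_size': 32, 'debug': False, 'name': 'esim'}
```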
#### File: src/util/debug.py
```python
import tensorflow as tf
def tf_Print(input_, data, **kwargs):
kwargs = {'summarize': 2**30, 'first_n': 1, **kwargs}
return tf.Print(input_, data, **kwargs)
```
#### File: src/util/layers.py
```python
import tensorflow as tf
def lstm(inputs, seq_len, hidden_size, num_layers=1, cell_type='gru',
bidirectional=False):
# type and shape check
if not isinstance(seq_len, int):
if len(seq_len.get_shape()) == 2:
seq_len = tf.squeeze(seq_len, axis=1)
if seq_len.dtype.is_floating:
seq_len = tf.cast(seq_len, tf.int32)
if len(inputs.get_shape()) == 4:
inputs = tf.squeeze(inputs, axis=3)
with tf.name_scope('multilayer-rnn'):
cell = _get_multi_rnn_cell(hidden_size, num_layers=num_layers,
cell_type=cell_type, bidirectional=bidirectional)
rnn_outputs = _get_rnn_outputs(cell, inputs, seq_len,
bidirectional=bidirectional)
return rnn_outputs
def _get_multi_rnn_cell(hidden_size, num_layers=1, cell_type='gru',
bidirectional=False):
cell_types = {'lstm': tf.nn.rnn_cell.LSTMCell,
'gru': tf.nn.rnn_cell.GRUCell}
cell = cell_types.get(cell_type, None)
rnn_params = {'num_units': hidden_size,
'activation': tf.nn.relu}
if cell_type == 'lstm':
rnn_params['use_peepholes'] = True
cell_fw = tf.contrib.rnn.MultiRNNCell(
[cell(**rnn_params) for _ in range(num_layers)])
if bidirectional is False:
return cell_fw
else:
cell_bw = tf.contrib.rnn.MultiRNNCell(
[cell(**rnn_params) for _ in range(num_layers)])
return cell_fw, cell_bw
def _get_rnn_outputs(cell, inputs, seq_len, dynamic=True, bidirectional=False):
if dynamic is True:
if bidirectional is True:
(cell_fw, cell_bw) = cell
outputs, state = tf.nn.bidirectional_dynamic_rnn(
cell_fw,
cell_bw,
tf.cast(inputs, tf.float32),
sequence_length=seq_len,
time_major=False,
dtype='float32')
output_fw, output_bw = outputs
outputs = tf.concat(axis=2, values=[output_fw, output_bw])
else:
outputs, state = tf.nn.dynamic_rnn(
cell,
tf.cast(inputs, tf.float32),
sequence_length=seq_len,
time_major=False,
dtype='float32')
else:
# The static path receives a (cell_fw, cell_bw) tuple; unpack it first.
cell_fw, cell_bw = cell
inputs = tf.unstack(inputs, axis=1)
inputs = [tf.cast(i, tf.float32) for i in inputs]
(outputs, output_state_fw, output_state_bw) = tf.nn.static_bidirectional_rnn(
cell_fw,
cell_bw,
inputs,
sequence_length=seq_len,
dtype='float32')
outputs = tf.stack(outputs, axis=1)
return outputs
``` |
{
"source": "jie-meng/toolscripts",
"score": 3
} |
#### File: toolscripts/excel/convert-text2num.py
```python
from openpyxl import load_workbook
def is_int(s):
try:
int(s)
return True
except ValueError:
return None
def is_float(s):
try:
float(s)
return True
except ValueError:
return None
def process_sheet(sheet):
for row in sheet.iter_rows():
for cell in row:
if isinstance(cell.value, str):
if is_int(cell.value) is not None:
print('cell {0} converted to int {1}'.format(cell, cell.value))
cell.value = int(cell.value)
elif is_float(cell.value) is not None:
print('cell {0} converted to float {1}'.format(cell, cell.value))
cell.value = float(cell.value)
def process_workbook(source, dest):
wb = load_workbook(source)
for sheet in wb.worksheets:
process_sheet(sheet)
wb.save(dest)
if __name__ == "__main__":
print('please input source xlsx file path:')
source = input()
print('please input dest xlsx file path:')
dest = input()
process_workbook(source, dest)
print('\ndone!')
``` |
{
"source": "jiemichong/visionarycaptionsv1",
"score": 3
} |
#### File: src/application/text_to_viz.py
```python
from domain.viz.domain import Domain, project
from domain.text.operator_lang import OperatorLang, Peak, MoreThan, LessThan, NoOp
class TextToViz(object):
def __init__(self, phrases, histogram):
self.phrases = phrases
self.histogram = histogram
self.viz_domain = self.histogram.domain()
self.phrase_min = 0
self.phrase_max = 3000
self.phrase_domain = Domain(self.phrase_min, self.phrase_max)
def run(self):
for phrase in self.phrases:
op = self.phrase_to_op(phrase)
if type(op) is Peak:
spike = self.histogram.mode()
print(op, op.value)
print("KH: I'm not sure if this is the coordinate that we want to plot.", self.project(op.value), spike.height)
print("Carmen: need a radius. Maybe something like bar width * 3")
elif type(op) is MoreThan:
print(op, op.value)
print("KH: Is this the x-range that we want to plot for 'more than'?", self.project(op.value), self.project(self.phrase_max))
print("Carmen: OK-ish.")
elif type(op) is LessThan:
print(op, op.value)
print("KH: Is this the x-range that we want to plot for 'less than'?", self.project(self.phrase_min), self.project(op.value))
print("Carmen: x should starts around 542. width should be around 490")
print()
return
def project(self, value):
return project(value, self.phrase_domain, self.viz_domain)
def phrase_to_op(self, phrase) -> OperatorLang:
if Peak.match(phrase):
op = Peak(phrase)
elif MoreThan.match(phrase):
op = MoreThan(phrase)
elif LessThan.match(phrase):
op = LessThan(phrase)
else:
op = NoOp(phrase)
return op
```
#### File: domain/text/operator_lang.py
```python
class OperatorLang(object):
keywords = []
def __init__(self):
pass
@classmethod
def match(cls, phrase):
return any([keyword in phrase for keyword in cls.keywords])
class NoOp(OperatorLang):
def __init__(self, phrase):
self.phrase = phrase
def __str__(self):
return "NoOp({self.phrase})".format(self=self)
class LessThan(OperatorLang):
keywords = ["fewer"]
@property
def value(self):
digits = [float(s) for s in self.phrase.split() if s.isdigit()]
return max(digits)
def __init__(self, phrase):
self.phrase = phrase
def __str__(self):
return "LessThan({self.phrase})".format(self=self)
class MoreThan(OperatorLang):
keywords = ["more"]
@property
def value(self):
digits = [float(s) for s in self.phrase.split() if s.isdigit()]
return max(digits)
def __init__(self, phrase):
self.phrase = phrase
def __str__(self):
return "MoreThan({self.phrase})".format(self=self)
class Peak(OperatorLang):
keywords = ["spike", "max", "mode"]
@property
def value(self):
digits = [float(s) for s in self.phrase.split() if s.isdigit()]
return digits[0]
def __init__(self, phrase):
self.phrase = phrase
def __str__(self):
return "Peak({self.phrase})".format(self=self)
```
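The operator classes are keyword-driven: `match` checks for a trigger word and `value` pulls the relevant number out of the phrase. A short usage sketch with a made-up phrase:
```python
from domain.text.operator_lang import MoreThan

phrase = "more than 650 calories in a burrito"
assert MoreThan.match(phrase)   # "more" is in the keyword list
op = MoreThan(phrase)
print(op, op.value)             # MoreThan(more than 650 calories in a burrito) 650.0
```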
#### File: domain/text/test_emphasis.py
```python
from src.domain.viz.circle import Circle
from src.domain.text.emphasis import Emphasis
def test_emphasis():
phrase = "test phrase"
shape = Circle(100, 100, 10)
emphasis = Emphasis(phrase, shape)
assert emphasis.phrase == phrase
assert emphasis.shape.type == "circle"
assert emphasis.shape.x == 100
assert emphasis.shape.y == 100
assert emphasis.shape.r == 10
circle_dict = {
"phrase": "The spike around 1000 calories",
"shape": {
"type": "circle",
"x": 339,
"y": 460,
"r": 45
}
}
rect_dict = {
"phrase": "fewer than 650 calories such as a cheese-free burrito bowl",
"shape": {
"type": "rectangle",
"x": 2,
"y": 0,
"w": 220,
"h": 500
}
}
def test_emphasis_from_dict():
circle_emph = Emphasis.from_dict(circle_dict)
assert circle_emph.phrase == circle_dict["phrase"]
assert circle_emph.shape.x == circle_dict["shape"]["x"]
assert circle_emph.shape.y == circle_dict["shape"]["y"]
assert circle_emph.shape.r == circle_dict["shape"]["r"]
rect_emph = Emphasis.from_dict(rect_dict)
assert rect_emph.phrase == rect_dict["phrase"]
assert rect_emph.shape.x == rect_dict["shape"]["x"]
assert rect_emph.shape.y == rect_dict["shape"]["y"]
assert rect_emph.shape.width == rect_dict["shape"]["w"]
assert rect_emph.shape.height == rect_dict["shape"]["h"]
``` |
{
"source": "jieming2002/models-quiz8",
"score": 2
} |
#### File: models-quiz8/research/inference.py
```python
import argparse
import os
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from PIL import Image
from utils import visualization_utils as vis_util
from utils import label_map_util
if tf.__version__ < '1.4.0':
raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!')
NUM_CLASSES = 5
def parse_args(check=True):
parser = argparse.ArgumentParser()
parser.add_argument('--output_dir', type=str, required=True)
parser.add_argument('--dataset_dir', type=str, required=True)
FLAGS, unparsed = parser.parse_known_args()
return FLAGS, unparsed
if __name__ == '__main__':
FLAGS, unparsed = parse_args()
PATH_TO_CKPT = os.path.join(FLAGS.output_dir, 'exported_graphs/frozen_inference_graph.pb')
PATH_TO_LABELS = os.path.join(FLAGS.dataset_dir, 'labels_items.txt')
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
test_img_path = os.path.join(FLAGS.dataset_dir, 'test.jpg')
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
image = Image.open(test_img_path)
image_np = load_image_into_numpy_array(image)
image_np_expanded = np.expand_dims(image_np, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
print('skye boxes=', boxes)
# scores[0][0] = 0.99 # No prediction info shows up on output.png. Is the accuracy too low? Yes: the drawn boxes are filtered by a score threshold.
print('skye scores=',scores)
print('skye classes=', classes)
print('skye category_index=', category_index)
image_np = vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8)
plt.imsave(os.path.join(FLAGS.output_dir, 'output.png'), image_np)
```
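As the comment above notes, `visualize_boxes_and_labels_on_image_array` only draws detections whose score clears a threshold (0.5 by default in the Object Detection API), which is why a low-confidence model can yield an empty-looking output.png. A small sketch of filtering the raw outputs by hand before visualization, meant to run after the `sess.run` call above (the 0.5 threshold is an assumption):
```python
min_score = 0.5
boxes_f = np.squeeze(boxes)
scores_f = np.squeeze(scores)
classes_f = np.squeeze(classes).astype(np.int32)

keep = scores_f >= min_score
for box, cls, score in zip(boxes_f[keep], classes_f[keep], scores_f[keep]):
    name = category_index[cls]['name'] if cls in category_index else str(cls)
    print('%s: %.2f at %s' % (name, score, np.round(box, 3)))
```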
#### File: object_detection/meta_architectures/ssd_meta_arch.py
```python
from abc import abstractmethod
import re
import tensorflow as tf
from object_detection.core import box_list
from object_detection.core import box_predictor as bpredictor
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner
from object_detection.utils import shape_utils
from object_detection.utils import visualization_utils
slim = tf.contrib.slim
class SSDFeatureExtractor(object):
"""SSD Feature Extractor definition."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
batch_norm_trainable=True,
reuse_weights=None):
"""Constructor.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops.
batch_norm_trainable: Whether to update batch norm parameters during
training or not. When training with a small batch size
(e.g. 1), it is desirable to disable batch norm update and use
pretrained batch norm params.
reuse_weights: whether to reuse variables. Default is None.
"""
self._is_training = is_training
self._depth_multiplier = depth_multiplier
self._min_depth = min_depth
self._pad_to_multiple = pad_to_multiple
self._conv_hyperparams = conv_hyperparams
self._batch_norm_trainable = batch_norm_trainable
self._reuse_weights = reuse_weights
@abstractmethod
def preprocess(self, resized_inputs):
"""Preprocesses images for feature extraction (minus image resizing).
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
pass
@abstractmethod
def extract_features(self, preprocessed_inputs):
"""Extracts features from preprocessed inputs.
This function is responsible for extracting feature maps from preprocessed
images.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
pass
class SSDMetaArch(model.DetectionModel):
"""SSD Meta-architecture definition."""
def __init__(self,
is_training,
anchor_generator,
box_predictor,
box_coder,
feature_extractor,
matcher,
region_similarity_calculator,
image_resizer_fn,
non_max_suppression_fn,
score_conversion_fn,
classification_loss,
localization_loss,
classification_loss_weight,
localization_loss_weight,
normalize_loss_by_num_matches,
hard_example_miner,
add_summaries=True):
"""SSDMetaArch Constructor.
TODO: group NMS parameters + score converter into a class and loss
parameters into a class and write config protos for postprocessing
and losses.
Args:
is_training: A boolean indicating whether the training version of the
computation graph should be constructed.
anchor_generator: an anchor_generator.AnchorGenerator object.
box_predictor: a box_predictor.BoxPredictor object.
box_coder: a box_coder.BoxCoder object.
feature_extractor: a SSDFeatureExtractor object.
matcher: a matcher.Matcher object.
region_similarity_calculator: a
region_similarity_calculator.RegionSimilarityCalculator object.
image_resizer_fn: a callable for image resizing. This callable always
takes a rank-3 image tensor (corresponding to a single image) and
returns a rank-3 image tensor, possibly with new spatial dimensions.
See builders/image_resizer_builder.py.
non_max_suppression_fn: batch_multiclass_non_max_suppression
callable that takes `boxes`, `scores` and optional `clip_window`
inputs (with all other inputs already set) and returns a dictionary
hold tensors with keys: `detection_boxes`, `detection_scores`,
`detection_classes` and `num_detections`. See `post_processing.
batch_multiclass_non_max_suppression` for the type and shape of these
tensors.
score_conversion_fn: callable elementwise nonlinearity (that takes tensors
as inputs and returns tensors). This is usually used to convert logits
to probabilities.
classification_loss: an object_detection.core.losses.Loss object.
localization_loss: a object_detection.core.losses.Loss object.
classification_loss_weight: float
localization_loss_weight: float
normalize_loss_by_num_matches: boolean
hard_example_miner: a losses.HardExampleMiner object (can be None)
add_summaries: boolean (default: True) controlling whether summary ops
should be added to tensorflow graph.
"""
super(SSDMetaArch, self).__init__(num_classes=box_predictor.num_classes)
self._is_training = is_training
# Needed for fine-tuning from classification checkpoints whose
# variables do not have the feature extractor scope.
self._extract_features_scope = 'FeatureExtractor'
self._anchor_generator = anchor_generator
self._box_predictor = box_predictor
self._box_coder = box_coder
self._feature_extractor = feature_extractor
self._matcher = matcher
self._region_similarity_calculator = region_similarity_calculator
# TODO: handle agnostic mode and positive/negative class weights
unmatched_cls_target = None
unmatched_cls_target = tf.constant([1] + self.num_classes * [0], tf.float32)
self._target_assigner = target_assigner.TargetAssigner(
self._region_similarity_calculator,
self._matcher,
self._box_coder,
positive_class_weight=1.0,
negative_class_weight=1.0,
unmatched_cls_target=unmatched_cls_target)
self._classification_loss = classification_loss
self._localization_loss = localization_loss
self._classification_loss_weight = classification_loss_weight
self._localization_loss_weight = localization_loss_weight
self._normalize_loss_by_num_matches = normalize_loss_by_num_matches
self._hard_example_miner = hard_example_miner
self._image_resizer_fn = image_resizer_fn
self._non_max_suppression_fn = non_max_suppression_fn
self._score_conversion_fn = score_conversion_fn
self._anchors = None
self._add_summaries = add_summaries
@property
def anchors(self):
if not self._anchors:
raise RuntimeError('anchors have not been constructed yet!')
if not isinstance(self._anchors, box_list.BoxList):
raise RuntimeError('anchors should be a BoxList object, but is not.')
return self._anchors
def preprocess(self, inputs):
"""Feature-extractor specific preprocessing.
See base class.
Args:
inputs: a [batch, height_in, width_in, channels] float tensor representing
a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: a [batch, height_out, width_out, channels] float
tensor representing a batch of images.
Raises:
ValueError: if inputs tensor does not have type tf.float32
"""
if inputs.dtype is not tf.float32:
raise ValueError('`preprocess` expects a tf.float32 tensor')
with tf.name_scope('Preprocessor'):
# TODO: revisit whether to always use batch size as the number of parallel
# iterations vs allow for dynamic batching.
resized_inputs = tf.map_fn(self._image_resizer_fn,
elems=inputs,
dtype=tf.float32)
return self._feature_extractor.preprocess(resized_inputs)
def predict(self, preprocessed_inputs):
"""Predicts unpostprocessed tensors from input tensor.
This function takes an input batch of images and runs it through the forward
pass of the network to yield unpostprocessesed predictions.
A side effect of calling the predict method is that self._anchors is
populated with a box_list.BoxList of anchors. These anchors must be
constructed before the postprocess or loss functions can be called.
Args:
preprocessed_inputs: a [batch, height, width, channels] image tensor.
Returns:
prediction_dict: a dictionary holding "raw" prediction tensors:
1) box_encodings: 4-D float tensor of shape [batch_size, num_anchors,
box_code_dimension] containing predicted boxes.
2) class_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, num_classes+1] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions (at class index 0).
3) feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i].
4) anchors: 2-D float tensor of shape [num_anchors, 4] containing
the generated anchors in normalized coordinates.
"""
with tf.variable_scope(None, self._extract_features_scope,
[preprocessed_inputs]):
feature_maps = self._feature_extractor.extract_features(
preprocessed_inputs)
feature_map_spatial_dims = self._get_feature_map_spatial_dims(feature_maps)
image_shape = tf.shape(preprocessed_inputs)
self._anchors = self._anchor_generator.generate(
feature_map_spatial_dims,
im_height=image_shape[1],
im_width=image_shape[2])
(box_encodings, class_predictions_with_background
) = self._add_box_predictions_to_feature_maps(feature_maps)
predictions_dict = {
'box_encodings': box_encodings,
'class_predictions_with_background': class_predictions_with_background,
'feature_maps': feature_maps,
'anchors': self._anchors.get()
}
return predictions_dict
def _add_box_predictions_to_feature_maps(self, feature_maps):
"""Adds box predictors to each feature map and returns concatenated results.
Args:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
Returns:
box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
box_code_dimension] containing predicted boxes.
class_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, num_classes+1] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions (at class index 0).
Raises:
RuntimeError: if the number of feature maps extracted via the
extract_features method does not match the length of the
num_anchors_per_locations list that was passed to the constructor.
RuntimeError: if box_encodings from the box_predictor does not have
shape of the form [batch_size, num_anchors, 1, code_size].
"""
num_anchors_per_location_list = (
self._anchor_generator.num_anchors_per_location())
if len(feature_maps) != len(num_anchors_per_location_list):
raise RuntimeError('the number of feature maps must match the '
'length of self.anchors.NumAnchorsPerLocation().')
box_encodings_list = []
cls_predictions_with_background_list = []
for idx, (feature_map, num_anchors_per_location
) in enumerate(zip(feature_maps, num_anchors_per_location_list)):
box_predictor_scope = 'BoxPredictor_{}'.format(idx)
box_predictions = self._box_predictor.predict(feature_map,
num_anchors_per_location,
box_predictor_scope)
box_encodings = box_predictions[bpredictor.BOX_ENCODINGS]
cls_predictions_with_background = box_predictions[
bpredictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
box_encodings_shape = box_encodings.get_shape().as_list()
if len(box_encodings_shape) != 4 or box_encodings_shape[2] != 1:
raise RuntimeError('box_encodings from the box_predictor must be of '
'shape `[batch_size, num_anchors, 1, code_size]`; '
'actual shape', box_encodings_shape)
box_encodings = tf.squeeze(box_encodings, axis=2)
box_encodings_list.append(box_encodings)
cls_predictions_with_background_list.append(
cls_predictions_with_background)
num_predictions = sum(
[tf.shape(box_encodings)[1] for box_encodings in box_encodings_list])
num_anchors = self.anchors.num_boxes()
anchors_assert = tf.assert_equal(num_anchors, num_predictions, [
'Mismatch: number of anchors vs number of predictions', num_anchors,
num_predictions
])
with tf.control_dependencies([anchors_assert]):
box_encodings = tf.concat(box_encodings_list, 1)
class_predictions_with_background = tf.concat(
cls_predictions_with_background_list, 1)
return box_encodings, class_predictions_with_background
def _get_feature_map_spatial_dims(self, feature_maps):
"""Return list of spatial dimensions for each feature map in a list.
Args:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i].
Returns:
a list of pairs (height, width) for each feature map in feature_maps
"""
feature_map_shapes = [
shape_utils.combined_static_and_dynamic_shape(
feature_map) for feature_map in feature_maps
]
return [(shape[1], shape[2]) for shape in feature_map_shapes]
def postprocess(self, prediction_dict):
"""Converts prediction tensors to final detections.
This function converts raw predictions tensors to final detection results by
slicing off the background class, decoding box predictions and applying
non max suppression and clipping to the image window.
See base class for output format conventions. Note also that by default,
scores are to be interpreted as logits, but if a score_conversion_fn is
used, then scores are remapped (and may thus have a different
interpretation).
Args:
prediction_dict: a dictionary holding prediction tensors with
1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
box_code_dimension] containing predicted boxes.
2) class_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, num_classes+1] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions.
Returns:
detections: a dictionary containing the following fields
detection_boxes: [batch, max_detections, 4]
detection_scores: [batch, max_detections]
detection_classes: [batch, max_detections]
detection_keypoints: [batch, max_detections, num_keypoints, 2] (if
encoded in the prediction_dict 'box_encodings')
num_detections: [batch]
Raises:
ValueError: if prediction_dict does not contain `box_encodings` or
`class_predictions_with_background` fields.
"""
if ('box_encodings' not in prediction_dict or
'class_predictions_with_background' not in prediction_dict):
raise ValueError('prediction_dict does not contain expected entries.')
with tf.name_scope('Postprocessor'):
box_encodings = prediction_dict['box_encodings']
class_predictions = prediction_dict['class_predictions_with_background']
detection_boxes, detection_keypoints = self._batch_decode(box_encodings)
detection_boxes = tf.expand_dims(detection_boxes, axis=2)
class_predictions_without_background = tf.slice(class_predictions,
[0, 0, 1],
[-1, -1, -1])
detection_scores = self._score_conversion_fn(
class_predictions_without_background)
clip_window = tf.constant([0, 0, 1, 1], tf.float32)
additional_fields = None
if detection_keypoints is not None:
additional_fields = {
fields.BoxListFields.keypoints: detection_keypoints}
(nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields,
num_detections) = self._non_max_suppression_fn(
detection_boxes,
detection_scores,
clip_window=clip_window,
additional_fields=additional_fields)
detection_dict = {'detection_boxes': nmsed_boxes,
'detection_scores': nmsed_scores,
'detection_classes': nmsed_classes,
'num_detections': tf.to_float(num_detections)}
if (nmsed_additional_fields is not None and
fields.BoxListFields.keypoints in nmsed_additional_fields):
detection_dict['detection_keypoints'] = nmsed_additional_fields[
fields.BoxListFields.keypoints]
return detection_dict
def loss(self, prediction_dict, scope=None):
"""Compute scalar loss tensors with respect to provided groundtruth.
Calling this function requires that groundtruth tensors have been
provided via the provide_groundtruth function.
Args:
prediction_dict: a dictionary holding prediction tensors with
1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
box_code_dimension] containing predicted boxes.
2) class_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, num_classes+1] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions.
scope: Optional scope name.
Returns:
a dictionary mapping loss keys (`localization_loss` and
`classification_loss`) to scalar tensors representing corresponding loss
values.
"""
with tf.name_scope(scope, 'Loss', prediction_dict.values()):
keypoints = None
if self.groundtruth_has_field(fields.BoxListFields.keypoints):
keypoints = self.groundtruth_lists(fields.BoxListFields.keypoints)
(batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, match_list) = self._assign_targets(
self.groundtruth_lists(fields.BoxListFields.boxes),
self.groundtruth_lists(fields.BoxListFields.classes),
keypoints)
if self._add_summaries:
self._summarize_input(
self.groundtruth_lists(fields.BoxListFields.boxes), match_list)
num_matches = tf.stack(
[match.num_matched_columns() for match in match_list])
location_losses = self._localization_loss(
prediction_dict['box_encodings'],
batch_reg_targets,
ignore_nan_targets=True,
weights=batch_reg_weights)
# print('skye location_losses=', location_losses)
# print('skye location_losses.shape=', location_losses.shape)
cls_losses = self._classification_loss(
prediction_dict['class_predictions_with_background'],
batch_cls_targets,
weights=batch_cls_weights)
if self._hard_example_miner:
(localization_loss, classification_loss) = self._apply_hard_mining(
location_losses, cls_losses, prediction_dict, match_list)
if self._add_summaries:
self._hard_example_miner.summarize()
else:
if self._add_summaries:
class_ids = tf.argmax(batch_cls_targets, axis=2)
flattened_class_ids = tf.reshape(class_ids, [-1])
flattened_classification_losses = tf.reshape(cls_losses, [-1])
self._summarize_anchor_classification_loss(
flattened_class_ids, flattened_classification_losses)
localization_loss = tf.reduce_sum(location_losses)
classification_loss = tf.reduce_sum(cls_losses)
# Optionally normalize by number of positive matches
normalizer = tf.constant(1.0, dtype=tf.float32)
if self._normalize_loss_by_num_matches:
normalizer = tf.maximum(tf.to_float(tf.reduce_sum(num_matches)), 1.0)
with tf.name_scope('localization_loss'):
localization_loss = ((self._localization_loss_weight / normalizer) *
localization_loss)
with tf.name_scope('classification_loss'):
classification_loss = ((self._classification_loss_weight / normalizer) *
classification_loss)
loss_dict = {
'localization_loss': localization_loss,
'classification_loss': classification_loss
}
return loss_dict
def _summarize_anchor_classification_loss(self, class_ids, cls_losses):
positive_indices = tf.where(tf.greater(class_ids, 0))
positive_anchor_cls_loss = tf.squeeze(
tf.gather(cls_losses, positive_indices), axis=1)
visualization_utils.add_cdf_image_summary(positive_anchor_cls_loss,
'PositiveAnchorLossCDF')
negative_indices = tf.where(tf.equal(class_ids, 0))
negative_anchor_cls_loss = tf.squeeze(
tf.gather(cls_losses, negative_indices), axis=1)
visualization_utils.add_cdf_image_summary(negative_anchor_cls_loss,
'NegativeAnchorLossCDF')
def _assign_targets(self, groundtruth_boxes_list, groundtruth_classes_list,
groundtruth_keypoints_list=None):
"""Assign groundtruth targets.
Adds a background class to each one-hot encoding of groundtruth classes
and uses target assigner to obtain regression and classification targets.
Args:
groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4]
containing coordinates of the groundtruth boxes.
Groundtruth boxes are provided in [y_min, x_min, y_max, x_max]
format and assumed to be normalized and clipped
relative to the image window with y_min <= y_max and x_min <= x_max.
groundtruth_classes_list: a list of 2-D one-hot (or k-hot) tensors of
shape [num_boxes, num_classes] containing the class targets with the 0th
index assumed to map to the first non-background class.
groundtruth_keypoints_list: (optional) a list of 3-D tensors of shape
[num_boxes, num_keypoints, 2]
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_anchors],
batch_reg_targets: a tensor with shape [batch_size, num_anchors,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_anchors],
match_list: a list of matcher.Match objects encoding the match between
anchors and groundtruth boxes for each image of the batch,
with rows of the Match objects corresponding to groundtruth boxes
and columns corresponding to anchors.
"""
groundtruth_boxlists = [
box_list.BoxList(boxes) for boxes in groundtruth_boxes_list
]
groundtruth_classes_with_background_list = [
tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT')
for one_hot_encoding in groundtruth_classes_list
]
if groundtruth_keypoints_list is not None:
for boxlist, keypoints in zip(
groundtruth_boxlists, groundtruth_keypoints_list):
boxlist.add_field(fields.BoxListFields.keypoints, keypoints)
return target_assigner.batch_assign_targets(
self._target_assigner, self.anchors, groundtruth_boxlists,
groundtruth_classes_with_background_list)
def _summarize_input(self, groundtruth_boxes_list, match_list):
"""Creates tensorflow summaries for the input boxes and anchors.
This function creates four summaries corresponding to the average
number (over images in a batch) of (1) groundtruth boxes, (2) anchors
marked as positive, (3) anchors marked as negative, and (4) anchors marked
as ignored.
Args:
groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4]
containing corners of the groundtruth boxes.
match_list: a list of matcher.Match objects encoding the match between
anchors and groundtruth boxes for each image of the batch,
with rows of the Match objects corresponding to groundtruth boxes
and columns corresponding to anchors.
"""
num_boxes_per_image = tf.stack(
[tf.shape(x)[0] for x in groundtruth_boxes_list])
pos_anchors_per_image = tf.stack(
[match.num_matched_columns() for match in match_list])
neg_anchors_per_image = tf.stack(
[match.num_unmatched_columns() for match in match_list])
ignored_anchors_per_image = tf.stack(
[match.num_ignored_columns() for match in match_list])
tf.summary.scalar('Input/AvgNumGroundtruthBoxesPerImage',
tf.reduce_mean(tf.to_float(num_boxes_per_image)))
tf.summary.scalar('Input/AvgNumPositiveAnchorsPerImage',
tf.reduce_mean(tf.to_float(pos_anchors_per_image)))
tf.summary.scalar('Input/AvgNumNegativeAnchorsPerImage',
tf.reduce_mean(tf.to_float(neg_anchors_per_image)))
tf.summary.scalar('Input/AvgNumIgnoredAnchorsPerImage',
tf.reduce_mean(tf.to_float(ignored_anchors_per_image)))
def _apply_hard_mining(self, location_losses, cls_losses, prediction_dict,
match_list):
"""Applies hard mining to anchorwise losses.
Args:
location_losses: Float tensor of shape [batch_size, num_anchors]
representing anchorwise location losses.
cls_losses: Float tensor of shape [batch_size, num_anchors]
representing anchorwise classification losses.
prediction_dict: p a dictionary holding prediction tensors with
1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
box_code_dimension] containing predicted boxes.
2) class_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, num_classes+1] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions.
match_list: a list of matcher.Match objects encoding the match between
anchors and groundtruth boxes for each image of the batch,
with rows of the Match objects corresponding to groundtruth boxes
and columns corresponding to anchors.
Returns:
mined_location_loss: a float scalar with sum of localization losses from
selected hard examples.
mined_cls_loss: a float scalar with sum of classification losses from
selected hard examples.
"""
    class_predictions = tf.slice(
        prediction_dict['class_predictions_with_background'],
        [0, 0, 1], [-1, -1, -1])
decoded_boxes, _ = self._batch_decode(prediction_dict['box_encodings'])
decoded_box_tensors_list = tf.unstack(decoded_boxes)
class_prediction_list = tf.unstack(class_predictions)
decoded_boxlist_list = []
for box_location, box_score in zip(decoded_box_tensors_list,
class_prediction_list):
decoded_boxlist = box_list.BoxList(box_location)
decoded_boxlist.add_field('scores', box_score)
decoded_boxlist_list.append(decoded_boxlist)
return self._hard_example_miner(
location_losses=location_losses,
cls_losses=cls_losses,
decoded_boxlist_list=decoded_boxlist_list,
match_list=match_list)
def _batch_decode(self, box_encodings):
"""Decodes a batch of box encodings with respect to the anchors.
Args:
box_encodings: A float32 tensor of shape
[batch_size, num_anchors, box_code_size] containing box encodings.
Returns:
decoded_boxes: A float32 tensor of shape
[batch_size, num_anchors, 4] containing the decoded boxes.
decoded_keypoints: A float32 tensor of shape
[batch_size, num_anchors, num_keypoints, 2] containing the decoded
keypoints if present in the input `box_encodings`, None otherwise.
"""
combined_shape = shape_utils.combined_static_and_dynamic_shape(
box_encodings)
batch_size = combined_shape[0]
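    # Anchors are tiled across the batch and flattened to [-1, 4] so the box
    # coder can decode every box in a single call; the results are reshaped
    # back to [batch_size, num_anchors, ...] below.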
tiled_anchor_boxes = tf.tile(
tf.expand_dims(self.anchors.get(), 0), [batch_size, 1, 1])
tiled_anchors_boxlist = box_list.BoxList(
tf.reshape(tiled_anchor_boxes, [-1, 4]))
decoded_boxes = self._box_coder.decode(
tf.reshape(box_encodings, [-1, self._box_coder.code_size]),
tiled_anchors_boxlist)
decoded_keypoints = None
if decoded_boxes.has_field(fields.BoxListFields.keypoints):
decoded_keypoints = decoded_boxes.get_field(
fields.BoxListFields.keypoints)
num_keypoints = decoded_keypoints.get_shape()[1]
decoded_keypoints = tf.reshape(
decoded_keypoints,
tf.stack([combined_shape[0], combined_shape[1], num_keypoints, 2]))
decoded_boxes = tf.reshape(decoded_boxes.get(), tf.stack(
[combined_shape[0], combined_shape[1], 4]))
return decoded_boxes, decoded_keypoints
def restore_map(self, from_detection_checkpoint=True):
"""Returns a map of variables to load from a foreign checkpoint.
See parent class for details.
Args:
from_detection_checkpoint: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
the model graph.
"""
variables_to_restore = {}
for variable in tf.global_variables():
if variable.op.name.startswith(self._extract_features_scope):
var_name = variable.op.name
if not from_detection_checkpoint:
var_name = (re.split('^' + self._extract_features_scope + '/',
var_name)[-1])
variables_to_restore[var_name] = variable
return variables_to_restore
``` |
{
"source": "jiemingChen/ArmControl",
"score": 2
} |
#### File: panda_simulation/script/compensa.py
```python
import PyKDL as kdl
from urdf_parser_py.urdf import URDF
from pykdl_utils.kdl_parser import kdl_tree_from_urdf_model
from pykdl_utils.kdl_kinematics import KDLKinematics
import rospy
import numpy as np
from math import sin, cos, sqrt
from sensor_msgs.msg import JointState
from std_msgs.msg import Float64
from pdb import set_trace
model_file = "/home/jieming/catkin_ws/src/panda_simulation/franka_description/robots/model.urdf"
jt_positions = kdl.JntArray(7)
jt_velocities = kdl.JntArray(7)
flag=0
def callback(data):
global flag
if(flag==0):
print('first callback')
flag =1
for i in range(7):
jt_positions[i] = data.position[i]
jt_velocities[i] = data.velocity[i]
def main():
#ros configure
rospy.init_node('inv_dyn', anonymous=True)
rate = rospy.Rate(2000) # 1000hz
rospy.Subscriber("/robot1/joint_states", JointState, callback)
pub = rospy.Publisher('/robot1/panda_joint1_controller/command', Float64, queue_size=10)
pub2 = rospy.Publisher('/robot1/panda_joint2_controller/command', Float64, queue_size=10)
pub3 = rospy.Publisher('/robot1/panda_joint3_controller/command', Float64, queue_size=10)
pub4 = rospy.Publisher('/robot1/panda_joint4_controller/command', Float64, queue_size=10)
pub5 = rospy.Publisher('/robot1/panda_joint5_controller/command', Float64, queue_size=10)
pub6 = rospy.Publisher('/robot1/panda_joint6_controller/command', Float64, queue_size=10)
pub7 = rospy.Publisher('/robot1/panda_joint7_controller/command', Float64, queue_size=10)
#kdl configure
robot = URDF.from_xml_file(model_file)
tree = kdl_tree_from_urdf_model(robot)
chain = tree.getChain("panda_link0", "panda_link8")
grav_vector = kdl.Vector(0, 0, -9.8)
dyn_kdl = kdl.ChainDynParam(chain, grav_vector)
kdl_kin = KDLKinematics(robot, "panda_link0", "panda_link8")
grav_matrix = kdl.JntArray(7)
coriolis = kdl.JntArray(7)
goal_pose = kdl.Vector(0.1, 0, 0.9)
print('start')
#main
while not rospy.is_shutdown():
rate.sleep()
#if flag==0:
# continue
jt_positions[0]=0; jt_positions[1]=-0.785; jt_positions[2] = 0;
jt_positions[3]=-2.356; jt_positions[4]=0; jt_positions[5] = 1.57;
jt_positions[6]=0.785
dyn_kdl.JntToGravity(jt_positions, grav_matrix)
dyn_kdl.JntToCoriolis(jt_positions, jt_velocities, coriolis)
J = kdl_kin.jacobian(list(jt_positions))
cur_pose = kdl_kin.forward(list(jt_positions))
print(cur_pose)
print(coriolis)
print(grav_matrix)
print(J)
set_trace()
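        # Task-space PD sketch: the position error (scaled by a proportional
        # gain of 100) plus a damping term on the end-effector velocity J*qdot
        # is mapped to joint torques through the Jacobian transpose (tau_task).
        # Note that the commanded torque u below only applies gravity and
        # Coriolis compensation; tau_task is computed but not added.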
pos_error = np.array([ [goal_pose[0]-cur_pose[0,3]],
[goal_pose[1]-cur_pose[1,3]],
[goal_pose[2]-cur_pose[2,3]],
[0], [0], [0]])*100
vel_error = -2*(J.dot(list(jt_velocities))).reshape((6,1))
error = pos_error + vel_error
#error = kdl.Vector(goal_pose[0]-cur_pose[0,3], goal_pose[1]-cur_pose[1,3], goal_pose[2]-cur_pose[2,3], 0, 0, 0)*100
tau_task = J.T.dot(error)
#u = [grav_matrix[i]+coriolis[i]+tau_task[i] for i in range(grav_matrix.rows())]
u = [grav_matrix[i]+coriolis[i] for i in range(grav_matrix.rows())]
pub.publish( u[0])
pub2.publish(u[1])
pub3.publish(u[2])
pub4.publish(u[3])
pub5.publish(u[4])
pub6.publish(u[5])
pub7.publish(u[6])
main()
#print grav_matrix
#gravity_compensating_jt_torques = [grav_matrix[i] for i in range(grav_matrix.rows())]
#print gravity_compensating_jt_torques
#print tree.getNrOfSegments() ###8
#kdl_kin = KDLKinematics(robot, "panda_link0", "panda_link8")
#q = [1, 1, 3.1415926/2, -0.5, 0, 0, 0]
#q = [0, 0, 0, 0, 0, 0, 0]
#pose = kdl_kin.forward(q)
#print pose
#J = kdl_kin.jacobian(q)
#print J
``` |
{
"source": "jiena76/X11-Core",
"score": 3
} |
#### File: hardware/motor_control/__init__.py
```python
def MotorControl(*args, **kwargs):
try:
from MotorControl import MotorControl as MotorControl
return MotorControl(*args, **kwargs)
except Exception as e:
print "Failed to Initialize Hardware Motor Control (I2C PWM Device)"
print "Error: %s" % e.message
print "Using Mock Motor Control"
from MotorControl_Mock import MotorControl as MotorControl_Mock
return MotorControl_Mock()
```
#### File: hardware/motor_control/MotorControl_Mock.py
```python
class MotorControl(object):
def __init__(self, *args, **kwargs):
pass
def set(self, *args, **kwargs):
pass
def kill(self, *args, **kwargs):
pass
```
#### File: movement/hardware/Thrusters_Mock.py
```python
from BaseThrusters import BaseThrusters
class Thrusters(BaseThrusters):
def __init__(self):
pass
def set(self, values):
pass
def get(self):
pass
def stop(self):
pass
```
#### File: movement/mapper/BaseMapper.py
```python
from abc import ABCMeta, abstractmethod
class BaseMapper(object):
""" Abstract Base Class for a thrust mapper. It should implement one method,
calculate that takes in a desired thrust and a list of disabled thrusters.
"""
__metaclass__ = ABCMeta
@abstractmethod
def calculate(self, desired_thrust, disabled_thrusters=[]):
pass
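# Illustrative only (not part of the original repo): a minimal concrete mapper
# satisfying this interface. The class name and the pass-through mapping are
# assumptions for demonstration.
class PassThroughMapper(BaseMapper):
    def calculate(self, desired_thrust, disabled_thrusters=[]):
        # Zero out any disabled thrusters; pass the rest through unchanged.
        return [0 if i in disabled_thrusters else t
                for i, t in enumerate(desired_thrust)]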
```
#### File: rov/movement/structs_movement.py
```python
class thrusters_struct():
def __init__(self, thruster_values):
"""Takes in an array of thruster values and returns a class that wraps all of the values in easy to read
mnemonics and provides some very useful helper classes"""
self.__all_thrusters = thruster_values
def hor_front_left(self):
return self.__all_thrusters[0]
def hor_front_right(self):
return self.__all_thrusters[1]
def hor_back_left(self):
return self.__all_thrusters[2]
def hor_back_right(self):
return self.__all_thrusters[3]
def vert_front_left(self):
return self.__all_thrusters[4]
def vert_front_right(self):
return self.__all_thrusters[5]
def vert_back_left(self):
return self.__all_thrusters[6]
def vert_back_right(self):
return self.__all_thrusters[7]
def __iter__(self):
"""Allows for iterations over all of the thrusters"""
return iter(self.__all_thrusters)
def __sub__(self, other):
"""Subtracts the right thruster from the left thruster and returns a new thruster_struct class"""
return thrusters_struct([a - b for a, b in zip(self.__all_thrusters, other.all_thrusters)])
def __getitem__(self, index):
"""Allows array like lookups of the class so previous code will still work. Basically, you can
still treat this class as a list and the code will still work"""
return self.__all_thrusters[index]
def get_vert_thruster_values(self):
"""Returns all of the vertical thrusters"""
return self.__all_thrusters[4:8]
def get_horr_thruster_valeus(self):
"""Returns all of the horizontal thrusters"""
return self.__all_thrusters[0:4]
def normalize(self):
"""Normalizes all thruster values so that the largest thruster value becomes 1 with
all other thruster values scaled accordingly"""
max_value = max([abs(x) for x in self.__all_thrusters])
# Only normalize the thruster values if one of them is over 1.0
if max_value > 1:
self.__all_thrusters = [float(x) / max_value for x in self.__all_thrusters]
def stop(self):
"""Turns all thruster values to zero"""
self.__all_thrusters = [0 for _ in self.__all_thrusters]
if __name__ == "__main__":
a = [1, 2, 3, 4, 5, 6, 7, 8]
thrusters = thrusters_struct(a)
thrusters2 = thrusters_struct([11, 12, 13, 14, 15, 16, 17, 18])
print thrusters[0]
a[0] = 9
print thrusters[0]
a = [8, 7, 6, 5, 4, ]
"""for thruster in thrusters:
print thruster
thrusters3 = thrusters2 - thrusters
for thruster in thrusters3:
print thruster
print thrusters[7]
for thruster in thrusters.get_horr_thruster_valeus():
print "horr: " + str(thruster)
for thruster in thrusters.get_vert_thruster_values():
print "vert: " + str(thruster)
thrusters.normalize()
for thruster in thrusters:
print thruster
thrusters.stop()
for thruster in thrusters:
print thruster
#thrusters4 = thrusters_struct([-2, -1, -0.5, 0, 0.5, 1, 0.5, 0])
#thrusters4.normalize()
#for thruster in thrusters4:
# print thruster"""
```
#### File: movement/tests/test_algorithm_handler.py
```python
import pytest
import time
from rov.movement.controls.Algorithm_Handler import Master_Algorithm_Handler
from random import *
buffer = 0.005
# last time
lt = time.time()
data = {'sensors':
{
'imu' :
{
'linear_acceleration' :
{
'x' : 1,
'y' : 1
},
'euler' :
{
'roll': 1,
'pitch': 1,
'yaw': 1
}
},
'pressure' :
{
'pressure': 1
}
}
}
def rand():
return (random() / 5.0) + 0.9
def rand2():
return random() * 0.001 - 0.0005
def update_data(user_input, data, lt):
dt = time.time() - lt
lt = time.time()
data['sensors']['imu']['linear_acceleration']['x'] += user_input[0] * dt * rand() + rand2()
data['sensors']['imu']['linear_acceleration']['y'] += user_input[1] * dt * rand() + rand2()
data['sensors']['pressure']['pressure'] += user_input[2] * dt * rand() + rand2()
data['sensors']['imu']['euler']['roll'] += user_input[3] * dt * rand() + rand2()
data['sensors']['imu']['euler']['pitch'] += user_input[4] * dt * rand() + rand2()
data['sensors']['imu']['euler']['yaw'] += user_input[5] * dt * rand() + rand2()
if data['sensors']['imu']['euler']['roll'] > 360:
data['sensors']['imu']['euler']['roll'] -= 360
elif data['sensors']['imu']['euler']['roll'] < 0:
data['sensors']['imu']['euler']['roll'] += 360
@pytest.fixture()
def position():
position = [0,0,0,0,0,0]
position[0] = data['sensors']['imu']['linear_acceleration']['x']
position[1] = data['sensors']['imu']['linear_acceleration']['y']
position[2] = data['sensors']['pressure']['pressure']
position[3] = data['sensors']['imu']['euler']['roll']
position[4] = data['sensors']['imu']['euler']['pitch']
position[5] = data['sensors']['imu']['euler']['yaw']
return position
@pytest.fixture()
def sensor_data():
return data['sensors']
def test_returns_empty_user_input_if_deactivated():
# initializes a control algorithm with the desired position of 4 for the z parameter
pass
def test_activate_and_deactivate_functionality():
# initializes a control algorithm with the desired position of 4 for the z parameter
frozen = [1,2,1,2,1,2]
user_input = [0,0,0,0,0,0]
mah = Master_Algorithm_Handler(frozen, sensor_data())
for i in range(100):
time.sleep(buffer)
update_data(mah.master(user_input, frozen), data, lt)
time.sleep(buffer)
user_input = [0.5, 0.1, 0.2, 0.3, 0.4, 0.5]
mah = Master_Algorithm_Handler(frozen, sensor_data())
for i in range(100):
time.sleep(buffer)
update_data(mah.master(user_input, frozen), data, lt)
for i in range(6):
time.sleep(buffer)
frozen = [2,1,2,1,2,1]
user_input = [0.5, 0.1, 0.2, 0.3, 0.21, 0.14]
for i in range(100):
time.sleep(buffer)
update_data(mah.master(user_input, frozen), data, lt)
for i in range(6):
time.sleep(buffer)
mah.master(user_input, frozen)[i]
frozen = [1,2,1,2,1,2]
user_input = [0.5, 0.1, 0.2, 0.3, 0.21, 0.14]
for i in range(100):
time.sleep(buffer)
update_data(mah.master(user_input, frozen), data, lt)
for i in range(6):
time.sleep(buffer)
mah.master(user_input, frozen)[i]
frozen = [3,3,3,3,3,3]
user_input = [0.5, 0.1, 0.2, 0.3, 0.21, 0.14]
for i in range(100):
time.sleep(buffer)
update_data(mah.master(user_input, frozen), data, lt)
for i in range(6):
time.sleep(buffer)
mah.master(user_input, frozen)[i]
mah.plot_data()
```
#### File: sensors/imu/IMU_Mock.py
```python
from math import sin, pi
class IMU(object):
"""Mock IMU class
"""
def __init__(self):
self.accel_x = 0
self.accel_y = 0
self.accel_z = 0
self.pitch = 0
self.roll = 0
self.yaw = 0
self._angle = 0
def update(self):
self._angle += 1
self.roll = sin(self._angle * (pi)/180.0)
@property
def data(self):
return {
'euler': {
'yaw': self.yaw,
'roll': self.roll,
'pitch': self.pitch,
},
'gyro': {
'x': 0,
'y': 0,
'z': 0,
},
'acceleration': {
'x': self.accel_x,
'y': self.accel_y,
'z': self.accel_z,
},
'linear_acceleration': {
'x': 0,
'y': 0,
'z': 0,
},
'temp': 0,
}
```
#### File: sensors/imu/__init__.py
```python
def IMU():
try:
from BNO055 import BNO055
return BNO055()
except Exception as e:
print "Failed to Initialize Sparkfun IMU"
print "Error: %s" % e.message
print "Using Mock IMU"
from IMU_Mock import IMU as IMU_Mock
return IMU_Mock()
```
#### File: X11-Core/tests/rovclass.py
```python
from rovcontroller import ROVControl, getDefaultPackets
import unittest
ROVIP = "localhost"
CMDPORT = 1944
class CanDevice(object):
def __init__(self, name, conconfig=""):
if type(name) != str:
raise TypeError("Name needs to be a string")
self.name = name
def connect(self):
# Connect the can device
pass
def disconnect(self):
# disconnect the can device
pass
class RovInterface(object):
def __init__(self, ip, port, devices=[], neton = True):
if neton:
self._netInter = ROVControl(ip, port)
else:
self._netInter = None
if any([type(d) != CanDevice for d in devices]):
raise TypeError("list is not can devices")
self._devices = {d.name: d for d in devices}
@property
def dev(self):
return self._devices
@property
def lsdev(self):
return self._devices.iterkeys()
def sendPacket(self,packet):
if self._netInter != None:
self._netInter.getFlask(packet)
def readPacket(self):
if self._netInter != None:
            return self._netInter.getClient()
else:
return {}
def killRov(self):
#kill the rov
for d in self._devices.itervalues():
d.disconnect()
class RovTest(unittest.TestCase):
def setUp(self):
d1 = CanDevice("d1")
d2 = CanDevice("d2")
d3 = CanDevice("d3")
self.rovI = RovInterface(ROVIP,CMDPORT,[d1,d2,d3],neton=False)
def tearDown(self):
self.rovI.killRov()
```
#### File: X11-Core/tests/rovtest.py
```python
import unittest
ROVIP = "localhost"
ROVPORT = 5000
class CanDevice(object):
def __init__(self,name,canConfFile=""):
if type(name) != str:
raise TypeError("Name needs to be a string you idiot")
self.name = name
def connect(self):
pass
def disconnect(self):
pass
def destroyDevice(self):
pass
class ROVInterface(object):
def __init__(self,rovip,port,devices = []):
self.sockInt = None #ROVControl(rovip,port)
if any([type(d) != CanDevice for d in devices]):
raise TypeError("Devices need to be CAN devices")
self._devices = {d.name:d for d in devices}
for d in self._devices.itervalues():
d.connect()
def sendPacket(self,packet):
self.sockInt.getFlask(packet)
def recvPacket(self):
return self.sockInt.getClient()
@property
def dev(self):
return self._devices
@property
def lsdev(self):
return list(self._devices.iterkeys())
def killrov(self):
pass
class ROVTest(unittest.TestCase):
def setUp(self):
# Turn on the rov
# Connect peripherals to the ROV
d1 = CanDevice("d1")
d2 = CanDevice("d2")
d3 = CanDevice("d3")
self.rovI = ROVInterface(ROVIP,ROVPORT,[d1,d2,d3])
def tearDown(self):
# Turn off rov
self.rovI.killrov()
``` |
{
"source": "Jie-OY/-",
"score": 3
} |
#### File: Jie-OY/-/app.py
```python
from flask import Flask, render_template, jsonify, request
from utils import wise
app = Flask(__name__)
def format(text):
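    # Expected input (inferred from how wise() consumes the result): one
    # production per line, whitespace separated, e.g. "A abc ab B" becomes
    # {'A': ['abc', 'ab', 'B']}.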
d = dict()
r = text.split('\n')
for t in r:
        if t != '':
l = t.split()
d[l[0]] = l[1:]
return d
def output(d):
s = ''
for k, v in d.items():
        line = k + '->' + '|'.join(v)
        s += line + '\n'
return s
@app.route('/')
def index():
return render_template('index.html')
@app.route('/eliminate', methods=['POST'])
def eliminate():
G = request.json.get('G', None)
d = format(G)
print(d)
result = wise(d)
finally_result = output(result)
return jsonify(G_r=finally_result)
if __name__ == '__main__':
app.run(host='127.0.0.1', port=5000, debug=True)
```
#### File: Jie-OY/-/utils.py
```python
import copy
from collections import defaultdict
# G = {'A': ['abc', 'ab', 'abcd', 'a', 'b', 'bc', 'B']}
# G['B'] = ['Ab', 'Ac']
# print(G)
VN_dict = defaultdict(int)
G_c = None
result = dict()
def eliminate_common_factor(VN, exp_list):
# G: {'A': ['abc', 'ab', 'abcd', 'a', 'b', 'bc', 'B']}
index_dict = get_common_factor(exp_list)
if have_common_factor(index_dict):
for k, v in index_dict.items():
len_k = len(k)
tmp_list = []
if len(v) != 1:
for i in v:
exp = exp_list[i][len_k:]
tmp_list.append(exp if exp else 'ε')
new_VN = generate_VN(VN)
                exp_list.append(k + new_VN)  # append the new production aA'
                G_c[new_VN] = tmp_list  # add the newly generated rule to grammar G_c
remove_exp(index_dict, exp_list)
# print('VN:', VN)
result[VN] = exp_list
# print('result:', result)
G_c.pop(VN)
def remove_exp(index_dict, exp_list):
l = []
for v in index_dict.values():
if len(v) != 1:
l.extend(v)
    # When popping from a list, delete in reverse index order:
    # remove the larger indices first, then the smaller ones, otherwise the indices go out of range.
l = sorted(l, reverse=True)
for index in l:
exp_list.pop(index)
def get_common_factor_aux(exp_list):
"""
    For G = {'A': 'abc ab abcd a b bc'}
    :param exp_list: the right-hand sides of every production of grammar G as a list: ['abc', 'ab', 'abcd', 'a', 'b', 'bc']
    :return: a dict whose keys are first characters and whose values are the indices where they occur: {'b': [4, 5], 'a': [0, 1, 2, 3]}
"""
d = defaultdict(list)
for index, exp in enumerate(exp_list):
d[exp[0]].append(index)
return d
def get_common_factor(exp_list):
d = get_common_factor_aux(exp_list)
index = 1
d_copy = copy.deepcopy(d)
for k, v in d.items():
new_k = copy.deepcopy(k)
k_copy = copy.deepcopy(k)
while True:
try:
c = exp_list[v[0]][index]
except Exception as e:
break
for i in v:
try:
char = exp_list[i][index]
except:
break
if char == c:
pass
else:
break
else:
new_k = k_copy + c
d_copy[new_k] = v
del d_copy[k_copy]
index += 1
k_copy = copy.deepcopy(new_k)
continue
if new_k == k_copy:
break
return d_copy
def have_common_factor(d):
for v in d.values():
if len(v) != 1:
return True
else:
return False
def generate_VN(VN):
VN_dict[VN[0]] += 1
return VN[0] + VN_dict[VN[0]] * "'"
def wise(G):
global G_c, VN_dict, result
VN_dict = defaultdict(int)
G_c = copy.deepcopy(G)
result = dict()
while True:
size = len(result)
try:
for k, v in G.items():
eliminate_common_factor(k, v)
except Exception as e:
raise e
G = copy.deepcopy(G_c)
if size == len(result):
break
return result
if __name__ == '__main__':
G = {'A': ['abc', 'ab', 'abcd', 'a', 'b', 'bc', 'B']}
G_c = copy.deepcopy(G)
while True:
size = len(result)
try:
for k, v in G.items():
eliminate_common_factor(k, v)
except Exception as e:
raise e
G = copy.deepcopy(G_c)
if size == len(result):
break
print(result)
``` |
{
"source": "jiep/liboqs",
"score": 2
} |
#### File: scripts/copy_from_upstream/update_pqclean_alg_docs.py
```python
import argparse
import os
import subprocess
import yaml
import inspect
parser = argparse.ArgumentParser()
parser.add_argument("--liboqs-root", default=os.path.join("..", ".."))
parser.add_argument("-w", "--write-changes", dest="write_changes", action='store_true')
parser.add_argument("-v", "--verbosity", type=int)
args = parser.parse_args()
if args.verbosity:
DEBUG = args.verbosity
else:
DEBUG = 0
if not args.write_changes:
print("--write-changes not set; changes will not be written out.")
def shell(command, expect=0):
subprocess_stdout = None if DEBUG > 0 else subprocess.DEVNULL
ret = subprocess.run(command, stdout=subprocess_stdout, stderr=subprocess_stdout)
if ret.returncode != expect:
raise Exception("'{}' failed with error {}. Expected {}.".format(" ".join(command), ret, expect))
def load_yaml(filename, encoding='utf-8'):
with open(filename, mode='r', encoding=encoding) as fh:
return yaml.safe_load(fh.read())
def store_yaml(filename, contents, encoding='utf-8'):
with open(filename, mode='w', encoding=encoding) as fh:
yaml.dump(contents, fh, sort_keys=False, allow_unicode=True)
def fetch_upstream(upstream_info):
work_dir_root = os.path.join(args.liboqs_root, 'scripts', 'copy_from_upstream', 'repos')
os.makedirs(work_dir_root, exist_ok=True)
work_dir = os.path.join(work_dir_root, upstream_info['name'])
work_dotgit = os.path.join(work_dir, '.git')
if not os.path.exists(work_dotgit):
shell(['git', 'init', work_dir])
shell(['git', '--git-dir', work_dotgit, 'remote', 'add', 'origin', upstream_info['git_url']])
shell(['git', '--git-dir', work_dotgit, '--work-tree', work_dir, 'remote', 'set-url', 'origin', upstream_info['git_url']])
shell(['git', '--git-dir', work_dotgit, '--work-tree', work_dir, 'fetch', '--depth=1', 'origin', upstream_info['git_commit']])
shell(['git', '--git-dir', work_dotgit, '--work-tree', work_dir, 'reset', '--hard', upstream_info['git_commit']])
return work_dir
def rhs_if_not_equal(lhs, rhs, not_equal_msg):
if lhs != rhs:
if DEBUG > 0:
caller = inspect.getframeinfo(inspect.stack()[1][0])
print("Line {}: Discrepancy in {}: lhs: {}, rhs: {}".format(caller.lineno, not_equal_msg, lhs, rhs))
return rhs
return lhs
def update_pqclean_kem_alg_docs(kems, upstream_info, pqclean_root, meta_yaml_path_template):
for kem in kems:
if kem['upstream_location'] == 'pqclean':
oqs_yaml_path = os.path.join(args.liboqs_root, 'docs', 'algorithms', 'kem', '{}.yml'.format(kem['name']))
if os.path.isfile(oqs_yaml_path):
oqs_yaml = load_yaml(oqs_yaml_path)
else:
continue
# We assume that the ordering of "parameter-sets"
# in the OQS YAML files matches that of copy_from_upstream.yml
for index, scheme in enumerate(kem['schemes']):
if DEBUG > 0:
print("Examining {}'s META.yml.".format(scheme['pretty_name_full']))
pqclean_meta_path = os.path.join(pqclean_root, meta_yaml_path_template.format_map(scheme))
pqclean_yaml = load_yaml(pqclean_meta_path)
oqs_yaml['type'] = rhs_if_not_equal(oqs_yaml['type'], pqclean_yaml['type'], "type")
oqs_yaml['principal-submitters'] = rhs_if_not_equal(oqs_yaml['principal-submitters'], pqclean_yaml['principal-submitters'], "principal-submitters")
upstream_base_url = upstream_info['git_url'][:-len(".git")]
oqs_yaml['upstream'] = rhs_if_not_equal(oqs_yaml['upstream'], "{}/commit/{}".format(upstream_base_url, upstream_info['git_commit']), "upstream")
if 'auxiliary-submitters' in pqclean_yaml:
oqs_yaml['auxiliary-submitters'] = rhs_if_not_equal(oqs_yaml['auxiliary-submitters'] if 'auxiliary-submitters' in oqs_yaml else '', pqclean_yaml['auxiliary-submitters'], "auxiliary-submitters")
oqs_scheme_yaml = oqs_yaml['parameter-sets'][index]
# TODO: PQClean and liboqs pretty-naming conventions for the
# following algorithms are out of sync.
if kem['name'] == 'classic_mceliece' or kem['name'] == 'hqc' or kem['name'] == 'ntru' or kem['name'] == 'saber':
oqs_scheme_yaml['name'] = rhs_if_not_equal(oqs_scheme_yaml['name'], scheme['pretty_name_full'], "scheme pretty name")
else:
oqs_scheme_yaml['name'] = rhs_if_not_equal(oqs_scheme_yaml['name'], pqclean_yaml['name'], "scheme pretty name")
oqs_scheme_yaml['claimed-nist-level'] = rhs_if_not_equal(oqs_scheme_yaml['claimed-nist-level'], pqclean_yaml['claimed-nist-level'], "claimed-nist-level")
oqs_scheme_yaml['claimed-security'] = rhs_if_not_equal(oqs_scheme_yaml['claimed-security'], pqclean_yaml['claimed-security'], "claimed-security")
oqs_scheme_yaml['length-public-key'] = rhs_if_not_equal(oqs_scheme_yaml['length-public-key'], pqclean_yaml['length-public-key'], "length-public-key")
oqs_scheme_yaml['length-ciphertext'] = rhs_if_not_equal(oqs_scheme_yaml['length-ciphertext'], pqclean_yaml['length-ciphertext'], "length-ciphertext")
                oqs_scheme_yaml['length-secret-key'] = rhs_if_not_equal(oqs_scheme_yaml['length-secret-key'], pqclean_yaml['length-secret-key'], "length-secret-key")
oqs_scheme_yaml['length-shared-secret'] = rhs_if_not_equal(oqs_scheme_yaml['length-shared-secret'], pqclean_yaml['length-shared-secret'], "length-shared-secret")
for impl_index, impl in enumerate(oqs_scheme_yaml['implementations']):
for pqclean_impl in pqclean_yaml['implementations']:
if impl['upstream-id'] == pqclean_impl['name']:
break
if 'supported_platforms' in pqclean_impl:
impl['supported-platforms'] = rhs_if_not_equal(impl['supported-platforms'], pqclean_impl['supported_platforms'], "supported-platforms")
else:
impl['supported-platforms'] = rhs_if_not_equal(impl['supported-platforms'], "all", "supported-platforms")
oqs_scheme_yaml['implementations'][impl_index] = impl
oqs_yaml['parameter-sets'][index] = oqs_scheme_yaml
if args.write_changes:
store_yaml(oqs_yaml_path, oqs_yaml)
def update_pqclean_sig_alg_docs(sigs, upstream_info, pqclean_root, meta_yaml_path_template):
for sig in sigs:
if sig['upstream_location'] == 'pqclean':
oqs_yaml_path = os.path.join(args.liboqs_root, 'docs', 'algorithms', 'sig', '{}.yml'.format(sig['name']))
if os.path.isfile(oqs_yaml_path):
oqs_yaml = load_yaml(oqs_yaml_path)
else:
continue
# We assume that the ordering of "parameter-sets"
# in the OQS YAML files matches that of copy_from_upstream.yml
for index, scheme in enumerate(sig['schemes']):
if DEBUG > 0:
print("Examining {}'s META.yml.".format(scheme['pretty_name_full']))
pqclean_meta_path = os.path.join(pqclean_root, meta_yaml_path_template.format_map(scheme))
pqclean_yaml = load_yaml(pqclean_meta_path)
oqs_yaml['type'] = rhs_if_not_equal(oqs_yaml['type'], pqclean_yaml['type'], "type")
oqs_yaml['principal-submitters'] = rhs_if_not_equal(oqs_yaml['principal-submitters'], pqclean_yaml['principal-submitters'], "principal-submitters")
upstream_base_url = upstream_info['git_url'][:-len(".git")]
oqs_yaml['upstream'] = rhs_if_not_equal(oqs_yaml['upstream'], "{}/commit/{}".format(upstream_base_url, upstream_info['git_commit']), "upstream")
if 'auxiliary-submitters' in pqclean_yaml:
oqs_yaml['auxiliary-submitters'] = rhs_if_not_equal(oqs_yaml['auxiliary-submitters'] if 'auxiliary-submitters' in oqs_yaml else '', pqclean_yaml['auxiliary-submitters'], "auxiliary-submitters")
oqs_scheme_yaml = oqs_yaml['parameter-sets'][index]
# TODO: PQClean and liboqs pretty-naming conventions for the
# following algorithms are out of sync.
if sig['name'] == 'sphincs' or sig['name'] == 'rainbow':
oqs_scheme_yaml['name'] = rhs_if_not_equal(oqs_scheme_yaml['name'], scheme['pretty_name_full'], "scheme pretty name")
else:
oqs_scheme_yaml['name'] = rhs_if_not_equal(oqs_scheme_yaml['name'], pqclean_yaml['name'], "scheme pretty name")
oqs_scheme_yaml['claimed-nist-level'] = rhs_if_not_equal(oqs_scheme_yaml['claimed-nist-level'], pqclean_yaml['claimed-nist-level'], "claimed-nist-level")
oqs_scheme_yaml['claimed-security'] = rhs_if_not_equal(oqs_scheme_yaml['claimed-security'], 'EUF-CMA', "claimed-security")
oqs_scheme_yaml['length-public-key'] = rhs_if_not_equal(oqs_scheme_yaml['length-public-key'], pqclean_yaml['length-public-key'], "length-public-key")
                oqs_scheme_yaml['length-secret-key'] = rhs_if_not_equal(oqs_scheme_yaml['length-secret-key'], pqclean_yaml['length-secret-key'], "length-secret-key")
oqs_scheme_yaml['length-signature'] = rhs_if_not_equal(oqs_scheme_yaml['length-signature'], pqclean_yaml['length-signature'], "length-signature")
for impl_index, impl in enumerate(oqs_scheme_yaml['implementations']):
for pqclean_impl in pqclean_yaml['implementations']:
if impl['upstream-id'] == pqclean_impl['name']:
break
if 'supported_platforms' in pqclean_impl:
impl['supported-platforms'] = rhs_if_not_equal(impl['supported-platforms'], pqclean_impl['supported_platforms'], "supported-platforms")
else:
impl['supported-platforms'] = rhs_if_not_equal(impl['supported-platforms'], "all", "supported-platforms")
oqs_scheme_yaml['implementations'][impl_index] = impl
oqs_yaml['parameter-sets'][index] = oqs_scheme_yaml
if args.write_changes:
store_yaml(oqs_yaml_path, oqs_yaml)
instructions = load_yaml(
os.path.join(args.liboqs_root, 'scripts', 'copy_from_upstream', 'copy_from_upstream.yml'),
encoding='utf-8')
for upstream in instructions['upstreams']:
if upstream['name'] == 'pqclean':
pqclean_info = upstream
break
pqclean_root = fetch_upstream(pqclean_info)
update_pqclean_kem_alg_docs(instructions['kems'], pqclean_info, pqclean_root, pqclean_info['kem_meta_path'])
update_pqclean_sig_alg_docs(instructions['sigs'], pqclean_info, pqclean_root, pqclean_info['sig_meta_path'])
``` |
{
"source": "jiep/unicode-similarity",
"score": 2
} |
#### File: jiep/unicode-similarity/similarity.py
```python
import numpy as np
import os
import sys
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
from keras.applications import VGG16
from sklearn.metrics.pairwise import cosine_similarity
import argparse
from pathlib import Path
import errno
def remove_extension(x): return x.split('.')[0]
def file_exists(path):
return Path(path).is_file()
def dir_exists(path):
return Path(path).is_dir()
def print_error(type, file):
print(FileNotFoundError(errno.ENOENT,
'The {} {} does not exist'.format(type, file)))
def similarity_pairs(dir, output, verbose=False):
latin_characters = [format(i + 32, '05x') for i in range(96)]
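    # Code points U+0020..U+007F (the printable ASCII range) as zero-padded,
    # 5-digit hex strings, matching the glyph image filenames (e.g. '00041.png' for 'A').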
unicode_characters = sorted(os.listdir(dir))
model = VGG16(include_top=False, weights='imagenet')
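    # With include_top=False the network returns convolutional feature maps;
    # each glyph image is passed through VGG16 and the flattened features are
    # compared with cosine similarity below.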
with open(output, 'a') as file:
to_file = ' ' + ' '.join(list(map(remove_extension,
unicode_characters))) + '\n'
file.write(to_file)
for i, latin_character in enumerate(latin_characters):
with open(output, 'a') as file:
to_file = latin_character
for j, unicode_chracter in enumerate(unicode_characters):
latin_path = os.path.join(dir, latin_character) + ".png"
latin_img = load_img(latin_path)
unicode_path = os.path.join(dir, unicode_chracter)
unicode_img = load_img(unicode_path)
latin_pred = model.predict(latin_img).reshape(1, -1)
unicode_pred = model.predict(unicode_img).reshape(1, -1)
sim = similarity(latin_pred, unicode_pred)
to_file = to_file + ' ' + str(sim)
if verbose:
print(('Similarity between '
'{} and {}: {}').format(latin_character,
remove_extension(
unicode_chracter), sim))
to_file = to_file + '\n'
file.write(to_file)
to_file = ''
def similarity(x, y):
return np.asscalar(cosine_similarity(x, y))
def load_img(path):
img = image.load_img(path, target_size=(224, 224),
grayscale=False, interpolation='bilinear')
x = image.img_to_array(img)
x = preprocess_input(x)
x = np.expand_dims(x, axis=0)
return x
def main():
parser = argparse.ArgumentParser(description='Compute the similarity '
'between Unicode and latin '
'characters by using '
'transfer learning.')
parser.add_argument('-i', '--images', default='images')
parser.add_argument('-o', '--output', default='similarities.txt')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
images = args.images
output = args.output
verbose = args.verbose
if file_exists(output):
print('Removing {}...'.format(output))
os.remove(output)
if not dir_exists(images):
print_error('directory', images)
sys.exit(1)
similarity_pairs(images, output, verbose)
if __name__ == '__main__':
main()
```
#### File: jiep/unicode-similarity/threshold.py
```python
from pathlib import Path
import numpy as np
import pickle
import argparse
import errno
import sys
def file_exists(path):
return Path(path).is_file()
def dir_exists(path):
return Path(path).is_dir()
def remove_extension(x): return x.split('.')[0]
def print_error(type, file):
print(FileNotFoundError(errno.ENOENT,
'The {} {} does not exist'.format(type, file)))
def calculate_threshold(similarity, output='confusables',
threshold=0.8, verbose=False):
lines = [line.rstrip('\n') for line in open(similarity)]
unicode_characters = np.asarray(lines[0].split(' ')[1:])
data = {}
data['threshold'] = threshold
data['characters'] = {}
for l in lines[1:]:
line = l.split(' ')
latin = line[0]
del line[0]
similarity_row = np.asarray(line, dtype=np.float)
indexes = np.where(similarity_row >= threshold)
data['characters'][latin] = unicode_characters[np.asarray(indexes[0])]\
.tolist()
chars = unicode_characters[np.asarray(indexes[0])].tolist()
if(verbose):
print('[{}] {}: {}'.format(len(chars), latin, ','.join(chars)))
output = '{}-{}.pickle'.format(output, int(threshold*100))
with open(output, 'wb') as f:
pickle.dump(data, f)
def main():
parser = argparse.ArgumentParser(description='Filter Unicode characters '
'based on a given threshold '
'between 0 and 1 '
'and a similarity matrix')
parser.add_argument('-s', '--similarity', default='similarities.txt')
parser.add_argument('-t', '--threshold', default=0.8, type=float)
parser.add_argument('-o', '--output', default='confusables')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
similarity = args.similarity
threshold = args.threshold
output = args.output
verbose = args.verbose
if not file_exists(similarity):
print_error('file', similarity)
sys.exit(1)
calculate_threshold(similarity, output, threshold, verbose)
if __name__ == '__main__':
main()
``` |
{
"source": "jiep/v2d-cli",
"score": 3
} |
#### File: deep_confusables/utils/files.py
```python
from pathlib import Path
DIR_NAME = '.unicode'
def file_exists(path):
file = Path(path)
return file.is_file()
def dir_exists(path):
file = Path(path)
return file.is_dir()
def create_home_directory():
home = Path.home()
full_path = home.joinpath(DIR_NAME)
full_path.mkdir(exist_ok=True, parents=True)
def join(file):
return home_directory().joinpath(file)
def exists_file_home(file):
return file_exists(home_directory().joinpath(file))
def exists_dir_home(dir):
return dir_exists(home_directory().joinpath(dir))
def home_directory():
return Path.home().joinpath(DIR_NAME)
``` |
{
"source": "jie/pyapp_skeleton",
"score": 3
} |
#### File: pyapp_skeleton/libs/email_libs.py
```python
import logging
import smtplib
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
# from email.mime.multipart import MIMEMultipart
from exchangelib import DELEGATE, Account, Credentials, Configuration, Message, \
Mailbox, HTMLBody, FileAttachment
logger = logging.getLogger(__name__)
def send_email(display, username, password, host, port, sendto, title,
content, ssl=False, timeout=10, debug=0):
if not isinstance(sendto, list):
sendto = [sendto]
me = "%s<%s>" % (display, username)
msg = MIMEText(content, 'html', 'utf-8')
msg['Subject'] = title
msg['From'] = me
msg['To'] = ';'.join(sendto)
if ssl:
server = smtplib.SMTP_SSL('%s:%d' % (host, port), timeout=timeout)
else:
server = smtplib.SMTP('%s:%d' % (host, port), timeout=timeout)
server.set_debuglevel(debug)
server.login(username, password)
server.sendmail(me, sendto, msg.as_string())
server.quit()
def addimg(src, imgid):
fp = open(src, 'rb')
msgImage = MIMEImage(fp.read())
fp.close()
msgImage.add_header('Content-ID', imgid)
return msgImage
def send_with_exchange(username, password, server, address, content,
subject='', to_recipients=[], attachements=[]):
credentials = Credentials(username=username, password=password)
config = Configuration(server=server, credentials=credentials)
account = Account(
primary_smtp_address=address,
autodiscover=False,
config=config,
credentials=credentials,
access_type=DELEGATE)
_to_recipients = []
for item in to_recipients:
_to_recipients.append(Mailbox(email_address=item['email']))
m = Message(
account=account,
subject=subject,
body=HTMLBody(content),
to_recipients=_to_recipients)
if attachements:
for item in attachements:
with open(item['src'], 'rb') as f:
img_attach = FileAttachment(name=item['name'], content=f.read())
m.attach(img_attach)
m.send()
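# Illustrative usage (all values below are placeholders, not from this project):
# send_with_exchange('user', 'password', 'mail.example.com', 'user@example.com',
#                    '<p>Hello</p>', subject='Report',
#                    to_recipients=[{'email': 'someone@example.com'}],
#                    attachements=[{'name': 'plot.png', 'src': '/tmp/plot.png'}])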
```
#### File: pyapp_skeleton/libs/service.py
```python
import tornado
import os
import logging
from dragonlib.web.micro_service import MicroService as MyMicroService
logger = logging.getLogger(__name__)
class MicroService(MyMicroService):
def init_database(self):
pass
def start(self):
port = self.port or self.getSetting('PORT')
self.application.listen(port, xheaders=True)
print('@starting development: %s' % port)
tornado.ioloop.IOLoop.instance().start()
def init_logger(self):
super(MicroService, self).init_logger()
# if self.getSetting('SENTRY_URL') and self.getSetting('DEPLOY') == 'production':
# import sentry_sdk
# sentry_sdk.init(self.getSetting('SENTRY_URL'))
``` |
{
"source": "jiepy/pydata",
"score": 3
} |
#### File: pydata/pyscripts/GetOS.py
```python
import os
import platform
from collections import OrderedDict
def os_version():
Info=platform.uname()
distr=platform.linux_distribution()
print '-' * 50
print u" OS 类型 : %s " % Info[0]
print u" OS 版本 : %s_%s_%s " % (distr[0],distr[1],distr[2])
print u" OS 主机名 : %s " % Info[1]
print u" OS 内核版本 : %s " % Info[2]
print ''
def meminfo():
''' Return the information in /proc/meminfo
as a dictionary '''
meminfo=OrderedDict()
with open('/proc/meminfo') as f:
for line in f:
meminfo[line.split(':')[0]] = line.split(':')[1].strip()
return meminfo
""" print out the /proc/cpuinfo
file
"""
def cpuinfo():
cpuinfo=OrderedDict()
procinfo=OrderedDict()
nprocs=0
with open('/proc/cpuinfo') as f:
for line in f:
if not line.strip():
                cpuinfo['proc%s' % nprocs] = procinfo
nprocs=nprocs+1
procinfo=OrderedDict()
else:
if len(line.split(':')) == 2:
procinfo[line.split(':')[0].strip()]=line.split(':')[1].strip()
else:
procinfo[line.split(':')[0].strip()] = ''
return cpuinfo
def load_stat():
loadavg = {}
f = open("/proc/loadavg")
con=f.read().split()
f.close()
loadavg['lavg_1']=con[0]
loadavg['lavg_5']=con[1]
loadavg['lavg_15']=con[2]
return loadavg
def uptime_stat():
uptime = {}
f = open('/proc/uptime')
ut=f.read().split()
f.close()
all_sec=float(ut[0])
MINUTE,HOUR,DAY = 60,3600,86400
uptime['day'] = int(all_sec / DAY)
uptime['hour'] = int((all_sec % DAY ) / HOUR)
uptime['minute'] = int((all_sec % HOUR) / MINUTE)
uptime['second'] = int(all_sec % MINUTE)
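    # /proc/uptime's second field is the cumulative idle time (summed over all
    # CPUs), so this ratio is a rough idle fraction of the machine.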
uptime['Free rate'] = float(ut[1]) / float(ut[0])
return uptime
"""
List of all process IDs currently active
"""
def process_list():
pids=[]
for subdir in os.listdir('/proc'):
if subdir.isdigit():
pids.append(subdir)
return pids
if __name__ == '__main__':
os_version()
cpuinfo = cpuinfo()
for processor in cpuinfo.keys():
print "Cpu ___型号 : %s" % (cpuinfo[processor]['model name'])
break
ln=len(cpuinfo)
    print 'CPU logical cores: %s\n' % ln
meminfo=meminfo()
total=int(meminfo['MemTotal'].split()[0])
free=int(meminfo['MemFree'].split()[0])
buffers=int(meminfo['Buffers'].split()[0])
cached=int(meminfo['Cached'].split()[0])
swap=int(meminfo['SwapTotal'].split()[0])
MEMUsedPerc=100 * float('%0.2f' % (float(total-free-buffers-cached)/total))
    print '''Memory usage :
    memory usage     : %s%%
    total memory     : %s M
    free memory      : %s M
    total swap       : %s M\n''' % (MEMUsedPerc,total/1024,free/1024,swap/1024)
up_stat = uptime_stat()
    print 'System uptime : %s days %s hours %s minutes %s seconds' \
% (up_stat['day'],up_stat['hour'],\
up_stat['minute'],up_stat['second'])
    print 'Server idle rate : %s%%' % (float('%0.2f'%up_stat['Free rate'])*100)
load=load_stat()
print 'load average : %s | %s | %s' % \
(load['lavg_1'],load['lavg_5'],load['lavg_15'])
pids = process_list()
    print 'Total processes : %s' % (len(pids))
print '-' * 50
``` |
{
"source": "jieqin-ai/AMR",
"score": 3
} |
#### File: AMR/net/resnet50_amr.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from misc import torchutils
from net import resnet50
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.resnet50 = resnet50.resnet50(pretrained=True, strides=(2, 2, 2, 1))
self.stage1 = nn.Sequential(self.resnet50.conv1, self.resnet50.bn1, self.resnet50.relu, self.resnet50.maxpool,
self.resnet50.layer1)
self.stage2 = nn.Sequential(self.resnet50.layer2)
self.stage3 = nn.Sequential(self.resnet50.layer3)
self.stage4 = nn.Sequential(self.resnet50.layer4)
self.classifier = nn.Conv2d(2048, 20, 1, bias=False)
self.resnet50_2 = resnet50.resnet50(pretrained=True, use_amm=True, strides=(2, 2, 2, 1))
self.stage2_1 = nn.Sequential(self.resnet50_2.conv1, self.resnet50_2.bn1, self.resnet50_2.relu, self.resnet50_2.maxpool,
self.resnet50_2.layer1)
self.stage2_2 = nn.Sequential(self.resnet50_2.layer2)
self.stage2_3 = nn.Sequential(self.resnet50_2.layer3)
self.stage2_4 = nn.Sequential(self.resnet50_2.layer4)
self.classifier2 = nn.Conv2d(2048, 20, 1, bias=False)
self.backbone = nn.ModuleList([self.stage1, self.stage2, self.stage3, self.stage4])
self.backbone2 = nn.ModuleList([self.stage2_1, self.stage2_2, self.stage2_3, self.stage2_4])
self.newly_added = nn.ModuleList([self.classifier, self.classifier2])
def forward(self, x):
x_ori = x.clone()
# # branch1
x = self.stage1(x).detach()
x = self.stage2(x).detach()
x = self.stage3(x).detach()
x = self.stage4(x).detach()
cam = F.conv2d(x, self.classifier.weight)
cam = F.relu(cam)
cam = cam[0] + cam[1].flip(-1)
x = torchutils.gap2d(x, keepdims=True)
x = self.classifier(x).detach()
x = x.view(-1, 20)
# # branch2
x2 = self.stage2_1(x_ori).detach()
x2 = self.stage2_2(x2)
x2 = self.stage2_3(x2)
x2 = self.stage2_4(x2)
cam2 = F.conv2d(x2, self.classifier2.weight)
cam2 = F.relu(cam2)
cam2 = cam2[0] + cam2[1].flip(-1)
x2 = torchutils.gap2d(x2, keepdims=True)
x2 = self.classifier2(x2)
x2 = x2.view(-1, 20)
return x, cam, x2, cam2
def train(self, mode=True):
for p in self.resnet50.conv1.parameters():
p.requires_grad = False
for p in self.resnet50.bn1.parameters():
p.requires_grad = False
for p in self.resnet50_2.conv1.parameters():
p.requires_grad = False
for p in self.resnet50_2.bn1.parameters():
p.requires_grad = False
def trainable_parameters(self):
return (list(self.backbone.parameters()), list(self.backbone2.parameters()), list(self.newly_added.parameters()))
class CAM(Net):
def __init__(self):
super(CAM, self).__init__()
def forward(self, x, step=1):
x_ori = x.clone()
# branch1
if step == 1:
x = self.stage1(x)
x = self.stage2(x)
x = self.stage3(x)
x = self.stage4(x)
cam1 = F.conv2d(x, self.classifier.weight)
return cam1
# # branch2
if step == 2:
x2 = self.stage2_1(x_ori)
x2 = self.stage2_2(x2)
x2 = self.stage2_3(x2)
x2 = self.stage2_4(x2)
cam2 = F.conv2d(x2, self.classifier2.weight)
return cam2
``` |
{
"source": "JieqingShi/CarND-Advanced-Lane-Lines",
"score": 3
} |
#### File: JieqingShi/CarND-Advanced-Lane-Lines/lane_find.py
```python
import numpy as np
import cv2
def hist(img):
"""
Find histogram (aka sum of pixel values) of binary image from bottom half of image
Returns:
histogram: sum of pixel values of bottom half of pictures
"""
bottom_half = img[img.shape[0]//2:,:] # 0:img.shape[0]//2 is the top half
histogram = bottom_half.sum(axis=0)
return histogram
def fit_poly(img_shape, leftx, lefty, rightx, righty):
"""
Fit polynomial of second degree on image of size img_shape for left lane and right lane x and y coordinates
Returns:
left_fitx: x-values of fitted polynomial curve for left lane
right_fitx: x-values of fitted polynomial curve for right lane
ploty: y-values of fitted polynomial curve (same for left and right lane)
"""
left_fit = np.polyfit(lefty, leftx, deg=2)
right_fit = np.polyfit(righty, rightx, deg=2)
# Generate x and y values for plotting
ploty = np.linspace(0, img_shape[0]-1, img_shape[0])
### TO-DO: Calc both polynomials using ploty, left_fit and right_fit ###
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
return left_fitx, right_fitx, ploty
def find_lanes_from_scratch(binary_warped, nwindows=9, margin=100, minpix=50, draw_lanes=True):
"""
Detects lane pixels from scratch using sliding windows on the histogram peaks of the binary filtered image
Returns:
left_fitx: x-values of fitted polynomial curve of detected left-lane
right_fitx: x-values of fitted polynomial curve of detected right-lane
left_fit: parameters of fitted polynomial of second order of left lane
right_fit: parameters of fitted polynomial of second order of right lane
ploty: y-values of fitted polynomial curve (same for left and right)
out_img: image with marked pixels and sliding windows for left and right lane
detected_flag: boolean flag, always True
"""
# Find histogram peaks
histogram = hist(binary_warped)
out_img = np.dstack((binary_warped, binary_warped, binary_warped)) # create output image to draw on (not necessary)
midpoint = np.int(histogram.shape[0]//2) # 640
leftx_base = np.argmax(histogram[:midpoint]) # find index of left peak (indicates ll)
rightx_base = np.argmax(histogram[midpoint:]) + midpoint # find index of right peak (indicates rl)
# Sliding windows
window_height = np.int(binary_warped.shape[0]//nwindows) # 80
nonzero = binary_warped.nonzero() # a tuple for x and y
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
leftx_current = leftx_base
rightx_current = rightx_base
left_lane_inds = []
right_lane_inds = []
for window in range(nwindows): # index 0 to 8
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
if draw_lanes:
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2) # bottom left to top right, in green, with thickness 2
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)
good_left_inds = ((nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high) &
(nonzeroy >= win_y_low ) & (nonzeroy < win_y_high)).nonzero()[0]
good_right_inds = ((nonzerox >= win_xright_low) & (nonzerox < win_xright_high) &
(nonzeroy >= win_y_low ) & (nonzeroy < win_y_high)).nonzero()[0]
left_lane_inds.append(good_left_inds) # indices
right_lane_inds.append(good_right_inds)
if len(good_left_inds) > minpix:
leftx_current = int(np.mean(nonzerox[good_left_inds]))
# print(leftx_current)
if len(good_right_inds) > minpix:
rightx_current = int(np.mean(nonzerox[good_right_inds]))
# Find indices of left and right lane lines
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit polynomial of second degree
left_fit = np.polyfit(lefty, leftx, deg=2)
right_fit = np.polyfit(righty, rightx, deg=2)
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
detected_flag = True
# Visualize
if draw_lanes:
out_img[lefty, leftx] = [255, 0, 0]
out_img[righty, rightx] = [0, 0, 255]
for index in range(out_img.shape[0]-1):
cv2.line(out_img, (int(left_fitx[index]), int(ploty[index])), (int(left_fitx[index+1]), int(ploty[index+1])), (255,255,0), 3)
cv2.line(out_img, (int(right_fitx[index]), int(ploty[index])), (int(right_fitx[index+1]), int(ploty[index+1])), (255,255,0), 3)
return left_fitx, right_fitx, left_fit, right_fit, ploty, out_img, detected_flag
def find_lanes_from_prior(binary_warped, left_fit, right_fit, margin=100, draw_lanes=True):
"""
Detects lane pixels by searching in margin around previous lane line position
Returns:
left_fitx: x-values of fitted polynomial curve of detected left-lane
right_fitx: x-values of fitted polynomial curve of detected right-lane
left_fit: parameters of fitted polynomial of second order of left lane (no modification done; passed directly from input)
right_fit: parameters of fitted polynomial of second order of right lane (no modification done; passed directly from input)
ploty: y-values of fitted polynomial curve (same for left and right)
out_img: image with marked pixels and sliding windows for left and right lane
detected_flag: boolean flag; is True if lane lines are found, False if not found
"""
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
left_lane_inds = ((nonzerox >= left_fit[0]*nonzeroy**2 +
left_fit[1]*nonzeroy + left_fit[2] - margin) &
(nonzerox < left_fit[0]*nonzeroy**2 +
left_fit[1]*nonzeroy + left_fit[2] + margin)).nonzero()[0]
right_lane_inds = ((nonzerox >= right_fit[0]*nonzeroy**2 +
right_fit[1]*nonzeroy + right_fit[2] - margin) &
(nonzerox < right_fit[0]*nonzeroy**2 +
right_fit[1]*nonzeroy + right_fit[2] + margin)).nonzero()[0]
# Again, extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Create an image to draw on and an image to show the selection window
out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
if leftx.size == 0 or rightx.size == 0:
detected_flag = False
return None, None, None, None, None, out_img, False # super ugly
else:
detected_flag = True
# Fit new polynomials
left_fitx, right_fitx, ploty = fit_poly(binary_warped.shape, leftx, lefty, rightx, righty)
## Visualization ##
if draw_lanes:
window_img = np.zeros_like(out_img)
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255,0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255,0))
out_img = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
for index in range(out_img.shape[0]-1):
cv2.line(out_img, (int(left_fitx[index]), int(ploty[index])), (int(left_fitx[index+1]), int(ploty[index+1])), (255,255,0), 3)
cv2.line(out_img, (int(right_fitx[index]), int(ploty[index])), (int(right_fitx[index+1]), int(ploty[index+1])), (255,255,0), 3)
return left_fitx, right_fitx, left_fit, right_fit, ploty, out_img, detected_flag
def measure_curvature(ploty, left_fitx, right_fitx):
"""
Calculates the curvature of polynomial functions in meters.
Returns:
left_curverad: curvature of left lane line
right_curverad: curvature of right lane line
"""
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
left_fitx = left_fitx[::-1]
right_fitx = right_fitx[::-1]
left_fit_cr = np.polyfit(ploty*ym_per_pix, left_fitx*xm_per_pix, 2)
right_fit_cr = np.polyfit(ploty*ym_per_pix, right_fitx*xm_per_pix, 2)
y_eval = np.max(ploty)
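    # Radius of curvature of x = A*y**2 + B*y + C, evaluated at the bottom of
    # the image (y_eval): R = (1 + (2*A*y + B)**2)**(3/2) / |2*A|, with the fit
    # done in meters via the pixel-to-meter scale factors above.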
left_curverad = (1+(2*left_fit_cr[0]*y_eval*ym_per_pix+left_fit_cr[1])**2)**(3/2)/(2*np.abs(left_fit_cr[0]))
right_curverad = (1+(2*right_fit_cr[0]*y_eval*ym_per_pix+right_fit_cr[1])**2)**(3/2)/(2*np.abs(right_fit_cr[0]))
return left_curverad, right_curverad
def measure_offset(left_fitx, right_fitx, midpoint=640):
"""
Calculates offset from center of image from positions of left and right lane lines
Returns:
offset: offset from center
"""
return (midpoint-(right_fitx[-1]+left_fitx[-1])/2)*3.7/700
``` |
{
"source": "jieqiong-pang/DSCI560-HW5",
"score": 3
} |
#### File: jieqiong-pang/DSCI560-HW5/resulting.py
```python
import pandas as pd
from bokeh.plotting import figure, curdoc
from bokeh.models import HoverTool, Title
from bokeh.transform import factor_cmap
from bokeh.models import ColumnDataSource, FactorRange
from bokeh.layouts import column, row
from bokeh.models.widgets import Select
# region 1. question a
# set up data
latimes_agency_totals = pd.read_csv("latimes-state-totals.csv")
latimes_agency_totals = latimes_agency_totals[
(latimes_agency_totals["date"] >= "2020-08-01") & (latimes_agency_totals["date"] <= "2020-08-31")]
latimes_agency_totals['date_time'] = pd.to_datetime(latimes_agency_totals['date'])
# set up plot
p1 = figure(
tools='crosshair,pan,box_zoom',
y_axis_label='New Coronavirus cases',
x_axis_label='datetime',
x_axis_type='datetime',
plot_width=1030,
plot_height=500,
)
p1.title.text = "New Coronavirus Cases in California in August"
p1.title.align = "center"
p1.title.text_font_size = "20px"
p1.align = "center"
p1.add_layout(Title(text=" download data from 'https://github.com/datadesk/california-coronavirus-data' ("
"latimes-state-totals.csv) in GitHub", text_font_style="italic"), 'above')
p1.add_layout(Title(text="Source: provided by local public health agencies; published by "
"'latimes.com/coronavirustracker'", text_font_style="italic"), 'above')
p1.add_layout(Title(text="Date of last update: 2020-10-15", text_font_style="italic"), 'above')
p1.line('date_time', 'new_confirmed_cases', source=latimes_agency_totals)
p1.circle('date_time', 'new_confirmed_cases', source=latimes_agency_totals, fill_color="blue", size=5)
p1.add_tools(HoverTool(
tooltips=[
('date', '@date_time{%Y-%m-%d}'),
('new cases', '@new_confirmed_cases'),
],
formatters={
'@date_time': 'datetime',
}))
# endregion
# region 2. question b
data = pd.read_csv("cdph-race-ethnicity.csv")
race_total = data.loc[data['age'] == 'all']
race_total = race_total[["date", "race", "confirmed_cases_percent", "deaths_percent", "population_percent"]]
total = race_total[["date", "race", "confirmed_cases_percent", "deaths_percent", "population_percent"]]
races = sorted(set(total["race"].tolist()))
death = total["deaths_percent"].tolist()
case = total["confirmed_cases_percent"].tolist()
population = total["population_percent"].tolist()
bar2 = ['confirmed_cases', 'population']
bar3 = ['death', 'population']
date = sorted(set(race_total['date']), reverse=True)
# set up data
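# Nested categorical factors: each x value is a (race, measure) tuple, which
# bokeh's FactorRange renders as grouped bars (two bars per race).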
x2 = [(race, bar) for race in races for bar in bar2]
y2 = sum(zip(case, population), ())
source2 = ColumnDataSource(data=dict(x=x2, y=y2))
# set up plot
p2 = figure(x_range=FactorRange(*x2), plot_height=550,plot_width=1030,
y_axis_label='percent', x_axis_label='race',
toolbar_location=None, tools="")
p2.title.text = "Confirmed_case% VS Population%"
p2.title.align = "center"
p2.title.text_font_size = "20px"
p2.add_layout(Title(text=" published by 'latimes.com/coronavirustracker'; download data from "
"'https://github.com/datadesk/california-coronavirus-data' (cdph-race-ethnicity.csv) in "
"GitHub", text_font_style="italic"), 'above')
p2.add_layout(Title(text="Source: provided by the California Department of Public Health "
"'https://www.cdph.ca.gov/Programs/CID/DCDC/Pages/COVID-19/Race-Ethnicity.aspx';",
text_font_style="italic"), 'above')
p2.add_layout(Title(text="Date of last update: 2020-10-14", text_font_style="italic"), 'above')
r2 = p2.vbar(x='x', top='y', width=0.9, source=source2, line_color="white",
fill_color=factor_cmap('x', palette=["#c9d9d3", "#718dbf"], factors=bar2, start=1, end=2))
p2.y_range.start = 0
p2.x_range.range_padding = 0.1
p2.xaxis.major_label_orientation = 0.8
p2.xgrid.grid_line_color = None
p2.add_tools(HoverTool(
tooltips=[
('confirmed_case', '@x'),
('population', '@y'),
]))
# set up widgets
select1 = Select(title="Confirmed case date:", value=date[0], options=date, width=105)
# set up callbacks
def update2(attrname, old, new):
selected_data = race_total[race_total['date'] == select1.value]
a = selected_data['confirmed_cases_percent']
b = selected_data["population_percent"]
y = sum(zip(a, b), ())
r2.data_source.data['y'] = y
select1.on_change('value', update2)
# endregion
# region 3. question c
# set up data
x3 = [(race, bar) for race in races for bar in bar3]
y3 = sum(zip(death, population), ())
source3 = ColumnDataSource(data=dict(x=x3, y=y3))
# set up plot
p3 = figure(x_range=FactorRange(*x3), plot_height=550,plot_width=1030,
y_axis_label='percent',
x_axis_label='race',
toolbar_location=None, tools="")
p3.title.text = "Death% VS Population%"
p3.title.align = "center"
p3.title.text_font_size = "20px"
p3.add_layout(Title(text=" published by 'latimes.com/coronavirustracker'; download data from "
"'https://github.com/datadesk/california-coronavirus-data' (cdph-race-ethnicity.csv) in "
"GitHub", text_font_style="italic"), 'above')
p3.add_layout(Title(text="Source: provided by the California Department of Public Health "
"'https://www.cdph.ca.gov/Programs/CID/DCDC/Pages/COVID-19/Race-Ethnicity.aspx';",
text_font_style="italic"), 'above')
p3.add_layout(Title(text="Date of last update: 2020-10-14", text_font_style="italic"), 'above')
r3 = p3.vbar(x='x', top='y', width=0.9, source=source3, line_color="white",
fill_color=factor_cmap('x', palette=["#c9d9d3", "#718dbf"], factors=bar3, start=1, end=2))
p3.y_range.start = 0
p3.x_range.range_padding = 0.1
p3.xaxis.major_label_orientation = 0.8
p3.xgrid.grid_line_color = None
p3.add_tools(HoverTool(
tooltips=[
        ('group', '@x'),
        ('percent', '@y'),
]))
# set up widgets
select2 = Select(title="Death date:", value=date[0], options=date, width=105)
# set up callbacks
def update3(attrname, old, new):
selected_data = race_total[race_total['date'] == select2.value]
a = selected_data['deaths_percent']
b = selected_data["population_percent"]
y = sum(zip(a, b), ())
r3.data_source.data['y'] = y
select2.on_change('value', update3)
# endregion
# set up layouts and add to document
curdoc().add_root(column(p1))
curdoc().add_root(row(p2, select1))
curdoc().add_root(row(p3, select2))
``` |
{
"source": "jieqiu0630/ProMP",
"score": 2
} |
#### File: ProMP/run_scripts/pro-mp_run_mario.py
```python
from meta_policy_search.baselines.linear_baseline import LinearFeatureBaseline
from meta_policy_search.envs.point_envs.point_env_2d_corner import MetaPointEnvCorner
from meta_policy_search.envs.normalized_env import normalize
from meta_policy_search.meta_algos.pro_mp import ProMP
from meta_policy_search.meta_trainer import Trainer
from meta_policy_search.samplers.meta_sampler import MetaSampler
from meta_policy_search.samplers.meta_sample_processor import MetaSampleProcessor
from meta_policy_search.policies.meta_gaussian_mlp_policy import MetaGaussianMLPPolicy
from meta_policy_search.policies.conv import MAMLGaussianMLPPolicy
from meta_policy_search.utils import logger
from meta_policy_search.utils.utils import set_seed, ClassEncoder
import numpy as np
import tensorflow as tf
import os
import json
import argparse
import time
# Import for mario
from railrl.torch.metac.gcg.make_env import make_env
meta_policy_search_path = '/'.join(os.path.realpath(os.path.dirname(__file__)).split('/')[:-1])
def main(config):
set_seed(config['seed'])
baseline = globals()[config['baseline']]() #instantiate baseline
env = make_env(config['env_id'], config)
# import pdb; pdb.set_trace()# env = globals()[config['env']]() # instantiate env
# env = normalize(env) # apply normalize wrapper to env
print("MARIO obs shape", env.observation_space.shape)
policy = MAMLGaussianMLPPolicy(
'conv',
obs_dim=int(np.prod(env.observation_space.shape)),
action_dim=int(np.prod(env.action_space.shape)),
meta_batch_size=config['meta_batch_size'],
hidden_sizes=config['hidden_sizes'],
)
sampler = MetaSampler(
env=env,
policy=policy,
rollouts_per_meta_task=config['rollouts_per_meta_task'], # This batch_size is confusing
meta_batch_size=config['meta_batch_size'],
max_path_length=config['max_path_length'],
parallel=config['parallel'],
)
sample_processor = MetaSampleProcessor(
baseline=baseline,
discount=config['discount'],
gae_lambda=config['gae_lambda'],
normalize_adv=config['normalize_adv'],
)
algo = ProMP(
policy=policy,
inner_lr=config['inner_lr'],
meta_batch_size=config['meta_batch_size'],
num_inner_grad_steps=config['num_inner_grad_steps'],
learning_rate=config['learning_rate'],
num_ppo_steps=config['num_promp_steps'],
clip_eps=config['clip_eps'],
target_inner_step=config['target_inner_step'],
init_inner_kl_penalty=config['init_inner_kl_penalty'],
adaptive_inner_kl_penalty=config['adaptive_inner_kl_penalty'],
)
trainer = Trainer(
algo=algo,
policy=policy,
env=env,
sampler=sampler,
sample_processor=sample_processor,
n_itr=config['n_itr'],
num_inner_grad_steps=config['num_inner_grad_steps'],
)
trainer.train()
if __name__=="__main__":
idx = int(time.time())
parser = argparse.ArgumentParser(description='ProMP: Proximal Meta-Policy Search')
parser.add_argument('--config_file', type=str, default='', help='json file with run specifications')
parser.add_argument('--dump_path', type=str, default=meta_policy_search_path + '/data/pro-mp/run_%d' % idx)
args = parser.parse_args()
if args.config_file: # load configuration from json file
with open(args.config_file, 'r') as f:
config = json.load(f)
else: # use default config
config = {
'seed': 1,
'baseline': 'LinearFeatureBaseline',
'env_id': 'mariomultilevel',
# sampler config
'rollouts_per_meta_task': 2,
'max_path_length': 10,
'parallel': True,
# sample processor config
'discount': 0.99,
'gae_lambda': 1,
'normalize_adv': True,
# policy config
'hidden_sizes': (64, 64),
'learn_std': True, # whether to learn the standard deviation of the gaussian policy
# ProMP config
'inner_lr': 0.1, # adaptation step size
'learning_rate': 1e-3, # meta-policy gradient step size
'num_promp_steps': 5, # number of ProMp steps without re-sampling
'clip_eps': 0.3, # clipping range
'target_inner_step': 0.01,
'init_inner_kl_penalty': 5e-4,
'adaptive_inner_kl_penalty': False, # whether to use an adaptive or fixed KL-penalty coefficient
'n_itr': 1001, # number of overall training iterations
'meta_batch_size': 40, # number of sampled meta-tasks per iterations
'num_inner_grad_steps': 1, # number of inner / adaptation gradient steps
# Mario config
"env_kwargs" : {
"screen_size": 20,
"grayscale_obs": False,
"frame_skip": 1,
"lifelong": False,
"max_lives": 1,
"scramble_action_freq": 0,
"frame_stack": 1,
"action_stack": 0,
"default_level": 0,
"shuffle_env_actions": True,
"shuffle_envs": False,
"singletask": True
},
"algo_kwargs":{
"batch_size":8,
"adapt_batch_size": 64,
"meta_batch_size":26,
"test_size": 6,
"mpc_horizon":5,
"window_len": 200,
"min_num_steps_before_training": 1000,
"min_num_steps_before_adapting": 7,
"num_expl_steps_per_train_loop": 100,
"max_path_length":1000,
"eval_freq": 10,
"outer_update_steps":20,
"inner_update_steps":4,
"adapt_freq": 1,
"num_adapt_steps": 5,
"num_epochs":10000,
"inner_lr":1e-3,
"inner_opt_name": "SGD",
"adapt_opt_name": "SGD",
"adapt_inner_lr": 1e-3,
"debug":False,
"use_consecutive_batch": False,
"reset_meta_model": True,
"adapt_same_batch": False,
"train_same_batch": True,
"shuffle_actions": False,
"explore_if_stuck": False,
"shuffle_env_actions": False,
"adapt_from_replay": False,
"test_buffer_size": 550,
"save_buffer": True
},
"trainer_kwargs":{
"learning_rate":1e-4,
"discount":0.99,
"data_type": "uint8",
"opt_name": "Adam",
"optimizer_kwargs": {
"weight_decay": 0
},
"bayesian": False
},
"controller_kwargs": {
"num_simulated_paths":500,
"cem_steps":3
},
"reward_predictor_kwargs":{
"reward_type":"categorical",
"num_bins":41
},
"replay_buffer_kwargs":{
"max_replay_buffer_size":20000
},
"adaptive_replay_buffer_kwargs":{
"max_replay_buffer_size":10
},
"extra_args": {
"prior_sigma_1": 0.001,
"prior_pi": 1.0,
"posterior_rho_init": -6
},
"model_kwargs": {
"data_type": "uint8",
"reward_scale": 10.0,
"bayesian": False,
"conv_norm_type": "layer"
},
"log_comet": True,
"debug": False,
"use_gpu": True,
}
# configure logger
logger.configure(dir=args.dump_path, format_strs=['stdout', 'log', 'csv'],
snapshot_mode='last_gap')
# dump run configuration before starting training
json.dump(config, open(args.dump_path + '/params.json', 'w'), cls=ClassEncoder)
# start the actual algorithm
main(config)
``` |
{
"source": "jiequancui/LBGAT",
"score": 2
} |
#### File: jiequancui/LBGAT/auto_attack_eval.py
```python
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torch.autograd import Variable
import torch.optim as optim
from torchvision import datasets, transforms
from models.wideresnet import *
from autoattack.autoattack import AutoAttack
parser = argparse.ArgumentParser(description='PyTorch CIFAR PGD Attack Evaluation')
parser.add_argument('--test-batch-size', type=int, default=64, metavar='N',
                    help='input batch size for testing (default: 64)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--epsilon', default=0.031,
help='perturbation')
parser.add_argument('--num-steps', default=20, type=int,
help='perturb number of steps')
parser.add_argument('--step-size', default=0.003,
help='perturb step size')
parser.add_argument('--random',
default=True,
help='random initialization for PGD')
parser.add_argument('--model-path',
default='./checkpoints/model_cifar_wrn.pt',
help='model for white-box attack evaluation')
parser.add_argument('--source-model-path',
default='./checkpoints/model_cifar_wrn.pt',
help='source model for black-box attack evaluation')
parser.add_argument('--target-model-path',
default='./checkpoints/model_cifar_wrn.pt',
help='target model for black-box attack evaluation')
parser.add_argument('--white-box-attack', default=True,
help='whether perform white-box attack')
parser.add_argument('--mark', default=None, type=str,
help='log file name')
parser.add_argument('--widen_factor', default=None, type=int,
help='widen_factor for wideresnet')
parser.add_argument('--num_classes', default=10, type=int,
help='cifar10 or cifar100')
parser.add_argument('--dataparallel', default=False, type=bool,
help='whether model is trained with dataparallel')
args = parser.parse_args()
# settings
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
# set up data loader
transform_test = transforms.Compose([transforms.ToTensor(),])
if args.num_classes == 100:
testset = torchvision.datasets.CIFAR100(root='./data/cifar100', train=False, download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, **kwargs)
else:
testset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=False, download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, **kwargs)
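# Note: despite its name, _pgd_whitebox below delegates the attack to the
# AutoAttack adversary's standard evaluation; epsilon, num_steps and step_size
# are kept in the signature for compatibility but are not used directly here.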
def _pgd_whitebox(model,
X,
y,
adversary,
epsilon=args.epsilon,
num_steps=args.num_steps,
step_size=args.step_size):
out = model(X)
err = (out.data.max(1)[1] != y.data).float().sum()
X_pgd = adversary.run_standard_evaluation(X, y, bs=X.size(0))
err_pgd = (model(X_pgd).data.max(1)[1] != y.data).float().sum()
return err, err_pgd
def eval_adv_test_whitebox(model, device, test_loader, adversary):
"""
evaluate model by white-box attack
"""
model.eval()
robust_err_total = 0
natural_err_total = 0
for data, target in test_loader:
data, target = data.to(device), target.to(device)
# pgd attack
X, y = Variable(data, requires_grad=True), Variable(target)
        err_natural, err_robust = _pgd_whitebox(model, X, y, adversary)
robust_err_total += err_robust
natural_err_total += err_natural
open("Logs/"+args.mark,"a+").write("robust_err_total: "+str(robust_err_total)+"\n")
open("Logs/"+args.mark,"a+").write("natural_err_total: "+str(natural_err_total)+"\n")
def main():
if args.white_box_attack:
# white-box attack
open("Logs/"+args.mark,"a+").write('pgd white-box attack\n')
model = WideResNet(num_classes=args.num_classes, widen_factor=args.widen_factor)
if args.dataparallel:
model = nn.DataParallel(model).to(device)
else:
model = model.to(device)
model.load_state_dict(torch.load(args.model_path))
adversary = AutoAttack(model, norm='Linf', eps=args.epsilon, version='standard', log_path = "Logs/"+args.mark)
adversary.seed = 0
eval_adv_test_whitebox(model, device, test_loader, adversary)
if __name__ == '__main__':
main()
``` |
{
"source": "jieralice13/forte",
"score": 2
} |
#### File: examples/biobert_ner/bio_ner_predictor.py
```python
from typing import Dict, List, Optional, Type, Tuple, Any
import numpy as np
import torch
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
)
from forte.common.configuration import Config
from forte.common.resources import Resources
from forte.data.data_pack import DataPack
from forte.data.ontology import Annotation
from forte.data.types import DataRequest
from forte.processors.base.batch_processor import FixedSizeBatchProcessor
from ft.onto.base_ontology import Sentence, EntityMention, Subword
class BioBERTNERPredictor(FixedSizeBatchProcessor):
"""
    A named entity recognizer fine-tuned on BioBERT.
    Note that to use :class:`BioBERTNERPredictor`, the :attr:`ontology` of
    :class:`Pipeline` must be an ontology that includes
``ft.onto.base_ontology.Subword`` and ``ft.onto.base_ontology.Sentence``.
"""
def __init__(self):
super().__init__()
self.resources = None
self.device = None
self.ft_configs = None
self.model_config = None
self.model = None
self.tokenizer = None
@staticmethod
def _define_context() -> Type[Annotation]:
return Sentence
@staticmethod
def _define_input_info() -> DataRequest:
input_info: DataRequest = {
Subword: [],
Sentence: [],
}
return input_info
def initialize(self, resources: Resources, configs: Config):
super().initialize(resources, configs)
if resources.get("device"):
self.device = resources.get("device")
else:
self.device = torch.device('cuda') if torch.cuda.is_available() \
else torch.device('cpu')
self.resources = resources
self.ft_configs = configs
model_path = self.ft_configs.model_path
self.model_config = AutoConfig.from_pretrained(model_path)
self.tokenizer = AutoTokenizer.from_pretrained(model_path)
self.model = AutoModelForTokenClassification.from_pretrained(
model_path,
from_tf=bool(".ckpt" in model_path),
config=self.model_config
)
self.model.to(self.device)
@torch.no_grad()
def predict(self, data_batch: Dict[str, Dict[str, List[str]]]) \
-> Dict[str, Dict[str, List[np.array]]]:
sentences = data_batch['context']
subwords = data_batch['Subword']
inputs = self.tokenizer(sentences, return_tensors="pt", padding=True)
inputs = {key: value.to(self.device) for key, value in inputs.items()}
input_shape = inputs['input_ids'].shape
if input_shape[1] > 512:
# TODO: Temporarily work around the length problem.
# The real solution should further split up the sentences to make
# the sentences shorter.
labels_idx = inputs['input_ids'].new_full(
input_shape, 2, device='cpu')[:, 1:-1].numpy()
else:
outputs = self.model(**inputs)[0].cpu().numpy()
score = np.exp(outputs) / np.exp(outputs).sum(-1, keepdims=True)
labels_idx = score.argmax(axis=-1)[:, 1:-1] # Remove placeholders.
pred: Dict = {"Subword": {"ner": [], "tid": []}}
for i in range(len(subwords["tid"])):
tids = subwords["tid"][i]
ner_tags = []
for j in range(len(tids)):
ner_tags.append(self.model.config.id2label[labels_idx[i, j]])
pred["Subword"]["ner"].append(np.array(ner_tags))
pred["Subword"]["tid"].append(np.array(tids))
return pred
def _complete_entity(self,
subword_entities: List[Dict[str, Any]],
data_pack: DataPack,
tids: List[int]) -> Tuple[int, int]:
""" Complete entity span from predicted subword entities
Start from the first subword with predicted entity. If this entity
is a subword (e.g. "##on"), then move on to the previous subword until
it's no longer a subword (e.g. "br")
"""
first_idx: int = subword_entities[0]['idx']
first_tid = subword_entities[0]['tid']
while first_idx > 0 and not data_pack.get_entry(
first_tid).is_first_segment:
first_idx -= 1
first_tid = tids[first_idx]
last_idx: int = subword_entities[-1]['idx']
while last_idx < len(tids) - 1 and not data_pack.get_entry(
tids[last_idx + 1]).is_first_segment:
last_idx += 1
return first_idx, last_idx
def _compose_entities(self,
entities: List[Dict[str, Any]],
data_pack: DataPack,
tids: List[int]) -> List[Tuple[int, int]]:
""" Composes entity spans from subword entity predictions
Label Syntax:
A "B" label indicates the beginning of an entity, an "I" label
indicates the continuation of an entity, and an "O" label indicates
the absence of an entity.
Example: with - br - ##on - ##chi - ##oli - ##tis - .
O - B - I - I - I - I - O
Due to possible instabilities of the model on out-of-distribution data,
sometimes the prediction may not follow the label format.
Example 1: with - br - ##on - ##chi - ##oli - ##tis - .
O - B - I - O - I - O - O
Example 2: with - br - ##on - ##chi - ##oli - ##tis - .
O - O - O - I - I - I - O
This method takes entity predictions of subwords and recovers the
set of complete entities, defined by the indices of their beginning
and ending subwords. (begin_idx, end_idx)
"""
complete_entities: List[Tuple[int, int]] = []
subword_entities: List[Dict[str, Any]] = []
for entity in entities:
subword = data_pack.get_entry(entity['tid'])
if entity['label'] == 'B' and subword.is_first_segment:
# Flush the existing entity and start a new entity
if subword_entities:
complete_entity = \
self._complete_entity(subword_entities,
data_pack,
tids)
complete_entities.append(complete_entity)
subword_entities = [entity]
else:
                # Continue accumulating subword entities
subword_entities.append(entity)
if subword_entities:
complete_entity = self._complete_entity(subword_entities,
data_pack,
tids)
complete_entities.append(complete_entity)
return complete_entities
def pack(self, data_pack: DataPack,
output_dict: Optional[Dict[str, Dict[str, List[Any]]]] = None):
"""
        Write the prediction results back to the datapack by writing the
        predicted NER tags to the original subwords and converting the
        predictions into spans that make sense in a word-by-word segmentation.
"""
if output_dict is None:
return
for i in range(len(output_dict["Subword"]["tid"])):
tids = output_dict["Subword"]["tid"][i]
labels = output_dict["Subword"]["ner"][i]
# Filter to labels not in `self.ft_configs.ignore_labels`
entities = [dict(idx=idx, label=label, tid=tid)
for idx, (label, tid) in enumerate(zip(labels, tids))
if label not in self.ft_configs.ignore_labels]
entity_groups = self._compose_entities(entities, data_pack, tids)
# Add NER tags and create EntityMention ontologies.
for first_idx, last_idx in entity_groups:
first_token: Subword = data_pack.get_entry( # type: ignore
tids[first_idx])
begin = first_token.span.begin
last_token: Subword = data_pack.get_entry( # type: ignore
tids[last_idx])
end = last_token.span.end
entity = EntityMention(data_pack, begin, end)
entity.ner_type = self.ft_configs.ner_type
@classmethod
def default_configs(cls):
r"""Default config for NER Predictor"""
configs = super().default_configs()
        # TODO: Batcher in NER needs to be updated to use the system one.
configs["batcher"] = {"batch_size": 10}
more_configs = {'model_path': None,
'ner_type': 'BioEntity',
'ignore_labels': ['O']}
configs.update(more_configs)
return configs
```
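A standalone sketch of the subword-to-entity recovery idea behind `_compose_entities` and `_complete_entity` above may help; it works on plain token and tag lists rather than Forte's DataPack, and the sample tokens are taken from the docstring example.

```python
# Standalone sketch: recover word-level entity spans from subword BIO tags.
# A token starting with "##" is a continuation of the previous word piece.
def compose_entities(tokens, tags):
    """tokens: subword strings; tags: 'B', 'I' or 'O' per subword."""
    spans = []
    i = 0
    while i < len(tokens):
        if tags[i] in ("B", "I"):          # treat a stray "I" like a "B"
            start = i
            # extend left over continuation pieces ("##...") to the word start
            while start > 0 and tokens[start].startswith("##"):
                start -= 1
            end = i
            # extend right while the next piece continues the entity or word
            while end + 1 < len(tokens) and (
                    tags[end + 1] == "I" or tokens[end + 1].startswith("##")):
                end += 1
            spans.append((start, end))
            i = end + 1
        else:
            i += 1
    return spans

tokens = ["with", "br", "##on", "##chi", "##oli", "##tis", "."]
tags   = ["O",    "B",  "I",    "I",     "I",     "I",     "O"]
print(compose_entities(tokens, tags))  # [(1, 5)]
```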
#### File: data_augmentation/data_select/data_select_and_augment_example.py
```python
import argparse
import logging
import yaml
from forte.data.multi_pack import MultiPack
from forte.pipeline import Pipeline
from forte.processors.base.data_selector_for_da import RandomDataSelector
from forte.processors.nltk_processors import NLTKWordTokenizer, NLTKPOSTagger
from forte.data.selector import AllPackSelector
from forte.data.caster import MultiPackBoxer
from forte.processors.data_augment import ReplacementDataAugmentProcessor
logging.root.setLevel(logging.INFO)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--config_file", default="./config.yml",
help="Config YAML filepath")
args = parser.parse_args()
# loading config
config = yaml.safe_load(open(args.config_file, "r"))
nlp: Pipeline[MultiPack] = Pipeline()
nlp.set_reader(RandomDataSelector(),
config=config["data_selector_config"])
nlp.add(component=MultiPackBoxer(), config=config["boxer_config"])
nlp.add(component=NLTKWordTokenizer(), selector=AllPackSelector())
nlp.add(component=NLTKPOSTagger(), selector=AllPackSelector())
nlp.add(component=ReplacementDataAugmentProcessor(),
config=config["da_processor_config"])
nlp.initialize()
for _, m_pack in enumerate(nlp.process_dataset()):
aug_pack = m_pack.get_pack('augmented_input')
logging.info(aug_pack.text)
if __name__ == "__main__":
main()
```
#### File: examples/gpt2/multipack_pipeline_gpt2.py
```python
import json
from termcolor import colored
from forte.data.multi_pack import MultiPack
from forte.pipeline import Pipeline
from ft.onto.base_ontology import Token, Sentence
def create_pipeline(config_path: str) -> Pipeline[MultiPack]:
pl = Pipeline[MultiPack]()
pl.init_from_config_path(config_path)
print("\nFinished loading\n")
return pl
if __name__ == '__main__':
    # This line adds a reader and 3 processors into the pipeline
# 1. forte.data.readers.MultiPackSentenceReader
# 2. forte.processors.TextGenerationProcessor
# 3. forte.processors.nltk_processors.NLTKWordTokenizer
# 4. forte.processors.nltk_processors.NLTKPOSTagger
nlp = create_pipeline('sample_multipack_pipeline_gpt.yml')
nlp.initialize()
input_dir = "data/"
multipack: MultiPack = nlp.process_one(input_dir)
input_pack_name = "input_src"
output_pack_name = "output_tgt"
src_cnt = len(list(multipack.get_pack(input_pack_name).get(Sentence)))
tgt_cnt = len(list(multipack.get_pack(output_pack_name).get(Sentence)))
link_cnt = multipack.num_links
print(f'sentence_cnt: src{src_cnt}, tgt{tgt_cnt}, link_cnt{link_cnt}')
with open("multipack_output.txt", "w+") as fout:
input_pack = multipack.get_pack(input_pack_name)
output_pack = multipack.get_pack(output_pack_name)
for context, gen_sentence in zip(input_pack.get(Sentence),
output_pack.get(Sentence)):
print(colored("Initial Context", "green"), context.text)
print(colored("Generated Sentence", "green"), gen_sentence.text)
print("======================TAGS======================")
for token in output_pack.get(entry_type=Token,
range_annotation=gen_sentence):
print(colored("Token", "red"), token.text,
colored("POS Tag", "red"), token.pos)
print("======================END======================")
parsed = json.loads(multipack.serialize())
fout.write(json.dumps(parsed, indent=4))
```
#### File: data/extractors/seqtagging_extractor.py
```python
import logging
from typing import Tuple, List, Dict, Union, Optional, Iterable, Type
from torch import Tensor
from forte.common.configuration import Config
from forte.data.converter.feature import Feature
from forte.data.data_pack import DataPack
from forte.data.base_extractor import BaseExtractor
from forte.data.extractors.utils import bio_tagging, add_entry_to_pack
from forte.data.ontology import Annotation
from forte.utils import get_class
logger = logging.getLogger(__name__)
__all__ = [
"BioSeqTaggingExtractor"
]
class BioSeqTaggingExtractor(BaseExtractor):
r"""BioSeqTaggingExtractor will the feature by performing BIO encoding
for the attribute of entry and aligning to the tagging_unit entry. Most of
the time, a user will not need to call this class explicitly, they will
be called by the framework.
Args:
config: An instance of `Dict` or
:class:`~forte.common.configuration.Config`.
See :meth:`default_configs` for available options and
default values.
"""
def initialize(self, config: Union[Dict, Config]):
# pylint: disable=attribute-defined-outside-init
super().initialize(config=config)
if self.config.attribute is None:
raise AttributeError("attribute is required "
"in BioSeqTaggingExtractor.")
if not self.config.tagging_unit:
raise AttributeError("tagging_unit is required in "
"BioSeqTaggingExtractor.")
self.attribute: str = self.config.attribute
self.tagging_unit: Type[Annotation] = \
get_class(self.config.tagging_unit)
self.is_bert: bool = self.config.is_bert
@classmethod
def default_configs(cls):
r"""Returns a dictionary of default hyper-parameters.
Here:
entry_type (str).
Required. The string to the ontology type that the extractor
will get feature from, e.g: `"ft.onto.base_ontology.EntityMention"`.
attribute (str): Required. The attribute name of the
entry from which labels are extracted.
tagging_unit (str): Required. The tagging label
will align to the tagging_unit Entry,
e.g: `"ft.onto.base_ontology.Token"`.
"vocab_method" (str)
What type of vocabulary is used for this extractor.
`raw`, `indexing`, `one-hot` are supported, default is `indexing`.
Check the behavior of vocabulary under different setting
in :class:`~forte.data.vocabulary.Vocabulary`
"need_pad" (bool)
Whether the `<PAD>` element should be added to vocabulary. And
whether the feature need to be batched and padded. Default is True.
When True, pad_value has to be set.
"vocab_use_unk" (bool)
Whether the `<UNK>` element should be added to vocabulary.
Default is true.
"pad_value" (int)
                ID assigned to pad. It should be an integer smaller than 0.
Default is 0.
"vocab_unk_id" (int)
                ID assigned to unk. It should be an integer smaller than 0.
Default is 1.
is_bert (bool)
It indicates whether Bert model is used. If true, padding
will be added to the beginning and end of a sentence
corresponding to the special tokens ([CLS], [SEP])
used in Bert. Default is False.
For example, the config can be:
.. code-block:: python
{
"entry_type": "ft.onto.base_ontology.EntityMention",
"attribute": "ner_type",
"tagging_unit": "ft.onto.base_ontology.Token"
}
The extractor will extract the BIO NER tags for instances.
A possible feature can be:
.. code-block:: python
[[None, "O"], ["LOC", "B"], ["LOC", "I"], [None, "O"],
[None, "O"], ["PER", "B"], [None, "O"]]
"""
config = super().default_configs()
config.update({"attribute": None,
"tagging_unit": "",
"is_bert": False})
return config
@classmethod
def _bio_variance(cls, tag):
r"""Return the BIO-schemed augmented tagging scheme, for example,
if the `tag` is "person", the output would be `B-person`, `I-person`,
`O-person`.
Currently only supports B, I, O label.
Args:
tag (str): Tag name.
"""
return [(tag, "B"), (tag, "I"), (None, "O")]
def predefined_vocab(self, predefined: Iterable):
r"""Add predefined tags into the vocabulary. i.e. One can construct the
tag vocabulary without exploring the training data.
Args:
predefined (Iterable[str]): A set of pre-defined tags.
"""
for tag in predefined:
for element in self._bio_variance(tag):
self.add(element)
def update_vocab(self, pack: DataPack, instance: Annotation):
r"""Add all the tag from one instance into the vocabulary.
Args:
pack (DataPack): The datapack that contains the current
instance.
instance (Annotation): The instance from which the
                extractor will extract the feature.
"""
for entry in pack.get(self._entry_type, instance):
attribute = getattr(entry, self.attribute)
for tag_variance in self._bio_variance(attribute):
self.add(tag_variance)
def extract(self, pack: DataPack, instance: Annotation) -> Feature:
r"""Extract the sequence tagging feature of one instance. If the
vocabulary of this extractor is set, then the extracted tag sequences
will be converted to the tag ids (int).
Args:
pack (DataPack): The datapack that contains the current
instance.
instance (Annotation): The instance from which the
                extractor will extract the feature.
Returns (Feature):
a feature that contains the extracted data.
"""
instance_tagged: List[Tuple[Optional[str], str]] = \
bio_tagging(pack, instance,
self.tagging_unit,
self._entry_type,
self.attribute)
pad_value = self.get_pad_value()
if self.vocab:
# Use the vocabulary to map data into representation.
vocab_mapped: List[Union[int, List[int]]] = []
for pair in instance_tagged:
vocab_mapped.append(self.element2repr(pair))
raw_data: List = vocab_mapped
if self.is_bert:
raw_data = [pad_value] + raw_data + [pad_value]
else:
# When vocabulary is not available, use the original data.
raw_data = instance_tagged
meta_data = {"pad_value": pad_value,
"dim": 1,
"dtype": int if self.vocab else tuple}
return Feature(data=raw_data,
metadata=meta_data,
vocab=self.vocab)
def pre_evaluation_action(self, pack: DataPack, instance: Annotation):
r"""This function is performed on the pack before the evaluation
stage, allowing one to perform some actions before the evaluation.
By default, this function will remove tags in the instance. You can
overwrite this function by yourself.
Args:
pack (DataPack): The datapack that contains the current
instance.
instance (Annotation): The instance on which the
extractor performs the pre-evaluation action.
"""
for entry in pack.get(self._entry_type, instance):
pack.delete_entry(entry)
def add_to_pack(self, pack: DataPack, instance: Annotation,
prediction: List[int]):
r"""Add the prediction for attribute to the instance. We make following
assumptions for prediction.
1. If we encounter "I" while its tag is different from the previous
tag, we will consider this "I" as a "B" and start a new tag here.
2. We will truncate the prediction it according to the number of
entry. If the prediction contains `<PAD>` element, this should
remove them.
Args:
pack (DataPack):
The datapack that contains the current instance.
instance (Annotation):
The instance to which the extractor add prediction.
prediction (Iterable[Union[int, Any]]):
This is the output of the model, which contains the index for
attributes of one instance.
"""
instance_tagging_unit: List[Annotation] = \
list(pack.get(self.tagging_unit, instance))
if self.is_bert:
prediction = prediction[1:-1]
prediction = prediction[:len(instance_tagging_unit)]
if isinstance(prediction, Tensor):
prediction = prediction.cpu().numpy()
tags = [self.id2element(x) for x in prediction]
tag_start = None
tag_end = None
tag_type = None
for entry, tag in zip(instance_tagging_unit, tags):
if tag[1] == "O" or tag[1] == "B" or \
(tag[1] == "I" and tag[0] != tag_type):
if tag_type:
entity_mention = add_entry_to_pack(pack,
self._entry_type,
tag_start,
tag_end)
setattr(entity_mention, self.attribute, tag_type)
tag_start = entry.begin
tag_end = entry.end
tag_type = tag[0]
else:
tag_end = entry.end
# Handle the final tag
if tag_type and tag_start and tag_end:
entity_mention = add_entry_to_pack(pack,
self._entry_type,
tag_start,
tag_end)
setattr(entity_mention, self.attribute, tag_type)
```
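The decoding rule described in `add_to_pack` (an "I" whose type differs from the running tag type starts a new span) can be illustrated without Forte. The sketch below uses made-up token spans and the `(type, scheme)` pairs shown in the `default_configs` docstring above.

```python
# Standalone sketch of the BIO decoding rule used in add_to_pack above:
# an "I" whose type differs from the running type is treated as a new "B".
def bio_to_spans(token_spans, tags):
    """token_spans: list of (begin, end); tags: list of (type, 'B'/'I'/'O')."""
    results = []
    cur_type, cur_begin, cur_end = None, None, None
    for (begin, end), (tag_type, scheme) in zip(token_spans, tags):
        if scheme == "O" or scheme == "B" or (scheme == "I" and tag_type != cur_type):
            if cur_type is not None:
                results.append((cur_begin, cur_end, cur_type))
            cur_type = tag_type if scheme != "O" else None
            cur_begin, cur_end = begin, end
        else:  # continuation of the current entity
            cur_end = end
    if cur_type is not None:
        results.append((cur_begin, cur_end, cur_type))
    return results

spans = [(0, 4), (5, 11), (12, 18), (19, 20)]
tags = [(None, "O"), ("LOC", "B"), ("LOC", "I"), (None, "O")]
print(bio_to_spans(spans, tags))  # [(5, 18, 'LOC')]
```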
#### File: data/readers/openie_reader.py
```python
import logging
import os
from typing import Iterator, List
from forte.common.configuration import Config
from forte.common.exception import ProcessorConfigError
from forte.common.resources import Resources
from forte.data.data_pack import DataPack
from forte.data.data_utils_io import dataset_path_iterator
from forte.data.base_reader import PackReader
from ft.onto.base_ontology import Sentence, RelationLink, EntityMention
__all__ = [
"OpenIEReader"
]
class OpenIEReader(PackReader):
r""":class:`OpenIEReader` is designed to read in the Open IE dataset used
by Open Information Extraction task. The related paper can be found
`here
<https://gabrielstanovsky.github.io/assets/papers/emnlp16a/paper.pdf>`__.
The related source code for generating this dataset can be found
`here
<https://github.com/gabrielStanovsky/oie-benchmark>`__.
To use this Reader, you must follow the dataset format. Each line in
the dataset should contain following fields:
.. code-block:: none
<sentence>\t<predicate_head>\t<full_predicate>\t<arg1>\t<arg2>....
You can also find the dataset format `here
<https://github.com/gabrielStanovsky/oie-benchmark/tree/master/oie_corpus>`__.
"""
def initialize(self, resources: Resources, configs: Config):
super().initialize(resources, configs)
if configs.oie_file_extension is None:
raise ProcessorConfigError(
"Configuration oie_file_extension not provided.")
def _collect(self, *args, **kwargs) -> Iterator[str]:
# pylint: disable = unused-argument
r"""Should be called with param ``oie_directory`` which is a path to a
folder containing json files.
Args:
args: args[0] is the directory to the open ie files.
kwargs:
Returns: Iterator over files in the path with oie extensions.
"""
oie_directory: str = args[0]
oie_file_extension: str = self.configs.oie_file_extension
logging.info("Reading dataset from %s with extension %s",
oie_directory, oie_file_extension)
return dataset_path_iterator(oie_directory, oie_file_extension)
def _cache_key_function(self, oie_file: str) -> str:
return os.path.basename(oie_file)
def _parse_pack(self, file_path: str) -> Iterator[DataPack]:
pack: DataPack = DataPack()
text: str = ""
offset: int = 0
with open(file_path, "r", encoding="utf8") as f:
for line in f:
line = line.strip()
if line != "":
oie_component: List[str] = line.split("\t")
# Add sentence.
sentence = oie_component[0]
text += sentence + "\n"
Sentence(pack, offset, offset + len(sentence))
# Find argument 1.
arg1_begin = sentence.find(oie_component[3]) + offset
arg1_end = arg1_begin + len(oie_component[3])
arg1: EntityMention = EntityMention(
pack, arg1_begin, arg1_end)
# Find argument 2.
arg2_begin = sentence.find(oie_component[4]) + offset
arg2_end = arg2_begin + len(oie_component[4])
arg2: EntityMention = EntityMention(
pack, arg2_begin, arg2_end)
head_relation = RelationLink(pack, arg1, arg2)
head_relation.rel_type = oie_component[2]
offset += len(sentence) + 1
self.set_text(pack, text)
pack.pack_name = os.path.basename(file_path)
yield pack
@classmethod
def default_configs(cls):
config: dict = super().default_configs()
# Add OIE dataset file extension. The default is '.oie'
config.update({
'oie_file_extension': 'oie'
})
return config
```
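To make the line format concrete, here is a small standalone sketch of the parsing and offset arithmetic performed in `_parse_pack`; the sample line is made up and only the first two arguments are handled.

```python
# Standalone sketch: parse one Open IE line of the form
# <sentence>\t<predicate_head>\t<full_predicate>\t<arg1>\t<arg2>
line = "Obama was born in Hawaii\tborn\twas born in\tObama\tHawaii"
offset = 0  # character offset of this sentence within the full pack text

fields = line.split("\t")
sentence, predicate_head, full_predicate, arg1, arg2 = fields[:5]

arg1_begin = sentence.find(arg1) + offset
arg1_end = arg1_begin + len(arg1)
arg2_begin = sentence.find(arg2) + offset
arg2_end = arg2_begin + len(arg2)

print((arg1, arg1_begin, arg1_end))  # ('Obama', 0, 5)
print((arg2, arg2_begin, arg2_end))  # ('Hawaii', 18, 24)
```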
#### File: processors/base/base_processor.py
```python
from abc import abstractmethod, ABC
from typing import Any, Dict, Set
from forte.data.base_pack import PackType
from forte.data.selector import DummySelector
from forte.pipeline_component import PipelineComponent
from forte.utils.utils_processor import record_types_and_attributes_check
__all__ = [
"BaseProcessor",
]
class BaseProcessor(PipelineComponent[PackType], ABC):
r"""Base class inherited by all kinds of processors such as trainer,
predictor and evaluator.
"""
def __init__(self):
super().__init__()
self.selector = DummySelector()
def record(self, record_meta: Dict[str, Set[str]]):
r"""Method to add output record of the current processor to
:attr:`forte.data.data_pack.Meta.record`. The key of the record
should be the entry type and values should be attributes of the entry
type. All the information would be used for consistency checking
purpose if :meth:`~forte.pipeline.Pipeline.enforce_consistency` is
enabled for the pipeline.
Args:
record_meta: The field in the datapack for type record that need to
fill in for consistency checking.
"""
pass
@classmethod
def expected_types_and_attributes(cls) -> Dict[str, Set[str]]:
r"""Method to add expected types and attributes for the input of the
current processor which would be checked before running the processor if
:meth:`~forte.pipeline.Pipeline.enforce_consistency` was enabled for
the pipeline.
"""
return {}
def check_record(self, input_pack: PackType):
# pylint: disable=protected-access
r"""Method to check type consistency if
:meth:`~forte.pipeline.Pipeline.enforce_consistency` is enabled
for the pipeline. If any expected type or its attribute
does not exist in the datapack record of the previous pipeline
component, an error of
:class:`~forte.common.exception.ExpectedRecordNotFound` will be raised.
Args:
input_pack: The input datapack.
"""
if self._check_type_consistency:
expectation = self.expected_types_and_attributes()
record_types_and_attributes_check(expectation, input_pack)
def write_record(self, input_pack: PackType):
r"""Method to write records of the output type of the current
processor to the datapack. The key of the record should be the entry
type and values should be attributes of the entry type. All the
information would be used for consistency checking purpose if
:meth:`~forte.pipeline.Pipeline.enforce_consistency` is enabled
for the pipeline.
Args:
input_pack: The input datapack.
"""
# pylint: disable=protected-access
self.record(input_pack._meta.record)
def process(self, input_pack: PackType):
self.check_record(input_pack)
# Set the component for recording purpose.
self._process(input_pack)
self.write_record(input_pack)
@abstractmethod
def _process(self, input_pack: PackType):
r"""The main function of the processor. The implementation should
process the ``input_pack``, and conduct operations such as adding
entries into the pack, or produce some side-effect such as writing
data into the disk.
Args:
input_pack: The input datapack.
"""
raise NotImplementedError
@classmethod
def default_configs(cls) -> Dict[str, Any]:
r"""Returns a `dict` of configurations of the processor with default
values. Used to replace the missing values of input ``configs`` during
pipeline construction.
"""
config = super().default_configs()
config.update({
'selector': {
'type': 'forte.data.selector.DummySelector',
'args': None,
'kwargs': {}
},
'overwrite': False,
})
return config
```
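For reference, a minimal sketch of what a concrete subclass might look like, using only the hooks shown above; the ontology type and the behaviour here are illustrative and the class is not part of Forte.

```python
# Minimal sketch of a concrete processor built on the hooks above.
from typing import Dict, Set

from forte.data.data_pack import DataPack
from forte.processors.base.base_processor import BaseProcessor
from ft.onto.base_ontology import Sentence


class SentenceCounter(BaseProcessor):
    """Logs how many sentences upstream components have annotated (illustrative)."""

    def record(self, record_meta: Dict[str, Set[str]]):
        # This processor creates no new entry types, so nothing is recorded.
        pass

    @classmethod
    def expected_types_and_attributes(cls) -> Dict[str, Set[str]]:
        # Declare that upstream components must have produced Sentence entries.
        return {"ft.onto.base_ontology.Sentence": set()}

    def _process(self, input_pack: DataPack):
        count = len(list(input_pack.get(Sentence)))
        print(f"{input_pack.pack_name}: {count} sentences")
```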
#### File: data_augment/algorithms/distribution_replacement_op.py
```python
import random
from typing import Tuple, Union, Dict, Any
from forte.common.configuration import Config
from forte.data.ontology import Annotation
from forte.processors.data_augment.algorithms.text_replacement_op \
import TextReplacementOp
from forte.processors.data_augment.algorithms.sampler import Sampler
__all__ = [
"DistributionReplacementOp",
]
class DistributionReplacementOp(TextReplacementOp):
r"""
This class is a replacement op to replace the input word
with a new word that is sampled by a sampler from a distribution.
Args:
sampler: The sampler that samples a word from a distribution.
        configs: The config should contain `prob`,
            the probability of replacing the input;
            it should fall in [0, 1].
"""
def __init__(self, sampler: Sampler,
configs: Union[Config, Dict[str, Any]]):
super().__init__(configs)
self.sampler = sampler
def replace(self, input_anno: Annotation) -> Tuple[bool, str]:
r"""
This function replaces a word by sampling from a distribution.
Args:
input_anno (Annotation): The input annotation.
Returns:
A tuple of two values, where the first element is a boolean value
indicating whether the replacement happens, and the second
element is the replaced word.
"""
if random.random() > self.configs.prob:
return False, input_anno.text
word: str = self.sampler.sample()
return True, word
```
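A standalone sketch of the probability-gated replacement logic in `replace` above, without Forte's `Sampler` or `Annotation` classes; the word list and probability are made up.

```python
# Standalone sketch of the probability-gated replacement logic in `replace`:
# with probability `prob` the word is swapped for one drawn from a sampler.
import random

class UniformSampler:
    """Toy stand-in for a Sampler: draws uniformly from a fixed word list."""
    def __init__(self, words):
        self.words = words

    def sample(self):
        return random.choice(self.words)

def replace_word(word, sampler, prob):
    if random.random() > prob:
        return False, word          # keep the original word
    return True, sampler.sample()   # replace with a sampled word

sampler = UniformSampler(["alpha", "beta", "gamma"])
print(replace_word("hello", sampler, prob=0.5))
```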
#### File: data_augment/algorithms/eda_processors.py
```python
from math import ceil
import random
from typing import List, Dict
from forte.common.configuration import Config
from forte.common.resources import Resources
from forte.data.data_pack import DataPack
from forte.data.multi_pack import MultiPack
from forte.processors.data_augment import ReplacementDataAugmentProcessor
from forte.utils.utils import get_class, create_class_with_kwargs
__all__ = [
"RandomSwapDataAugmentProcessor",
"RandomInsertionDataAugmentProcessor",
"RandomDeletionDataAugmentProcessor",
]
english_stopwords = [
'i', 'me', 'my', 'myself', 'we', 'our', 'ours',
'ourselves', 'you', "you're", "you've", "you'll", "you'd", 'your',
'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself',
'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its',
'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what',
'which', 'who', 'whom', 'this', 'that', "that'll", 'these',
'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a',
'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until',
'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against',
'between', 'into', 'through', 'during', 'before', 'after',
'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on',
'off', 'over', 'under', 'again', 'further', 'then', 'once',
'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any',
'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such',
'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too',
'very', 's', 't', 'can', 'will', 'just', 'don', "don't", 'should',
"should've", 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain',
'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't",
'doesn', "doesn't", 'hadn', "hadn't", 'hasn', "hasn't", 'haven',
"haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn',
"mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn',
"shouldn't", 'wasn', "wasn't", 'weren', "weren't", 'won', "won't",
'wouldn', "wouldn't"]
class RandomSwapDataAugmentProcessor(ReplacementDataAugmentProcessor):
r"""
Data augmentation processor for the Random Swap operation.
Randomly choose two words in the sentence and swap their positions.
Do this n times, where n = alpha * input length.
"""
def _augment(self, input_pack: MultiPack, aug_pack_names: List[str]):
augment_entry = get_class(self.configs["augment_entry"])
for pack_name in aug_pack_names:
data_pack: DataPack = input_pack.get_pack(pack_name)
annotations = list(data_pack.get(augment_entry))
if len(annotations) > 0:
replace_map: Dict = {}
for _ in range(ceil(self.configs['alpha'] * len(annotations))):
swap_idx = random.sample(range(len(annotations)), 2)
new_idx_0 = swap_idx[1] if swap_idx[1] not in replace_map \
else replace_map[swap_idx[1]]
new_idx_1 = swap_idx[0] if swap_idx[0] not in replace_map \
else replace_map[swap_idx[0]]
replace_map[swap_idx[0]] = new_idx_0
replace_map[swap_idx[1]] = new_idx_1
pid: int = data_pack.pack_id
for idx in replace_map:
self._replaced_annos[pid] \
.add((annotations[idx].span,
annotations[replace_map[idx]].text))
@classmethod
def default_configs(cls):
"""
Returns:
A dictionary with the default config for this processor.
Additional keys for Random Swap:
- alpha: 0 <= alpha <= 1. indicates the percent of the words
in a sentence that are changed. The processor will perform
the Random Swap operation (input length * alpha) times.
"""
config = super().default_configs()
config.update({
'augment_entry': "ft.onto.base_ontology.Token",
'other_entry_policy': {
# to use Texar hyperparams 'kwargs' must accompany with 'type'
'type': '',
"kwargs": {
"ft.onto.base_ontology.Document": "auto_align",
"ft.onto.base_ontology.Sentence": "auto_align"
}
},
'alpha': 0.1,
'augment_pack_names': {
'type': '',
'kwargs': {
'input_src': 'augmented_input_src'
}
}
})
return config
class RandomInsertionDataAugmentProcessor(ReplacementDataAugmentProcessor):
r"""
Data augmentation processor for the Random Insertion operation.
Find a random synonym of a random word in the sentence that is
not a stop word. Insert that synonym into a random position in
the sentence. Do this n times, where n = alpha * input length.
"""
def initialize(self, resources: Resources, configs: Config):
super().initialize(resources, configs)
# pylint: disable=attribute-defined-outside-init
self.stopwords = set(configs['stopwords'])
def _augment(self, input_pack: MultiPack, aug_pack_names: List[str]):
replacement_op = create_class_with_kwargs(
self.configs["data_aug_op"],
class_args={
"configs": self.configs["data_aug_op_config"]["kwargs"]
}
)
augment_entry = get_class(self.configs["augment_entry"])
for pack_name in aug_pack_names:
data_pack: DataPack = input_pack.get_pack(pack_name)
annotations = []
pos = [0]
for anno in data_pack.get(augment_entry):
if anno.text not in self.stopwords:
annotations.append(anno)
pos.append(anno.end)
if len(annotations) > 0:
for _ in range(ceil(self.configs['alpha'] * len(annotations))):
src_anno = random.choice(annotations)
_, replaced_text = replacement_op.replace(src_anno)
insert_pos = random.choice(pos)
if insert_pos > 0:
replaced_text = " " + replaced_text
else:
replaced_text = replaced_text + " "
self._insert(replaced_text, data_pack, insert_pos)
@classmethod
def default_configs(cls):
"""
Returns:
A dictionary with the default config for this processor.
By default, we use Dictionary Replacement with Wordnet to get
synonyms to insert.
Additional keys for Random Swap:
- alpha: 0 <= alpha <= 1. indicates the percent of the words
in a sentence that are changed. The processor will perform
the Random Insertion operation (input length * alpha) times.
- stopwords: a list of stopword for the language.
"""
config = super().default_configs()
config.update({
'augment_entry': "ft.onto.base_ontology.Token",
'other_entry_policy': {
'type': '',
'kwargs': {
"ft.onto.base_ontology.Document": "auto_align",
"ft.onto.base_ontology.Sentence": "auto_align"
}
},
'data_aug_op':
"forte.processors.data_augment.algorithms."
"dictionary_replacement_op.DictionaryReplacementOp",
'data_aug_op_config': {
'type': '',
"kwargs": {
"dictionary_class": (
"forte.processors.data_augment."
"algorithms.dictionary.WordnetDictionary"
),
"prob": 1.0,
"lang": "eng",
},
},
'alpha': 0.1,
'augment_pack_names': {
'type': '',
'kwargs': {
'input_src': 'augmented_input_src'
}
},
'stopwords': english_stopwords,
})
return config
class RandomDeletionDataAugmentProcessor(ReplacementDataAugmentProcessor):
r"""
    Data augmentation processor for the Random Deletion operation.
Randomly remove each word in the sentence with probability alpha.
"""
def _augment(self, input_pack: MultiPack, aug_pack_names: List[str]):
augment_entry = get_class(self.configs["augment_entry"])
for pack_name in aug_pack_names:
data_pack: DataPack = input_pack.get_pack(pack_name)
for anno in data_pack.get(augment_entry):
if random.random() < self.configs['alpha']:
self._delete(anno)
@classmethod
def default_configs(cls):
"""
Returns:
A dictionary with the default config for this processor.
Additional keys for Random Deletion:
- alpha: 0 <= alpha <= 1. The probability to delete each word.
"""
config = super().default_configs()
config.update({
'augment_entry': "ft.onto.base_ontology.Token",
'other_entry_policy': {
'type': '',
"kwargs": {
"ft.onto.base_ontology.Document": "auto_align",
"ft.onto.base_ontology.Sentence": "auto_align"
}
},
"data_aug_op_config": {
'type': '',
'kwargs': {}
},
"alpha": 0.1,
'augment_pack_names': {
'type': '',
'kwargs': {
'input_src': 'augmented_input_src'
}
}
})
return config
```
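The Random Swap operation described above can be illustrated on a plain token list; the sketch below performs `ceil(alpha * length)` swaps, mirroring the loop in `_augment` without Forte's annotation bookkeeping.

```python
# Standalone sketch of Random Swap: pick two positions and swap them,
# repeated ceil(alpha * len(tokens)) times, as described in the docstring above.
import random
from math import ceil

def random_swap(tokens, alpha, seed=None):
    rng = random.Random(seed)
    tokens = list(tokens)
    for _ in range(ceil(alpha * len(tokens))):
        i, j = rng.sample(range(len(tokens)), 2)
        tokens[i], tokens[j] = tokens[j], tokens[i]
    return tokens

print(random_swap("the quick brown fox jumps over the lazy dog".split(),
                  alpha=0.2, seed=0))
```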
#### File: forte/data/selector_test.py
```python
import unittest
from forte.data.multi_pack import MultiPack
from forte.data.selector import (
NameMatchSelector, RegexNameMatchSelector, FirstPackSelector,
AllPackSelector)
class SelectorTest(unittest.TestCase):
def setUp(self) -> None:
self.multi_pack = MultiPack()
data_pack1 = self.multi_pack.add_pack(ref_name="pack1")
data_pack2 = self.multi_pack.add_pack(ref_name="pack2")
data_pack3 = self.multi_pack.add_pack(ref_name="pack_three")
data_pack1.pack_name = "1"
data_pack2.pack_name = "2"
data_pack3.pack_name = "Three"
def test_name_match_selector(self) -> None:
selector = NameMatchSelector(select_name="pack1")
packs = selector.select(self.multi_pack)
doc_ids = ["1"]
for doc_id, pack in zip(doc_ids, packs):
self.assertEqual(doc_id, pack.pack_name)
def test_regex_name_match_selector(self) -> None:
selector = RegexNameMatchSelector(select_name="^.*\\d$")
packs = selector.select(self.multi_pack)
doc_ids = ["1", "2"]
for doc_id, pack in zip(doc_ids, packs):
self.assertEqual(doc_id, pack.pack_name)
def test_first_pack_selector(self) -> None:
selector = FirstPackSelector()
packs = list(selector.select(self.multi_pack))
self.assertEqual(len(packs), 1)
self.assertEqual(packs[0].pack_name, "1")
def test_all_pack_selector(self) -> None:
selector = AllPackSelector()
packs = selector.select(self.multi_pack)
doc_ids = ["1", "2", "Three"]
for doc_id, pack in zip(doc_ids, packs):
self.assertEqual(doc_id, pack.pack_name)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jieran233/WenYan",
"score": 3
} |
#### File: jieran233/WenYan/main.py
```python
import requests
import json
from urllib.parse import quote
import time
import plugin_daily as daily
import plugin_news as news
import plugin_rss as rss
# Function for submitting a message
def submit(content, classesId, toAccountIds, toAccountIdsName, toAccountId_id_show, toAccountId_name_show, captcha, geli_yuser, geli_yschool, geli_session, remember_usr):
url = "https://apps.32k12.com/ecloud/ymessage/create.do"
payload = "id=&status=1&r_classesId="+classesId+"&q_eq_type_i=2&toAccountId_name=&toAccountId_id=&toAccountIds="+toAccountIds+"&toAccountIdsName="+quote(toAccountIdsName,'utf-8')+"&toAccountId_id_show="+quote(toAccountId_id_show,'utf-8')+"&toAccountId_name_show="+quote(toAccountId_name_show,'utf-8')+"&app_code_1612184765242=99&content="+quote(content,'utf-8')+"&attachmentId="
headers = {'accept':"application/json, text/javascript, */*; q=0.01", 'accept-encoding':"gzip, deflate, br", 'accept-language':"zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6", 'content-type':"application/x-www-form-urlencoded; charset=UTF-8", 'cookie':"captcha="+captcha+"; geli-yuser="+geli_yuser+"; geli-yschool="+geli_yschool+"; geli-session="+geli_session+"; remember_usr="+remember_usr, 'dnt':"1", 'origin':"https://apps.32k12.com", 'referer':"https://apps.32k12.com/ecloud/ymessage/create.do?placeValuesBefore", 'sec-fetch-dest':"empty", 'sec-fetch-mode':"cors", 'sec-fetch-site':"same-origin", 'user-agent':"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36 Edg/87.0.664.41", 'x-requested-with':"XMLHttpRequest"}
response = requests.request("POST", url, data=payload, headers=headers)
return(response.text)
# # Split a string or list into segments of a given length
# # Reference: https://blog.csdn.net/qq_26373925/article/details/101135611
# def cut(obj, sec):
# return [obj[i:i+sec] for i in range(0,len(obj),sec)]
# Fill in parameters
classesId = "135797"
toAccountIds = "6899229"
toAccountIdsName = "岳锦天同学"
toAccountId_id_show = "6899229;"
toAccountId_name_show = "岳锦天同学;"
# Fill in cookies
captcha = "21983c78d4c60cc1196b669a1776f4972171532-32518228328752101"#[该Cookie需保持最新(浏览会话结束时到期)]
geli_yuser = "4851022"
geli_yschool = "4214"
geli_session = "55bc823aaa73840eefbe451fc15c3370"#[该Cookie需保持最新(浏览会话结束时到期)]
remember_usr = "13910137227"
# Fill in the content to send (manually split into parts)
contents = ["早上好,又是新的一天~ || 每日一言:"+daily.hitokoto()+" || "+daily.tenki('石家庄')+" || "+daily.covid19('河北')+" || "+daily.covid19('北京'), "百度风云榜实时热点:"+rss.baidutop(), "今日新番放送:"+rss.bangumi()[:250], "蓝点网资讯:"+rss.landiannews(), "酷安图文编辑精选:"+rss.coolapkpy(), "StuartRiki_KeyTV的B站动态:"+rss.keytvnews(), "历史上的今天:"+news.eventHistory()]
# Send the messages and print the returned results
for i in range(0,len(contents)):
    # Send the parts in reverse order
j = len(contents)-i-1
num = '('+str(j+1)+'/'+str(len(contents))+')'
print(num)
print(submit(num+contents[j], classesId, toAccountIds, toAccountIdsName, toAccountId_id_show, toAccountId_name_show, captcha, geli_yuser, geli_yschool, geli_session, remember_usr))
    # Wait 1 second to avoid out-of-order delivery or being banned by the server
time.sleep(1)
# # The server requires the content length to be between 1 and 500 characters
# # Process the content to send: if it exceeds 500 characters, send it in parts
# if len(content.encode()) > 500:
#     print("[info] Content exceeds 500 characters, sending in parts")
#     content_parts = cut(content.encode(),500)
#     # print(content_parts)
#     for i in range(0,len(content_parts)-1):
#         print(content_parts[i].decode())
#         # print("Send message (part"+str(i)+"/"+str(len(content_parts))+"):"+submit(, classesId, toAccountIds, toAccountIdsName, toAccountId_id_show, toAccountId_name_show, captcha, geli_yuser, geli_yschool, geli_session, remember_usr))
# else:
#     print("[info] Content retrieved, sending now")
#     print(submit(content, classesId, toAccountIds, toAccountIdsName, toAccountId_id_show, toAccountId_name_show, captcha, geli_yuser, geli_yschool, geli_session, remember_usr))
``` |
{
"source": "Jie-Re/GraphGallery",
"score": 3
} |
#### File: examples/Defense/defense_poisoning.py
```python
import graphgallery as gg
import graphgallery.functional as gf
import tensorflow as tf
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
gg.set_backend("th")
def forward(A, X, target, w1, w2):
A = gf.normalize_adj_tensor(A)
h1 = F.relu(A @ X @ w1)
h2 = A @ h1 @ w2
out = h2[target]
return out.view(1, -1)
def defense(graph, budget, eps, target):
"""
Defender Model
@param graph: clean graph or attacked graph
@param budget: maximum number of modification
@param eps: calibration constant
@param target: target node
@return: graph after defensive perturbation
"""
trainer = gg.gallery.nodeclas.GCN(device='cpu', seed=100).make_data(graph).build()
trainer.fit(splits.train_nodes, splits.val_nodes)
w1, w2 = trainer.model.parameters()
w1 = w1.t()
w2 = w2.t()
A = gf.astensor(graph.A)
X = gf.astensor(graph.x)
A = nn.Parameter(A.to_dense())
t = gf.astensor(target)
C = graph.num_classes
loss_fn = nn.CrossEntropyLoss()
edges = []
for _ in range(int(budget)):
out = forward(A, X, t, w1, w2)
coeff = F.softmax(out / eps, dim=-1).view(-1)
loss = 0
for c in torch.arange(C):
loss += loss_fn(out, torch.tensor([c])) * coeff[c]
adj_grad = torch.autograd.grad(loss.sum(), A, create_graph=False)[0]
        # Find the edge corresponding to the largest gradient entry
N = adj_grad.size(0)
# gradient ascent
# if A_ij is 1 then the edge should be removed
# if A_ij is 0 then the edge should be added
# if adj_grad_ij > 0 then the edge would be added
# if adj_grad_ij < 0 then the edge would be removed
adj_grad *= 1 - 2 * A
adj_grad = adj_grad[t]
I = adj_grad.argmax()
# row = I.floor_divide(N)
row = t
col = I.fmod(N)
A[row, col] = A[col, row] = 1 - A[row, col]
edges.append([row.item(), col.item()])
defense_g = graph.from_flips(edge_flips=edges)
return defense_g
"""
Data Preprocessing
"""
data = gg.datasets.NPZDataset('cora', root='~/GraphData/datasets', transform='standardize')
graph = data.graph
splits = data.split_nodes(random_state=15)
"""
Attacker Model
"""
# GCN for attacker
trainer_a = gg.gallery.nodeclas.GCN(device='cpu', seed=42).make_data(graph).build()
trainer_a.fit(splits.train_nodes, splits.val_nodes)
w1_a, w2_a = trainer_a.model.parameters()
w1_a = w1_a.t()
w2_a = w2_a.t()
# attacker model
W_a = w1_a @ w2_a
W_a = gf.tensoras(W_a)
attacker = gg.attack.targeted.Nettack(graph).process(W_a)
# attacker = gg.attack.targeted.FGSM(graph).process(trainer)
"""
Generate attacked_g, clean_defended_g, attacked_defended_g
"""
# set target, budget, eps
# target = np.random.choice(splits.test_nodes, 1)[0]
# target = 1000
# target-loop, write the result out to the file
with open("changeof_clean_attacked.txt", "w") as f:
# clean_change = []
# attacked_change = []
for target in splits.test_nodes:
budget = 1
eps = 1
# true label
target_label = graph.node_label[target]
# attacked_g
attacker.set_max_perturbations()
attacker.reset()
attacker.attack(target,
direct_attack=True,
structure_attack=True,
feature_attack=False)
attack_g = attacker.g
print(f'{attacker.num_budgets} edges has been modified.')
# clean_defended_g
clean_defended_g = defense(graph, budget, eps, target)
# attacked_defended_g
attacked_defended_g = defense(attack_g, budget, eps, target)
"""
Prediction
"""
# clean graph
trainer = gg.gallery.nodeclas.GCN(seed=1234567).make_data(graph).build()
his = trainer.fit(splits.train_nodes,
splits.val_nodes,
verbose=0,
epochs=100)
clean_predict = trainer.predict(target, transform="softmax")
clean_label = np.argmax(clean_predict)
# attacked graph
trainer = gg.gallery.nodeclas.GCN(seed=1234567).make_data(attack_g).build()
his = trainer.fit(splits.train_nodes,
splits.val_nodes,
verbose=0,
epochs=100)
attacked_predict = trainer.predict(target, transform="softmax")
attacked_label = np.argmax(attacked_predict)
# clean defended graph
trainer = gg.gallery.nodeclas.GCN(seed=1234567).make_data(clean_defended_g).build()
his = trainer.fit(splits.train_nodes,
splits.val_nodes,
verbose=0,
epochs=100)
clean_defended_predict = trainer.predict(target, transform="softmax")
clean_defended_label = np.argmax(clean_defended_predict)
# attacked defended graph
trainer = gg.gallery.nodeclas.GCN(seed=1234567).make_data(attacked_defended_g).build()
his = trainer.fit(splits.train_nodes,
splits.val_nodes,
verbose=0,
epochs=100)
attacked_defended_predict = trainer.predict(target, transform="softmax")
attacked_defended_label = np.argmax(attacked_defended_predict)
# clean_change.append(clean_predict[clean_label]-clean_defended_predict[clean_defended_label])
# attacked_change.append(attacked_predict[attacked_label]-attacked_defended_predict[attacked_defended_label])
f.write(f"{clean_predict[clean_label]-clean_defended_predict[clean_defended_label],attacked_predict[attacked_label]-attacked_defended_predict[attacked_defended_label]}\n")
```
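A toy illustration of the `adj_grad *= 1 - 2 * A` sign trick used in `defense` above; the adjacency and gradient tensors are made up, and the rescored gradients are what let a single `argmax` choose between adding a missing edge and removing an existing one.

```python
# Toy illustration of the `adj_grad *= 1 - 2 * A` trick used in defense():
# for missing edges (A=0) the score is +grad, for existing edges (A=1) it is
# -grad, so one argmax ranks "add this edge" and "remove this edge" together.
import torch

A = torch.tensor([[0., 1., 0.],
                  [1., 0., 1.],
                  [0., 1., 0.]])
adj_grad = torch.tensor([[0.0, -0.6, 0.4],
                         [-0.6, 0.0, 0.2],
                         [0.4, 0.2, 0.0]])

scores = adj_grad * (1 - 2 * A)
target = 0
best = scores[target].argmax().item()
# best == 1: the most beneficial flip for node 0 is removing the existing
# edge (0, 1), since its gradient is strongly negative.
print(best, scores[target])
```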
#### File: graphgallery/attack/flip_attacker.py
```python
import numpy as np
import scipy.sparse as sp
from graphgallery import functional as gf
from .attacker import Attacker
class FlipAttacker(Attacker):
def __init__(self, graph, device="cpu", seed=None, name=None, **kwargs):
super().__init__(graph, device=device, seed=seed, name=name, **kwargs)
assert not graph.multiple
self.nattr_flips = None
self.eattr_flips = None
self.adj_flips = None
@property
def A(self):
adj_flips = self.edge_flips
if self.modified_adj is None:
if adj_flips is not None:
self.modified_adj = gf.flip_adj(self.graph.adj_matrix,
adj_flips)
else:
self.modified_adj = self.graph.adj_matrix
adj = self.modified_adj
if gf.is_anytensor(adj):
adj = gf.tensoras(adj)
if isinstance(adj, np.ndarray):
adj = sp.csr_matrix(adj)
elif sp.isspmatrix(adj):
adj = adj.tocsr(copy=False)
else:
raise TypeError(adj)
return adj
@property
def x(self):
return self.nx
@property
def nx(self):
attr_flips = self.nx_flips
if self.modified_nx is None:
if attr_flips is not None:
self.modified_nx = gf.flip_attr(self.graph.node_attr,
attr_flips)
else:
self.modified_nx = self.graph.node_attr
x = self.modified_nx
if sp.isspmatrix(x):
x = x.A
elif gf.is_anytensor(x):
x = gf.tensoras(x)
elif not isinstance(x, np.ndarray):
raise TypeError(x)
return x
@property
def ex(self):
# TODO
return None
@property
def d(self):
if self.modified_degree is None:
self.modified_degree = self.A.sum(1).A1.astype(self.intx)
return self.modified_degree
@property
def edge_flips(self):
flips = self.adj_flips
if flips is None or len(flips) == 0:
return None
if isinstance(flips, dict):
flips = list(flips.keys())
return np.asarray(flips, dtype="int64")
@property
def nx_flips(self):
flips = self.nattr_flips
if flips is None or len(flips) == 0:
return None
if isinstance(flips, dict):
flips = list(flips.keys())
return np.asarray(flips, dtype="int64")
@property
def ex_flips(self):
# TODO
return None
@property
def flips(self):
# TODO
return gf.BunchDict(edge_flips=self.edge_flips, nx_flips=self.nx_flips)
def show(self):
flips = self.edge_flips
if flips is not None and len(flips) != 0:
row, col = flips.T
w = self.graph.adj_matrix[row, col].A1
added = (w == 0)
removed = (w > 0)
print(f"Flip {flips.shape[0]} edges, where {added.sum()} are added, {removed.sum()} are removed.")
diff = self.graph.node_label[row[added]] != self.graph.node_label[col[added]]
ratio = diff.sum() / diff.size if diff.size else 0.
print(f"For added edges, {ratio:.2%} of them belong to different classes.")
same = self.graph.node_label[row[removed]] == self.graph.node_label[col[removed]]
ratio = same.sum() / same.size if same.size else 0.
print(f"For removed edges, {ratio:.2%} of them belong to the same class.")
else:
print("No edge flips found.")
# TODO: nattr_flips
```
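`FlipAttacker` only records `adj_flips` / `nattr_flips` and rebuilds the perturbed adjacency and attributes lazily through `A` and `x` (via `gf.flip_adj` / `gf.flip_attr`). The sketch below illustrates the edge-flip semantics with plain SciPy; `flip_edges` is a hypothetical standalone helper for illustration, not the library function itself.
```python
import numpy as np
import scipy.sparse as sp

def flip_edges(adj: sp.csr_matrix, flips: np.ndarray) -> sp.csr_matrix:
    """Toggle the symmetric entries listed in `flips` (shape [P, 2]):
    existing edges are removed, missing edges are added."""
    adj = adj.tolil(copy=True)
    for u, v in flips:
        new_val = 1 - adj[u, v]
        adj[u, v] = new_val
        adj[v, u] = new_val
    return adj.tocsr()

# toy 4-node path graph; flip one existing edge (0,1) and one missing edge (0,3)
row = np.array([0, 1, 1, 2, 2, 3])
col = np.array([1, 0, 2, 1, 3, 2])
adj = sp.csr_matrix((np.ones(6), (row, col)), shape=(4, 4))
print(flip_edges(adj, np.array([[0, 1], [0, 3]])).toarray())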
#### File: targeted/common/nettack.py
```python
import warnings
import numpy as np
import scipy.sparse as sp
from numba import njit
import graphgallery as gg
from graphgallery import functional as gf
from graphgallery.attack.targeted import Common
from graphgallery.utils import tqdm
from numba.errors import NumbaDeprecationWarning, NumbaPendingDeprecationWarning
warnings.simplefilter('ignore', category=NumbaDeprecationWarning)
warnings.simplefilter('ignore', category=NumbaPendingDeprecationWarning)
from ..targeted_attacker import TargetedAttacker
@Common.register()
class Nettack(TargetedAttacker):
"""
Nettack class used for poisoning attacks on node classification models.
Copyright (C) 2018
<NAME>
Technical University of Munich
"""
# nettack can conduct feature attack
_allow_feature_attack = True
def process(self, W_surrogate, reset=True):
self.W = W_surrogate
sparse_x = sp.csr_matrix(self.graph.node_attr)
self.cooc_matrix = sparse_x.T @ sparse_x
self.sparse_x = sparse_x
if reset:
self.reset()
return self
def reset(self):
super().reset()
self.modified_adj = self.graph.adj_matrix.copy()
self.modified_nx = self.sparse_x.copy()
self.adj_norm = gf.normalize_adj(self.modified_adj)
self.adj_flips = []
self.nattr_flips = []
self.influence_nodes = []
self.potential_edges = []
self.cooc_constraint = None
return self
def compute_cooccurrence_constraint(self, nodes):
"""
Co-occurrence constraint as described in the paper.
Parameters
----------
nodes: np.array
Nodes whose features are considered for change
Returns
-------
np.array [len(nodes), num_attrs], dtype bool
Binary matrix of dimension len(nodes) x num_attrs. A 1 in entry n,d indicates that
we are allowed to add feature d to the features of node n.
"""
num_nodes, num_attrs = self.modified_nx.shape
words_graph = self.cooc_matrix - sp.diags(self.cooc_matrix.diagonal())
words_graph.eliminate_zeros()
# words_graph.setdiag(0)
words_graph.data = words_graph.data > 0
word_degrees = words_graph.sum(0).A1
inv_word_degrees = np.reciprocal(word_degrees.astype(float) + 1e-8)
sd = np.zeros(num_nodes)
for n in range(num_nodes):
n_idx = self.modified_nx[n, :].nonzero()[1]
sd[n] = np.sum(inv_word_degrees[n_idx.tolist()])
scores_matrix = sp.lil_matrix((num_nodes, num_attrs))
for n in nodes:
common_words = words_graph.multiply(self.modified_nx[n])
idegs = inv_word_degrees[common_words.nonzero()[1]]
nnz = common_words.nonzero()[0]
scores = np.array(
[idegs[nnz == ix].sum() for ix in range(num_attrs)])
scores_matrix[n] = scores
self.cooc_constraint = sp.csr_matrix(
scores_matrix - 0.5 * sd[:, None] > 0)
def gradient_wrt_x(self, label):
"""
Compute the gradient of the logit belonging to the class of the input label with respect to the input features.
Parameters
----------
label: int
Class whose logits are of interest
Returns
-------
np.array [num_nodes, num_attrs] matrix containing the gradients.
"""
return (self.adj_norm @ self.adj_norm)[self.target].T @ sp.coo_matrix(
self.W[:, label].reshape(1, -1))
def compute_logits(self):
"""
Compute the logits of the surrogate model, i.e. linearized GCN.
Returns
-------
np.array, [num_classes,]
The logits of the target node.
"""
return (self.adj_norm @ self.adj_norm @ self.modified_nx
@ self.W)[self.target].ravel()
def strongest_wrong_class(self, logits):
"""
Determine the incorrect class with largest logits.
Parameters
----------
logits: np.array, [num_classes,]
The input logits of the target node
Returns
-------
int
The index of the incorrect class with the largest logit.
"""
target_label_onehot = np.eye(self.num_classes)[self.target_label]
return (logits - 1000 * target_label_onehot).argmax()
def feature_scores(self):
"""
Compute feature scores for all possible feature changes.
"""
if self.cooc_constraint is None:
self.compute_cooccurrence_constraint(self.influence_nodes)
logits = self.compute_logits()
best_wrong_class = self.strongest_wrong_class(logits)
gradient = self.gradient_wrt_x(
self.target_label) - self.gradient_wrt_x(best_wrong_class)
surrogate_loss = logits[self.target_label] - logits[best_wrong_class]
gradients_flipped = (gradient * -1).tolil()
gradients_flipped[self.modified_nx.nonzero()] *= -1
X_influencers = sp.lil_matrix(self.modified_nx.shape)
X_influencers[self.influence_nodes] = self.modified_nx[
self.influence_nodes]
gradients_flipped = gradients_flipped.multiply(
(self.cooc_constraint + X_influencers) > 0)
nnz_ixs = np.array(gradients_flipped.nonzero()).T
sorting = np.argsort(gradients_flipped[tuple(nnz_ixs.T)]).A1
sorted_ixs = nnz_ixs[sorting]
grads = gradients_flipped[tuple(nnz_ixs[sorting].T)]
scores = surrogate_loss - grads
return sorted_ixs[::-1], scores.A1[::-1]
def struct_score(self, a_hat_uv, XW):
"""
Compute structure scores, cf. Eq. 15 in the paper
Parameters
----------
a_hat_uv: sp.sparse_matrix, shape [P, 2]
Entries of matrix A_hat^2_u for each potential edge (see paper for explanation)
XW: np.array, shape [num_nodes, num_classes], dtype float
The class logits for each node.
Returns
-------
np.array [P,]
The struct score for every row in a_hat_uv
"""
logits = a_hat_uv @ XW
label_onehot = np.eye(self.num_classes)[self.target_label]
best_wrong_class_logits = (logits - 1000 * label_onehot).max(1)
logits_for_correct_class = logits[:, self.target_label]
struct_scores = logits_for_correct_class - best_wrong_class_logits
return struct_scores
def compute_XW(self):
"""
Shortcut to compute the dot product of X and W
Returns
-------
x @ W: np.array, shape [num_nodes, num_classes]
"""
return self.modified_nx @ self.W
def get_attacker_nodes(self, n=5, add_additional_nodes=False):
"""
Determine the influencer nodes to attack node i based on the weights W and the attributes X.
Parameters
----------
n: int, default: 5
The desired number of attacker nodes.
add_additional_nodes: bool, default: False
if True and the degree of node i (d_u) is < n, we select n-d_u additional attackers, which should
get connected to u afterwards (outside this function).
Returns
-------
np.array, shape [n,]:
The indices of the attacker nodes.
optional: np.array, shape [n - degree(n)]
if add_additional_nodes is True, we separately
return the additional attacker node indices
"""
assert n < self.num_nodes - 1, "number of influencers cannot be >= number of nodes in the graph!"
# neighbors = self.modified_adj[self.target].nonzero()[1]
neighbors = self.modified_adj[self.target].indices
# assert self.target not in neighbors
potential_edges = np.column_stack((np.tile(self.target, len(neighbors)), neighbors)).astype("int32")
# The new A_hat_square_uv values that we would get if we removed the edge from u to each of the neighbors, respectively
a_hat_uv = self.compute_new_a_hat_uv(potential_edges)
XW = self.compute_XW()
# compute the struct scores for all neighbors
struct_scores = self.struct_score(a_hat_uv, XW)
if len(neighbors) >= n: # do we have enough neighbors for the number of desired influencers?
influence_nodes = neighbors[np.argsort(struct_scores)[:n]]
if add_additional_nodes:
return influence_nodes, np.array([])
return influence_nodes
else:
influence_nodes = neighbors
if add_additional_nodes: # Add additional influencers by connecting them to u first.
# Compute the set of possible additional influencers, i.e. all nodes except the ones
# that are already connected to u.
poss_add_infl = np.setdiff1d(np.setdiff1d(np.arange(self.num_nodes), neighbors), self.target)
n_possible_additional = len(poss_add_infl)
n_additional_attackers = n - len(neighbors)
possible_edges = np.column_stack((np.tile(self.target, n_possible_additional), poss_add_infl)).astype("int32")
# Compute the struct_scores for all possible additional influencers, and choose the one
# with the best struct score.
a_hat_uv_additional = self.compute_new_a_hat_uv(possible_edges)
additional_struct_scores = self.struct_score(a_hat_uv_additional, XW)
additional_influencers = poss_add_infl[np.argsort(additional_struct_scores)[-n_additional_attackers::]]
return influence_nodes, additional_influencers
else:
return influence_nodes
def compute_new_a_hat_uv(self, potential_edges):
"""
Compute the updated A_hat_square_uv entries that would result from inserting/deleting the input edges,
for every edge.
Parameters
----------
potential_edges: np.array, shape [P,2], dtype int
The edges to check.
Returns
-------
sp.sparse_matrix: updated A_hat_square_u entries, a sparse PxN matrix, where P is len(possible_edges).
"""
edges = np.transpose(self.modified_adj.nonzero())
edges_set = {tuple(e) for e in edges}
A_hat_sq = self.adj_norm @ self.adj_norm
values_before = A_hat_sq[self.target].toarray()[0]
node_ixs = np.unique(edges[:, 0], return_index=True)[1].astype("int32")
twohop_ixs = np.transpose(A_hat_sq.nonzero())
degrees = self.modified_adj.sum(0).A1 + 1
# Ignore warnings:
# NumbaPendingDeprecationWarning:
# Encountered the use of a type that is scheduled for deprecation: type 'reflected set' found for argument 'edges_set' of function 'compute_new_a_hat_uv'.
# For more information visit http://numba.pydata.org/numba-doc/latest/reference/deprecation.html#deprecation-of-reflection-for-list-and-set-types
with warnings.catch_warnings(record=True):
warnings.filterwarnings(
'ignore',
'.*Encountered the use of a type that is scheduled for deprecation*'
)
ixs, vals = compute_new_a_hat_uv(edges, node_ixs, edges_set,
twohop_ixs, values_before,
degrees, potential_edges,
self.target)
ixs_arr = np.array(ixs)
a_hat_uv = sp.coo_matrix((vals, (ixs_arr[:, 0], ixs_arr[:, 1])),
shape=[len(potential_edges), self.num_nodes])
return a_hat_uv
def attack(self,
target,
num_budgets=None,
direct_attack=True,
structure_attack=True,
feature_attack=False,
n_influencers=5,
ll_constraint=True,
ll_cutoff=0.004,
disable=False):
super().attack(target, num_budgets, direct_attack, structure_attack,
feature_attack)
if feature_attack and not self.graph.is_binary():
raise RuntimeError(
"Currently only attack binary node attributes are supported")
if ll_constraint and self.allow_singleton:
raise RuntimeError(
'`ll_constraint` fails when `allow_singleton=True`, please set `attacker.allow_singleton=False`.'
)
logits_start = self.compute_logits()
best_wrong_class = self.strongest_wrong_class(logits_start)
if structure_attack and ll_constraint:
# Setup starting values of the likelihood ratio test.
degree_sequence_start = self.degree
current_degree_sequence = self.degree.astype('float64')
d_min = 2
S_d_start = np.sum(
np.log(degree_sequence_start[degree_sequence_start >= d_min]))
current_S_d = np.sum(
np.log(
current_degree_sequence[current_degree_sequence >= d_min]))
n_start = np.sum(degree_sequence_start >= d_min)
current_n = np.sum(current_degree_sequence >= d_min)
alpha_start = compute_alpha(n_start, S_d_start, d_min)
log_likelihood_orig = compute_log_likelihood(
n_start, alpha_start, S_d_start, d_min)
if len(self.influence_nodes) == 0:
if not direct_attack:
# Choose influencer nodes
infls, add_infls = self.get_attacker_nodes(
n_influencers, add_additional_nodes=True)
self.influence_nodes = np.concatenate((infls, add_infls))
# Potential edges are all edges from any attacker to any other node, except the respective
# attacker itself or the node being attacked.
self.potential_edges = np.row_stack([
np.column_stack(
(np.tile(infl, self.num_nodes - 2),
np.setdiff1d(np.arange(self.num_nodes),
np.array([self.target, infl]))))
for infl in self.influence_nodes
])
else:
# direct attack
influencers = [self.target]
self.potential_edges = np.column_stack(
(np.tile(self.target, self.num_nodes - 1),
np.setdiff1d(np.arange(self.num_nodes), self.target)))
self.influence_nodes = np.array(influencers)
self.potential_edges = self.potential_edges.astype("int32")
for it in tqdm(range(self.num_budgets),
desc='Perturbing Graph',
disable=disable):
if structure_attack:
# Do not consider edges that, if removed, would create singleton nodes in the graph.
if not self.allow_singleton:
filtered_edges = gf.singleton_filter(self.potential_edges, self.modified_adj).astype("int32")
else:
filtered_edges = self.potential_edges
if ll_constraint:
# Update the values for the power law likelihood ratio test.
deltas = 2 * (1 - self.modified_adj[tuple(
filtered_edges.T)].A.ravel()) - 1
d_edges_old = current_degree_sequence[filtered_edges]
d_edges_new = current_degree_sequence[
filtered_edges] + deltas[:, None]
new_S_d, new_n = update_Sx(current_S_d, current_n,
d_edges_old, d_edges_new, d_min)
new_alphas = compute_alpha(new_n, new_S_d, d_min)
new_ll = compute_log_likelihood(new_n, new_alphas, new_S_d,
d_min)
alphas_combined = compute_alpha(new_n + n_start,
new_S_d + S_d_start, d_min)
new_ll_combined = compute_log_likelihood(
new_n + n_start, alphas_combined, new_S_d + S_d_start,
d_min)
new_ratios = -2 * new_ll_combined + 2 * (
new_ll + log_likelihood_orig)
# Do not consider edges that, if added/removed, would lead to a violation of the
# likelihood ratio chi-square cutoff value.
powerlaw_filter = filter_chisquare(new_ratios, ll_cutoff)
filtered_edges = filtered_edges[powerlaw_filter]
# Compute new entries in A_hat_square_uv
a_hat_uv_new = self.compute_new_a_hat_uv(filtered_edges)
# Compute the struct scores for each potential edge
struct_scores = self.struct_score(a_hat_uv_new,
self.compute_XW())
best_edge_ix = struct_scores.argmin()
best_edge_score = struct_scores.min()
best_edge = filtered_edges[best_edge_ix]
if feature_attack:
# Compute the feature scores for each potential feature perturbation
feature_ixs, feature_scores = self.feature_scores()
best_feature_ix = feature_ixs[0]
best_feature_score = feature_scores[0]
if structure_attack and feature_attack:
# decide whether to choose an edge or feature to change
if best_edge_score < best_feature_score:
change_structure = True
else:
change_structure = False
elif structure_attack:
change_structure = True
elif feature_attack:
change_structure = False
if change_structure:
# perform edge perturbation
u, v = best_edge
modified_adj = self.modified_adj.tolil(copy=False)
modified_adj[(u, v)] = modified_adj[(
v, u)] = 1 - modified_adj[(u, v)]
self.modified_adj = modified_adj.tocsr(copy=False)
self.adj_norm = gf.normalize_adj(modified_adj)
self.adj_flips.append((u, v))
if ll_constraint:
# Update likelihood ratio test values
current_S_d = new_S_d[powerlaw_filter][best_edge_ix]
current_n = new_n[powerlaw_filter][best_edge_ix]
current_degree_sequence[best_edge] += deltas[
powerlaw_filter][best_edge_ix]
else:
modified_nx = self.modified_nx.tolil(copy=False)
modified_nx[tuple(
best_feature_ix)] = 1 - modified_nx[tuple(best_feature_ix)]
self.modified_nx = modified_nx.tocsr(copy=False)
self.nattr_flips.append(tuple(best_feature_ix))
return self
@njit
def connected_after(u, v, connected_before, delta):
if u == v:
if delta == -1:
return False
else:
return True
else:
return connected_before
@njit
def compute_new_a_hat_uv(edge_ixs, node_nb_ixs, edges_set, twohop_ixs,
values_before, degs, potential_edges, u):
"""
Compute the new values [A_hat_square]_u for every potential edge, where u is the target node. C.f. Theorem 5.1
equation 17.
Parameters
----------
edge_ixs: np.array, shape [E,2], where E is the number of edges in the graph.
The indices of the nodes connected by the edges in the input graph.
node_nb_ixs: np.array, shape [num_nodes,], dtype int
For each node, this gives the first index of edges associated to this node in the edge array (edge_ixs).
This will be used to quickly look up the neighbors of a node, since numba does not allow nested lists.
edges_set: set((e0, e1))
The set of edges in the input graph, i.e. e0 and e1 are two nodes connected by an edge
twohop_ixs: np.array, shape [T, 2], where T is the number of edges in A_tilde^2
The indices of nodes that are in the twohop neighborhood of each other, including self-loops.
values_before: np.array, shape [num_nodes,], the values in [A_hat]^2_uv to be updated.
degs: np.array, shape [num_nodes,], dtype int
The degree of the nodes in the input graph.
potential_edges: np.array, shape [P, 2], where P is the number of potential edges.
The potential edges to be evaluated. For each of these potential edges, this function will compute the values
in [A_hat]^2_uv that would result after inserting/removing this edge.
u: int
The target node
Returns
-------
return_ixs: List of tuples
The ixs in the [P, num_nodes] matrix of updated values that have changed
return_values: List of floats
The corresponding updated values in the [P, num_nodes] matrix.
"""
num_nodes = degs.shape[0]
twohop_u = twohop_ixs[twohop_ixs[:, 0] == u, 1]
nbs_u = edge_ixs[edge_ixs[:, 0] == u, 1]
nbs_u_set = set(nbs_u)
return_ixs = []
return_values = []
for ix in range(len(potential_edges)):
edge = potential_edges[ix]
edge_set = set(edge)
degs_new = degs.copy()
delta = -2 * ((edge[0], edge[1]) in edges_set) + 1
degs_new[edge] += delta
nbs_edge0 = edge_ixs[edge_ixs[:, 0] == edge[0], 1]
nbs_edge1 = edge_ixs[edge_ixs[:, 0] == edge[1], 1]
affected_nodes = set(np.concatenate((twohop_u, nbs_edge0, nbs_edge1)))
affected_nodes = affected_nodes.union(edge_set)
a_um = edge[0] in nbs_u_set
a_un = edge[1] in nbs_u_set
a_un_after = connected_after(u, edge[0], a_un, delta)
a_um_after = connected_after(u, edge[1], a_um, delta)
for v in affected_nodes:
a_uv_before = v in nbs_u_set
a_uv_before_sl = a_uv_before or v == u
if v in edge_set and u in edge_set and u != v:
if delta == -1:
a_uv_after = False
else:
a_uv_after = True
else:
a_uv_after = a_uv_before
a_uv_after_sl = a_uv_after or v == u
from_ix = node_nb_ixs[v]
to_ix = node_nb_ixs[v + 1] if v < num_nodes - 1 else len(edge_ixs)
node_nbs = edge_ixs[from_ix:to_ix, 1]
node_nbs_set = set(node_nbs)
a_vm_before = edge[0] in node_nbs_set
a_vn_before = edge[1] in node_nbs_set
a_vn_after = connected_after(v, edge[0], a_vn_before, delta)
a_vm_after = connected_after(v, edge[1], a_vm_before, delta)
mult_term = 1 / np.sqrt(degs_new[u] * degs_new[v])
sum_term1 = np.sqrt(degs[u] * degs[v]) * values_before[v] - a_uv_before_sl / degs[u] - a_uv_before / \
degs[v]
sum_term2 = a_uv_after / degs_new[v] + a_uv_after_sl / degs_new[u]
sum_term3 = -((a_um and a_vm_before) / degs[edge[0]]) + (
a_um_after and a_vm_after) / degs_new[edge[0]]
sum_term4 = -((a_un and a_vn_before) / degs[edge[1]]) + (
a_un_after and a_vn_after) / degs_new[edge[1]]
new_val = mult_term * (sum_term1 + sum_term2 + sum_term3 +
sum_term4)
return_ixs.append((ix, v))
return_values.append(new_val)
return return_ixs, return_values
def compute_alpha(n, S_d, d_min):
"""
Approximate the alpha of a power law distribution.
Parameters
----------
n: int or np.array of int
Number of entries that are larger than or equal to d_min
S_d: float or np.array of float
Sum of log degrees in the distribution that are larger than or equal to d_min
d_min: int
The minimum degree of nodes to consider
Returns
-------
alpha: float
The estimated alpha of the power law distribution
"""
return n / (S_d - n * np.log(d_min - 0.5)) + 1
def update_Sx(S_old, n_old, d_old, d_new, d_min):
"""
Update on the sum of log degrees S_d and n based on degree distribution resulting from inserting or deleting
a single edge.
Parameters
----------
S_old: float
Sum of log degrees in the distribution that are larger than or equal to d_min.
n_old: int
Number of entries in the old distribution that are larger than or equal to d_min.
d_old: np.array, shape [num_nodes,] dtype int
The old degree sequence.
d_new: np.array, shape [num_nodes,] dtype int
The new degree sequence
d_min: int
The minimum degree of nodes to consider
Returns
-------
new_S_d: float, the updated sum of log degrees in the distribution that are larger than or equal to d_min.
new_n: int, the updated number of entries in the old distribution that are larger than or equal to d_min.
"""
old_in_range = d_old >= d_min
new_in_range = d_new >= d_min
d_old_in_range = np.multiply(d_old, old_in_range)
d_new_in_range = np.multiply(d_new, new_in_range)
new_S_d = S_old - np.log(np.maximum(d_old_in_range, 1)).sum(1) + np.log(
np.maximum(d_new_in_range, 1)).sum(1)
new_n = n_old - np.sum(old_in_range, 1) + np.sum(new_in_range, 1)
return new_S_d, new_n
def compute_log_likelihood(n, alpha, S_d, d_min):
"""
Compute log likelihood of the powerlaw fit.
Parameters
----------
n: int
Number of entries in the old distribution that are larger than or equal to d_min.
alpha: float
The estimated alpha of the power law distribution
S_d: float
Sum of log degrees in the distribution that are larger than or equal to d_min.
d_min: int
The minimum degree of nodes to consider
Returns
-------
float: the estimated log likelihood
"""
return n * np.log(alpha) + n * alpha * np.log(d_min) + (alpha + 1) * S_d
def filter_chisquare(ll_ratios, cutoff):
return ll_ratios < cutoff
```
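The degree-distribution (power-law) likelihood-ratio test behind `ll_constraint` can be exercised in isolation with the module-level helpers above. A minimal sketch on a toy degree sequence follows; the import path is assumed from the file header and may need adjusting.
```python
import numpy as np
# Module path assumed from the file header above; adjust if it differs.
from graphgallery.attack.targeted.common.nettack import (
    compute_alpha, compute_log_likelihood, update_Sx, filter_chisquare)

d_min = 2
degrees = np.array([3, 2, 2, 4, 1, 5], dtype=np.float64)

S_d = np.log(degrees[degrees >= d_min]).sum()
n = np.sum(degrees >= d_min)
alpha = compute_alpha(n, S_d, d_min)
ll_orig = compute_log_likelihood(n, alpha, S_d, d_min)

# candidate edge (0, 4): adding it increases both endpoint degrees by one
d_old = degrees[[0, 4]][None, :]          # shape [1, 2]
d_new = d_old + 1
new_S_d, new_n = update_Sx(S_d, n, d_old, d_new, d_min)
new_alpha = compute_alpha(new_n, new_S_d, d_min)
new_ll = compute_log_likelihood(new_n, new_alpha, new_S_d, d_min)

alpha_comb = compute_alpha(new_n + n, new_S_d + S_d, d_min)
ll_comb = compute_log_likelihood(new_n + n, alpha_comb, new_S_d + S_d, d_min)
ratio = -2 * ll_comb + 2 * (new_ll + ll_orig)
print("passes chi-square cutoff:", filter_chisquare(ratio, 0.004))
```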
#### File: targeted/common/node_embedding_attack.py
```python
import numpy as np
from numba import njit
import scipy.sparse as sp
import scipy.linalg as spl
import tensorflow as tf
from graphgallery import functional as gf
from graphgallery.attack.targeted import Common
from graphgallery.attack.utils.estimate_utils import (
estimate_loss_with_delta_eigenvals,
estimate_loss_with_perturbation_gradient)
from ..targeted_attacker import TargetedAttacker
@Common.register()
class NodeEmbeddingAttack(TargetedAttacker):
def process(self, K=50, reset=True):
self.nodes_set = set(range(self.num_nodes))
adj_matrix = self.graph.adj_matrix
deg_matrix = sp.diags(self.degree, dtype=adj_matrix.dtype)
if K:
self.vals_org, self.vecs_org = sp.linalg.eigsh(adj_matrix, k=K, M=deg_matrix)
else:
self.vals_org, self.vecs_org = spl.eigh(adj_matrix.toarray(), deg_matrix.toarray())
if reset:
self.reset()
return self
def attack(self,
target,
num_budgets=None,
dim=32,
window_size=5,
direct_attack=True,
structure_attack=True,
feature_attack=False):
super().attack(target, num_budgets, direct_attack, structure_attack,
feature_attack)
num_budgets = self.num_budgets
num_nodes = self.num_nodes
adj = self.graph.adj_matrix
if direct_attack:
influence_nodes = [target]
candidates = np.column_stack(
(np.tile(target,
num_nodes - 1), list(self.nodes_set - set([target]))))
else:
influence_nodes = adj[target].indices
candidates = np.row_stack([
np.column_stack((np.tile(infl, num_nodes - 2),
list(self.nodes_set - set([target, infl]))))
for infl in influence_nodes
])
if not self.allow_singleton:
candidates = gf.singleton_filter(candidates, adj)
delta_w = 1. - 2 * adj[candidates[:, 0], candidates[:, 1]].A1
loss_for_candidates = estimate_loss_with_delta_eigenvals(
candidates, delta_w, self.vals_org, self.vecs_org, self.num_nodes,
dim, window_size)
self.adj_flips = candidates[loss_for_candidates.argsort()
[-num_budgets:]]
return self
```
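A minimal usage sketch for this attacker, mirroring the trainer/attacker pattern of the example script at the top of this section; the `gg.attack.targeted.NodeEmbeddingAttack` lookup is assumed by analogy with the Nettack usage there.
```python
import graphgallery as gg

data = gg.datasets.NPZDataset('cora', root='~/GraphData/datasets', transform='standardize')
graph = data.graph
splits = data.split_nodes(random_state=15)

target = int(splits.test_nodes[0])
attacker = gg.attack.targeted.NodeEmbeddingAttack(graph).process(K=50)
attacker.attack(target, dim=32, window_size=5, direct_attack=True)
print(attacker.adj_flips)  # the chosen edge flips
```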
#### File: targeted/tensorflow/fgsm.py
```python
import numpy as np
import tensorflow as tf
from tensorflow.keras.losses import sparse_categorical_crossentropy
import graphgallery as gg
from graphgallery import functional as gf
from graphgallery.utils import tqdm
from graphgallery.attack.targeted import TensorFlow
from ..targeted_attacker import TargetedAttacker
@TensorFlow.register()
class FGSM(TargetedAttacker):
def process(self, surrogate, reset=True):
if isinstance(surrogate, gg.gallery.nodeclas.Trainer):
surrogate = surrogate.model
with tf.device(self.device):
self.surrogate = surrogate
self.loss_fn = sparse_categorical_crossentropy
self.x_tensor = gf.astensor(self.graph.node_attr)
if reset:
self.reset()
return self
def reset(self):
super().reset()
self.modified_degree = self.degree.copy()
with tf.device(self.device):
modified_adj = tf.Variable(self.graph.adj_matrix.A,
dtype=self.floatx)
self.modified_adj = modified_adj
self.adj_changes = tf.zeros_like(modified_adj)
return self
def attack(self,
target,
num_budgets=None,
direct_attack=True,
structure_attack=True,
feature_attack=False,
disable=False):
super().attack(target, num_budgets, direct_attack, structure_attack,
feature_attack)
if not direct_attack:
raise NotImplementedError(
f'{self.name} does NOT support indirect attack.')
target_index, target_label = gf.astensors([self.target],
self.target_label)
adj_matrix = self.graph.adj_matrix
for it in tqdm(range(self.num_budgets),
desc='Perturbing Graph',
disable=disable):
with tf.device(self.device):
gradients = self.compute_gradients(self.modified_adj,
self.adj_changes,
target_index, target_label)
modified_row = tf.gather(self.modified_adj, target_index)
gradients = (gradients *
(-2 * modified_row + 1)).numpy().ravel()
sorted_index = np.argsort(-gradients)
for index in sorted_index:
u = target
v = index % adj_matrix.shape[0]
exist = adj_matrix[u, v]
if exist and not self.allow_singleton and (
self.modified_degree[u] <= 1
or self.modified_degree[v] <= 1):
continue
if not self.is_modified(u, v):
self.adj_flips[(u, v)] = it
self.flip_edge(u, v, exist)
break
return self
@tf.function
def compute_gradients(self, modified_adj, adj_changes, target_index,
target_label):
with tf.GradientTape() as tape:
tape.watch(adj_changes)
adj = modified_adj + adj_changes
adj_norm = gf.normalize_adj_tensor(adj)
logit = self.surrogate([self.x_tensor, adj_norm])
logit = tf.gather(logit, target_index)
loss = self.loss_fn(target_label, logit, from_logits=True)
gradients = tape.gradient(loss, adj_changes)
return gradients
def flip_edge(self, u, v, exist):
weight = 1. - exist
delta_d = 2. * weight - 1.
self.modified_adj[u, v].assign(weight)
self.modified_adj[v, u].assign(weight)
self.modified_degree[u] += delta_d
self.modified_degree[v] += delta_d
```
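Unlike Nettack, this FGSM attacker consumes the trained surrogate itself (a `Trainer` or its underlying Keras model) rather than a weight product, as the commented-out line in the example script above already hints. A minimal sketch, assuming the TensorFlow backend is active and that `attacker.g` behaves as in the Nettack example:
```python
import graphgallery as gg

data = gg.datasets.NPZDataset('cora', root='~/GraphData/datasets', transform='standardize')
graph = data.graph
splits = data.split_nodes(random_state=15)

# surrogate GCN whose gradients w.r.t. the dense adjacency drive the attack
trainer = gg.gallery.nodeclas.GCN(device='cpu', seed=42).make_data(graph).build()
trainer.fit(splits.train_nodes, splits.val_nodes)

target = int(splits.test_nodes[0])
attacker = gg.attack.targeted.FGSM(graph).process(trainer)
attacker.attack(target, num_budgets=1, direct_attack=True)
attack_g = attacker.g  # perturbed graph, used the same way as in the Nettack example
```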
#### File: untargeted/tensorflow/metattack.py
```python
import warnings
import numpy as np
import tensorflow as tf
from tensorflow.keras.initializers import glorot_uniform, zeros
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.activations import softmax, relu
from tensorflow.keras.losses import SparseCategoricalCrossentropy
import graphgallery as gg
from graphgallery import functional as gf
from graphgallery.utils import tqdm
# from graphadv.utils.graph_utils import likelihood_ratio_filter
from graphgallery.attack.untargeted import TensorFlow
from ..untargeted_attacker import UntargetedAttacker
class BaseMeta(UntargetedAttacker):
"""Base model for Mettack."""
# mettack can also conduct feature attack
_allow_feature_attack = True
def process(self,
train_nodes,
unlabeled_nodes,
self_training_labels,
hids,
use_relu,
reset=True):
self.ll_ratio = None
with tf.device(self.device):
self.train_nodes = gf.astensor(train_nodes, dtype=self.intx)
self.unlabeled_nodes = gf.astensor(unlabeled_nodes, dtype=self.intx)
self.labels_train = gf.astensor(self.graph.node_label[train_nodes], dtype=self.intx)
self.self_training_labels = gf.astensor(self_training_labels, dtype=self.intx)
self.adj_tensor = gf.astensor(self.graph.adj_matrix.A, dtype=self.floatx)
self.x_tensor = gf.astensor(self.graph.node_attr, dtype=self.floatx)
self.build(hids=hids)
self.use_relu = use_relu
self.loss_fn = SparseCategoricalCrossentropy(from_logits=True)
self.adj_changes = tf.Variable(tf.zeros_like(self.adj_tensor))
self.x_changes = tf.Variable(tf.zeros_like(self.x_tensor))
if reset:
self.reset()
return self
def reset(self):
super().reset()
self.adj_flips = []
self.nattr_flips = []
with tf.device(self.device):
self.adj_changes.assign(tf.zeros_like(self.adj_tensor))
self.x_changes.assign(tf.zeros_like(self.x_tensor))
return self
def filter_potential_singletons(self, modified_adj):
"""
Computes a mask for entries potentially leading to singleton nodes, i.e. one of the two nodes corresponding to
the entry has degree 1 and there is an edge between the two nodes.
Returns
-------
tf.Tensor shape [N, N], float with ones everywhere except the entries of potential singleton nodes,
where the returned tensor has value 0.
"""
N = self.num_nodes
degrees = tf.reduce_sum(modified_adj, axis=1)
degree_one = tf.equal(degrees, 1)
resh = tf.reshape(tf.tile(degree_one, [N]), [N, N])
l_and = tf.logical_and(resh, tf.equal(modified_adj, 1))
logical_and_symmetric = tf.logical_or(l_and, tf.transpose(l_and))
flat_mask = 1. - tf.cast(logical_and_symmetric, self.floatx)
return flat_mask
# def log_likelihood_constraint(self, adj, modified_adj, ll_cutoff):
# """
# Computes a mask for entries that, if the edge corresponding to the entry is added/removed, would lead to the
# log likelihood constraint to be violated.
# """
# t_d_min = tf.constant(2., dtype=self.floatx)
# t_possible_edges = tf.constant(np.array(
# np.triu(np.ones([self.num_nodes, self.num_nodes]),
# k=1).nonzero()).T,
# dtype=self.intx)
# allowed_mask, current_ratio = likelihood_ratio_filter(
# t_possible_edges, modified_adj, adj, t_d_min, ll_cutoff)
# return allowed_mask, current_ratio
@tf.function
def get_perturbed_adj(self, adj, adj_changes):
adj_changes_square = adj_changes - tf.linalg.band_part(adj_changes, 0, 0)
adj_changes_sym = adj_changes_square + tf.transpose(adj_changes_square)
clipped_adj_changes = self.clip(adj_changes_sym)
return adj + clipped_adj_changes
@tf.function
def get_perturbed_x(self, x, x_changes):
return x + self.clip(x_changes)
def forward(self, x, adj):
h = x
for w in self.weights[:-1]:
h = adj @ h @ w
if self.use_relu:
h = relu(h)
return adj @ h @ self.weights[-1]
def structure_score(self,
modified_adj,
adj_grad,
ll_constraint=None,
ll_cutoff=None):
adj_meta_grad = adj_grad * (-2. * modified_adj + 1.)
# Make sure that the minimum entry is 0.
adj_meta_grad -= tf.reduce_min(adj_meta_grad)
# if not self.allow_singleton:
# # Set entries to 0 that could lead to singleton nodes.
# singleton_mask = self.filter_potential_singletons(modified_adj)
# adj_meta_grad *= singleton_mask
# if ll_constraint:
# allowed_mask, self.ll_ratio = self.log_likelihood_constraint(
# modified_adj, self.adj_tensor, ll_cutoff)
# adj_meta_grad = adj_meta_grad * allowed_mask
return tf.reshape(adj_meta_grad, [-1])
def feature_score(self, modified_nx, x_grad):
x_meta_grad = x_grad * (-2. * modified_nx + 1.)
x_meta_grad -= tf.reduce_min(x_meta_grad)
return tf.reshape(x_meta_grad, [-1])
def clip(self, matrix):
clipped_matrix = tf.clip_by_value(matrix, -1., 1.)
return clipped_matrix
@TensorFlow.register()
class Metattack(BaseMeta):
def process(self,
train_nodes,
unlabeled_nodes,
self_training_labels,
hids=[16],
lr=0.1,
epochs=100,
momentum=0.9,
lambda_=0.,
use_relu=True,
reset=True):
self.lr = lr
self.epochs = epochs
self.momentum = momentum
self.lambda_ = lambda_
if lambda_ not in (0., 0.5, 1.):
raise ValueError(
'Invalid value of `lambda_`, allowed values [0: (meta-self), 1: (meta-train), 0.5: (meta-both)].'
)
return super().process(train_nodes=train_nodes,
unlabeled_nodes=unlabeled_nodes,
self_training_labels=self_training_labels,
hids=hids,
use_relu=use_relu,
reset=reset)
def build(self, hids):
hids = gf.repeat(hids)
weights, w_velocities = [], []
zeros_initializer = zeros()
pre_hid = self.num_attrs
for hid in hids + [self.num_classes]:
shape = (pre_hid, hid)
# use zeros_initializer temporarily to save time
weight = tf.Variable(zeros_initializer(shape=shape, dtype=self.floatx))
w_velocity = tf.Variable(zeros_initializer(shape=shape, dtype=self.floatx))
weights.append(weight)
w_velocities.append(w_velocity)
pre_hid = hid
self.weights, self.w_velocities = weights, w_velocities
def initialize(self):
w_initializer = glorot_uniform()
zeros_initializer = zeros()
for w, wv in zip(self.weights, self.w_velocities):
w.assign(w_initializer(w.shape, dtype=self.floatx))
wv.assign(zeros_initializer(wv.shape, dtype=self.floatx))
@tf.function
def train_step(self, x, adj, index, labels):
with tf.GradientTape() as tape:
output = self.forward(x, adj)
logit = tf.gather(output, index)
loss = self.loss_fn(labels, logit)
weight_grads = tape.gradient(loss, self.weights)
return weight_grads
def inner_train(self, x, adj):
self.initialize()
adj_norm = gf.normalize_adj_tensor(adj)
for it in range(self.epochs):
weight_grads = self.train_step(x, adj_norm, self.train_nodes, self.labels_train)
for v, g in zip(self.w_velocities, weight_grads):
v.assign(self.momentum * v + g)
for w, v in zip(self.weights, self.w_velocities):
w.assign_sub(self.lr * v)
@tf.function
def meta_grad(self):
modified_adj, modified_nx = self.adj_tensor, self.x_tensor
adj_tensor, x_tensor = self.adj_tensor, self.x_tensor
persistent = self.structure_attack and self.feature_attack
with tf.GradientTape(persistent=persistent) as tape:
if self.structure_attack:
modified_adj = self.get_perturbed_adj(adj_tensor, self.adj_changes)
if self.feature_attack:
modified_nx = self.get_perturbed_x(x_tensor, self.x_changes)
adj_norm = gf.normalize_adj_tensor(modified_adj)
output = self.forward(modified_nx, adj_norm)
logit_labeled = tf.gather(output, self.train_nodes)
logit_unlabeled = tf.gather(output, self.unlabeled_nodes)
loss_labeled = self.loss_fn(self.labels_train, logit_labeled)
loss_unlabeled = self.loss_fn(self.self_training_labels, logit_unlabeled)
attack_loss = self.lambda_ * loss_labeled + (1 - self.lambda_) * loss_unlabeled
adj_grad, x_grad = None, None
if self.feature_attack:
x_grad = tape.gradient(attack_loss, self.x_changes)
if self.structure_attack:
adj_grad = tape.gradient(attack_loss, self.adj_changes)
return x_grad, adj_grad
def attack(self,
num_budgets=0.05,
structure_attack=True,
feature_attack=False,
ll_constraint=False,
ll_cutoff=0.004,
disable=False):
super().attack(num_budgets, structure_attack, feature_attack)
if ll_constraint:
raise NotImplementedError(
"`log_likelihood_constraint` has not been well tested."
" Please set `ll_constraint=False` to achieve a better performance."
)
if feature_attack and not self.graph.is_binary():
raise ValueError(
"Attacks on the node features are currently only supported for binary attributes."
)
with tf.device(self.device):
modified_adj, modified_nx = self.adj_tensor, self.x_tensor
adj_tensor, x_tensor = self.adj_tensor, self.x_tensor
adj_changes, x_changes = self.adj_changes, self.x_changes
adj_flips, nattr_flips = self.adj_flips, self.nattr_flips
self.inner_train(modified_nx, modified_adj)
for it in tqdm(range(self.num_budgets),
desc='Perturbing Graph',
disable=disable):
if structure_attack:
modified_adj = self.get_perturbed_adj(adj_tensor, adj_changes)
if feature_attack:
modified_nx = self.get_perturbed_x(x_tensor,x_changes)
self.inner_train(modified_nx, modified_adj)
x_grad, adj_grad = self.meta_grad()
adj_meta_score = tf.constant(0.0)
x_meta_score = tf.constant(0.0)
if structure_attack:
adj_meta_score = self.structure_score(modified_adj, adj_grad, ll_constraint, ll_cutoff)
if feature_attack:
x_meta_score = self.feature_score(modified_nx, x_grad)
if tf.reduce_max(adj_meta_score) >= tf.reduce_max(x_meta_score) and structure_attack:
adj_meta_argmax = tf.argmax(adj_meta_score)
row, col = divmod(adj_meta_argmax.numpy(), self.num_nodes)
adj_changes[row, col].assign(-2. * modified_adj[row, col] + 1.)
adj_changes[col, row].assign(-2. * modified_adj[col, row] + 1.)
adj_flips.append((row, col))
elif tf.reduce_max(adj_meta_score) < tf.reduce_max(x_meta_score) and feature_attack:
x_meta_argmax = tf.argmax(x_meta_score)
row, col = divmod(x_meta_argmax.numpy(), self.num_attrs)
x_changes[row, col].assign(-2 * modified_nx[row, col] + 1)
nattr_flips.append((row, col))
else:
warnings.warn(f"Do nothing at iter {it}. adj_meta_score={adj_meta_score}, x_meta_score={x_meta_score}",
UserWarning)
@TensorFlow.register()
class MetaApprox(BaseMeta):
def process(self,
train_nodes,
unlabeled_nodes,
self_training_labels,
hids=[16],
lr=0.1,
epochs=100,
lambda_=0.,
use_relu=True):
self.lr = lr
self.epochs = epochs
self.lambda_ = lambda_
if lambda_ not in (0., 0.5, 1.):
raise ValueError(
'Invalid value of `lambda_`, allowed values [0: (meta-self), 1: (meta-train), 0.5: (meta-both)].'
)
return super().process(train_nodes=train_nodes,
unlabeled_nodes=unlabeled_nodes,
self_training_labels=self_training_labels,
hids=hids,
use_relu=use_relu)
def build(self, hids):
hids = gf.repeat(hids)
weights = []
zeros_initializer = zeros()
pre_hid = self.num_attrs
for hid in hids + [self.num_classes]:
shape = (pre_hid, hid)
# use zeros_initializer temporarily to save time
weight = tf.Variable(zeros_initializer(shape=shape, dtype=self.floatx))
weights.append(weight)
pre_hid = hid
self.weights = weights
self.adj_grad_sum = tf.Variable(tf.zeros_like(self.adj_tensor))
self.x_grad_sum = tf.Variable(tf.zeros_like(self.x_tensor))
self.optimizer = Adam(self.lr, epsilon=1e-8)
def initialize(self):
w_initializer = glorot_uniform()
zeros_initializer = zeros()
for w in self.weights:
w.assign(w_initializer(w.shape, dtype=self.floatx))
if self.structure_attack:
self.adj_grad_sum.assign(zeros_initializer(self.adj_grad_sum.shape, dtype=self.floatx))
if self.feature_attack:
self.x_grad_sum.assign(zeros_initializer(self.x_grad_sum.shape, dtype=self.floatx))
# reset optimizer
for var in self.optimizer.variables():
var.assign(tf.zeros_like(var))
@tf.function
def meta_grad(self):
self.initialize()
modified_adj, modified_nx = self.adj_tensor, self.x_tensor
adj_tensor, x_tensor = self.adj_tensor, self.x_tensor
adj_grad_sum, x_grad_sum = self.adj_grad_sum, self.x_grad_sum
optimizer = self.optimizer
for it in tf.range(self.epochs):
with tf.GradientTape(persistent=True) as tape:
if self.structure_attack:
modified_adj = self.get_perturbed_adj(adj_tensor, self.adj_changes)
if self.feature_attack:
modified_nx = self.get_perturbed_x(x_tensor, self.x_changes)
adj_norm = gf.normalize_adj_tensor(modified_adj)
output = self.forward(modified_nx, adj_norm)
logit_labeled = tf.gather(output, self.train_nodes)
logit_unlabeled = tf.gather(output, self.unlabeled_nodes)
loss_labeled = self.loss_fn(self.labels_train, logit_labeled)
loss_unlabeled = self.loss_fn(self.self_training_labels, logit_unlabeled)
attack_loss = self.lambda_ * loss_labeled + (1 - self.lambda_) * loss_unlabeled
adj_grad, x_grad = None, None
gradients = tape.gradient(loss_labeled, self.weights)
optimizer.apply_gradients(zip(gradients, self.weights))
if self.structure_attack:
adj_grad = tape.gradient(attack_loss, self.adj_changes)
adj_grad_sum.assign_add(adj_grad)
if self.feature_attack:
x_grad = tape.gradient(attack_loss, self.x_changes)
x_grad_sum.assign_add(x_grad)
del tape
return x_grad_sum, adj_grad_sum
def attack(self,
num_budgets=0.05,
structure_attack=True,
feature_attack=False,
ll_constraint=False,
ll_cutoff=0.004,
disable=False):
super().attack(num_budgets, structure_attack, feature_attack)
if ll_constraint:
raise NotImplementedError(
"`log_likelihood_constraint` has not been well tested."
" Please set `ll_constraint=False` to achieve a better performance."
)
if feature_attack and not self.graph.is_binary():
raise ValueError(
"Attacks on the node features are currently only supported for binary attributes."
)
with tf.device(self.device):
modified_adj, modified_nx = self.adj_tensor, self.x_tensor
adj_tensor, x_tensor = self.adj_tensor, self.x_tensor
adj_changes, x_changes = self.adj_changes, self.x_changes
adj_flips, nattr_flips = self.adj_flips, self.nattr_flips
for it in tqdm(range(self.num_budgets),
desc='Perturbing Graph',
disable=disable):
x_grad, adj_grad = self.meta_grad()
adj_meta_score = tf.constant(0.0)
x_meta_score = tf.constant(0.0)
if structure_attack:
modified_adj = self.get_perturbed_adj(adj_tensor, adj_changes)
adj_meta_score = self.structure_score(modified_adj, adj_grad, ll_constraint, ll_cutoff)
if feature_attack:
modified_nx = self.get_perturbed_x(x_tensor, x_changes)
x_meta_score = self.feature_score(modified_nx, x_grad)
if tf.reduce_max(adj_meta_score) >= tf.reduce_max(x_meta_score) and structure_attack:
adj_meta_argmax = tf.argmax(adj_meta_score)
row, col = divmod(adj_meta_argmax.numpy(), self.num_nodes)
adj_changes[row, col].assign(-2. * modified_adj[row, col] + 1.)
adj_changes[col, row].assign(-2. * modified_adj[col, row] + 1.)
adj_flips.append((row, col))
elif tf.reduce_max(adj_meta_score) < tf.reduce_max(x_meta_score) and feature_attack:
x_meta_argmax = tf.argmax(x_meta_score)
row, col = divmod(x_meta_argmax.numpy(), self.num_attrs)
x_changes[row, col].assign(-2 * modified_nx[row, col] + 1)
nattr_flips.append((row, col))
else:
warnings.warn(f"Do nothing at iter {it}. adj_meta_score={adj_meta_score}, x_meta_score={x_meta_score}",
UserWarning)
return self
```
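Metattack expects self-training labels for the unlabeled nodes, which are usually taken from a surrogate's own predictions. A minimal sketch, with the API assumed from the `process`/`attack` signatures above and the trainer pattern used earlier in this section; `predict` on an array of node indices returning one probability row per node is an assumption, as is the `gg.attack.untargeted.Metattack` lookup (TensorFlow backend).
```python
import numpy as np
import graphgallery as gg

data = gg.datasets.NPZDataset('cora', root='~/GraphData/datasets', transform='standardize')
graph = data.graph
splits = data.split_nodes(random_state=15)

# surrogate trained on the labeled nodes; its predictions on the unlabeled
# nodes serve as self-training labels (predict-on-array behaviour is assumed)
trainer = gg.gallery.nodeclas.GCN(seed=42).make_data(graph).build()
trainer.fit(splits.train_nodes, splits.val_nodes)
unlabeled = np.hstack([splits.val_nodes, splits.test_nodes])
self_training_labels = trainer.predict(unlabeled).argmax(1)

attacker = gg.attack.untargeted.Metattack(graph).process(
    splits.train_nodes, unlabeled, self_training_labels, lambda_=0.)
attacker.attack(num_budgets=0.05, structure_attack=True)
print(len(attacker.adj_flips), "edges flipped")
```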
#### File: graphgallery/data/reader.py
```python
import numpy as np
import scipy.sparse as sp
from graphgallery import functional as gf
from sklearn.preprocessing import LabelEncoder
from .io import read_csv, read_json
class Reader:
@staticmethod
def read_graphs(filepath):
graphs = read_json(filepath)
graphs = [gf.edge_to_sparse_adj(graphs[str(i)]) for i in range(len(graphs))]
return graphs
@staticmethod
def read_edges(filepath, src='id_1', dst='id_2'):
data = read_csv(filepath)
row = data[src].to_numpy()
col = data[dst].to_numpy()
N = max(row.max(), col.max()) + 1
graph = sp.csr_matrix((np.ones(row.shape[0], dtype=np.float32), (row, col)), shape=(N, N))
return graph
@staticmethod
def read_csv_features(filepath):
data = read_csv(filepath)
row = np.array(data["node_id"])
col = np.array(data["feature_id"])
values = np.array(data["value"])
node_count = max(row) + 1
feature_count = max(col) + 1
shape = (node_count, feature_count)
features = sp.csr_matrix((values, (row, col)), shape=shape)
return features
@staticmethod
def read_json_features(filepath):
data = read_json(filepath)
rows = []
cols = []
for k, v in data.items():
k = int(k)
rows += [k] * len(v)
cols += v
N = max(rows) + 1
M = max(cols) + 1
features = sp.csr_matrix((np.ones(len(rows), dtype=np.float32), (rows, cols)), shape=(N, M))
return features
@staticmethod
def read_target(filepath, return_target=True):
data = read_csv(filepath)
if return_target:
return data["target"].to_numpy()
else:
return data
```
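For illustration, the edge-list logic of `read_edges` can be reproduced with pandas on an in-memory CSV. This standalone sketch bypasses the package's own `read_csv` helper and only mirrors the CSR construction shown above.
```python
import io
import numpy as np
import pandas as pd
import scipy.sparse as sp

csv = io.StringIO("id_1,id_2\n0,1\n1,2\n2,0\n")
data = pd.read_csv(csv)

row = data['id_1'].to_numpy()
col = data['id_2'].to_numpy()
N = max(row.max(), col.max()) + 1
graph = sp.csr_matrix((np.ones(row.shape[0], dtype=np.float32), (row, col)), shape=(N, N))
print(graph.toarray())
```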
#### File: graphgallery/functional/random.py
```python
import random
import torch
import numpy as np
import graphgallery as gg
import tensorflow as tf
__all__ = ["random_seed"]
def random_seed(seed=None, backend=None):
backend = gg.backend(backend)
np.random.seed(seed)
random.seed(seed)
if backend == "tensorflow":
tf.random.set_seed(seed)
else:
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# torch.cuda.manual_seed_all(seed)
```
#### File: functional/sparse/neighbor_sampler.py
```python
import numpy as np
import scipy.sparse as sp
import numba
from numba import njit
from ..base_transforms import SparseTransform
from ..transform import Transform
from ..sparse import add_selfloops, eliminate_selfloops
@Transform.register()
class NeighborSampler(SparseTransform):
def __init__(self, max_degree: int = 25,
selfloop: bool = True,
add_dummy: bool = True):
super().__init__()
self.collect(locals())
def __call__(self, adj_matrix: sp.csr_matrix):
return neighbor_sampler(adj_matrix, max_degree=self.max_degree,
selfloop=self.selfloop, add_dummy=self.add_dummy)
@njit
def sample(indices, indptr, max_degree=25, add_dummy=True):
N = len(indptr) - 1
if add_dummy:
M = numba.int32(N) + np.zeros((N+1, max_degree), dtype=np.int32)
else:
M = np.zeros((N, max_degree), dtype=np.int32)
for n in range(N):
neighbors = indices[indptr[n]:indptr[n + 1]]
size = neighbors.size
if size > max_degree:
neighbors = np.random.choice(neighbors, max_degree, replace=False)
elif size < max_degree:
neighbors = np.random.choice(neighbors, max_degree, replace=True)
M[n] = neighbors
return M
def neighbor_sampler(adj_matrix: sp.csr_matrix, max_degree: int = 25,
selfloop: bool=True, add_dummy=True):
if selfloop:
adj_matrix = add_selfloops(adj_matrix)
else:
adj_matrix = eliminate_selfloops(adj_matrix)
M = sample(adj_matrix.indices, adj_matrix.indptr, max_degree=max_degree, add_dummy=add_dummy)
np.random.shuffle(M.T)
return M
```
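The sampler pads every node's neighbor list to a fixed width; with `add_dummy=True` an extra row and the dummy index `N` are used so downstream gathers never go out of range. The following is a plain NumPy illustration of the padding idea under simplified assumptions (no self-loop handling, no dummy row); `pad_neighbors` is a hypothetical helper, not the library transform.
```python
import numpy as np
import scipy.sparse as sp

def pad_neighbors(adj: sp.csr_matrix, max_degree: int = 3) -> np.ndarray:
    """Sample or repeat each node's neighbors so every row has `max_degree` entries."""
    N = adj.shape[0]
    M = np.full((N, max_degree), N, dtype=np.int32)  # N acts as a dummy index
    for n in range(N):
        nbrs = adj.indices[adj.indptr[n]:adj.indptr[n + 1]]
        if nbrs.size == 0:
            continue
        replace = nbrs.size < max_degree
        M[n] = np.random.choice(nbrs, max_degree, replace=replace)
    return M

adj = sp.csr_matrix(np.array([[0, 1, 1, 0],
                              [1, 0, 0, 0],
                              [1, 0, 0, 1],
                              [0, 0, 1, 0]], dtype=np.float32))
print(pad_neighbors(adj, max_degree=3))
```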
#### File: gallery/embedding/laplacianeigenmaps.py
```python
import numpy as np
import scipy.sparse as sp
from sklearn import preprocessing
from utils import normalized_laplacian_matrix
class LaplacianEigenmaps:
r"""An implementation of `"Laplacian Eigenmaps" <https://papers.nips.cc/paper/1961-laplacian-eigenmaps-and-spectral-techniques-for-embedding-and-clustering>`_
from the NIPS '01 paper "Laplacian Eigenmaps and Spectral Techniques for Embedding and Clustering".
The procedure extracts the eigenvectors corresponding to the largest eigenvalues
of the graph Laplacian. These vectors are used as the node embedding.
"""
def __init__(self, dimensions: int = 32, seed: int = None):
self.dimensions = dimensions
self.seed = seed
def fit(self, graph: sp.csr_matrix):
"""
Fitting a Laplacian EigenMaps model.
"""
L_tilde = normalized_laplacian_matrix(graph)
_, self._embedding = sp.linalg.eigsh(L_tilde, k=self.dimensions,
which='SM', return_eigenvectors=True)
def get_embedding(self, normalize=True) -> np.array:
"""Getting the node embedding."""
embedding = self._embedding
if normalize:
embedding = preprocessing.normalize(embedding)
return embedding
```
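A self-contained sketch of what the class computes: build the normalized Laplacian of a toy graph and take the eigenvectors of its smallest eigenvalues as the embedding. A dense eigendecomposition is used here for simplicity; the class itself relies on `normalized_laplacian_matrix` and sparse `eigsh` with `which='SM'`.
```python
import numpy as np
import scipy.sparse as sp
import scipy.linalg as spl
from sklearn import preprocessing

# toy ring graph with 20 nodes
N = 20
row = np.arange(N)
col = (row + 1) % N
A = sp.csr_matrix((np.ones(N), (row, col)), shape=(N, N))
A = (A + A.T).tocsr()

# normalized Laplacian L = I - D^{-1/2} A D^{-1/2}
deg = np.asarray(A.sum(1)).ravel()
d_inv_sqrt = sp.diags(1.0 / np.sqrt(deg))
L = sp.eye(N) - d_inv_sqrt @ A @ d_inv_sqrt

vals, vecs = spl.eigh(L.toarray())              # eigenvalues in ascending order
embedding = preprocessing.normalize(vecs[:, :8])  # eigenvectors of the 8 smallest
print(embedding.shape)  # (20, 8)
```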
#### File: gallery/embedding/nmfadmm.py
```python
import numpy as np
import scipy.sparse as sp
from sklearn import preprocessing
class NMFADMM:
r"""An implementation of `"NMF-ADMM" <http://statweb.stanford.edu/~dlsun/papers/nmf_admm.pdf>`_
from the ICASSP '14 paper "Alternating Direction Method of Multipliers for
Non-Negative Matrix Factorization with the Beta-Divergence". The procedure
learns an embedding of the normalized adjacency matrix by using the alternating
direction method of multipliers to solve a non-negative matrix factorization problem.
"""
def __init__(self, dimensions: int = 32, iterations: int = 100, rho: float = 1.0, seed: int = None):
self.dimensions = dimensions
self.iterations = iterations
self.rho = rho
self.seed = seed
def _init_weights(self):
"""
Initializing model weights.
"""
self._W = np.random.uniform(-0.1, 0.1, (self._V.shape[0], self.dimensions))
self._H = np.random.uniform(-0.1, 0.1, (self.dimensions, self._V.shape[1]))
X_i, Y_i = np.nonzero(self._V)
scores = self._W[X_i] * self._H[:, Y_i].T + np.random.uniform(0, 1, (self.dimensions, ))
values = np.sum(scores, axis=-1)
self._X = sp.coo_matrix((values, (X_i, Y_i)), shape=self._V.shape)
self._W_plus = np.random.uniform(0, 0.1, (self._V.shape[0], self.dimensions))
self._H_plus = np.random.uniform(0, 0.1, (self.dimensions, self._V.shape[1]))
self._alpha_X = sp.coo_matrix((np.zeros(values.shape[0]), (X_i, Y_i)), shape=self._V.shape)
self._alpha_W = np.zeros(self._W.shape)
self._alpha_H = np.zeros(self._H.shape)
def _update_W(self):
"""
Updating the factor matrix W.
"""
left = np.linalg.pinv(self._H.dot(self._H.T) + np.eye(self.dimensions))
right_1 = self._X.dot(self._H.T).T + self._W_plus.T
right_2 = (1.0 / self.rho) * (self._alpha_X.dot(self._H.T).T - self._alpha_W.T)
self._W = left.dot(right_1 + right_2).T
def _update_H(self):
"""
Updating the factor matrix H.
"""
left = np.linalg.pinv(self._W.T.dot(self._W) + np.eye(self.dimensions))
right_1 = self._X.T.dot(self._W).T + self._H_plus
right_2 = (1.0 / self.rho) * (self._alpha_X.T.dot(self._W).T - self._alpha_H)
self._H = left.dot(right_1 + right_2)
def _update_X(self):
"""
Updating the auxiliary target matrix X.
"""
iX, iY = np.nonzero(self._V)
values = np.sum(self._W[iX] * self._H[:, iY].T, axis=-1)
scores = sp.coo_matrix((values - 1, (iX, iY)), shape=self._V.shape)
left = self.rho * scores - self._alpha_X
right = (left.power(2) + 4.0 * self.rho * self._V).power(0.5)
self._X = (left + right) / (2 * self.rho)
def _update_W_plus(self):
"""
Updating the positive primal factor W_plus.
"""
self._W_plus = np.maximum(self._W + (1 / self.rho) * self._alpha_W, 0)
def _update_H_plus(self):
"""
Updating the positive primal factor H_plus.
"""
self._H_plus = np.maximum(self._H + (1 / self.rho) * self._alpha_H, 0)
def _update_alpha_X(self):
"""
Updating target matrix dual.
"""
iX, iY = np.nonzero(self._V)
values = np.sum(self._W[iX] * self._H[:, iY].T, axis=-1)
scores = sp.coo_matrix((values, (iX, iY)), shape=self._V.shape)
self._alpha_X = self._alpha_X + self.rho * (self._X - scores)
def _update_alpha_W(self):
"""
Updating the dual variable for W.
"""
self._alpha_W = self._alpha_W + self.rho * (self._W - self._W_plus)
def _update_alpha_H(self):
"""
Updating the dual variable for H.
"""
self._alpha_H = self._alpha_H + self.rho * (self._H - self._H_plus)
def _create_base_matrix(self, graph):
"""
Creating the normalized adjacency matrix.
"""
degree = graph.sum(1).A1
D_inverse = sp.diags(1.0 / degree, format="csr")
A_hat = D_inverse @ graph
return A_hat
def fit(self, graph: sp.csr_matrix):
"""
Fitting an NMF model on the normalized adjacency matrix with ADMM.
"""
self._V = self._create_base_matrix(graph)
self._init_weights()
for _ in range(self.iterations):
self._update_W()
self._update_H()
self._update_X()
self._update_W_plus()
self._update_H_plus()
self._update_alpha_X()
self._update_alpha_W()
self._update_alpha_H()
def get_embedding(self, normalize=True) -> np.array:
"""Getting the node embedding."""
embedding = np.concatenate([self._W_plus, self._H_plus.T], axis=1)
if normalize:
embedding = preprocessing.normalize(embedding)
return embedding
```
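A usage sketch on a toy graph, assuming the `NMFADMM` class defined above is in scope: the returned embedding concatenates the two positive primal factors, so its width is `2 * dimensions`.
```python
import numpy as np
import scipy.sparse as sp

# toy undirected ring graph with 30 nodes
N = 30
row = np.arange(N)
col = (row + 1) % N
adj = sp.csr_matrix((np.ones(N), (row, col)), shape=(N, N))
adj = (adj + adj.T).tocsr()

model = NMFADMM(dimensions=8, iterations=50, rho=1.0)
model.fit(adj)
emb = model.get_embedding()
print(emb.shape)  # (30, 16): concatenation of W_plus and H_plus.T
```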
#### File: gallery/nodeclas/__init__.py
```python
from .trainer import Trainer
from .registered_models import (TensorFlow, PyTorch, PyG,
DGL_PyTorch, DGL_TensorFlow,
Common,
MAPPING)
import graphgallery
from functools import partial
def is_enabled(model: str) -> bool:
"""Return true if the model is enabled by the current backend.
Parameters
----------
model : str
The model name.
Returns
-------
bool
True if the model is enabled by the current backend.
"""
return model in enabled_models()
def enabled_models(with_common=True):
"""Return the models in the gallery enabled by the current backend.
Parameters
----------
with_common : bool
Whether to return common models (framework-agnostic).
Returns
-------
graphgallery.functional.BunchDict
A dict of models enabled by the current backend.
"""
return get_registry() + Common
graphgallery.load_models(__name__, mapping=MAPPING)
get_registry = partial(graphgallery.get_registry, mapping=MAPPING)
models = enabled_models
```
#### File: tensorflow/experimental/gcna.py
```python
import tensorflow as tf
from graphgallery.sequence import FullBatchSequence
from graphgallery import functional as gf
from graphgallery.gallery.nodeclas import TensorFlow
from graphgallery.nn.models import get_model
from ..gcn import GCN
@TensorFlow.register()
class GCNA(GCN):
"""
GCN + node attribute matrix
Implementation of Graph Convolutional Networks (GCN) concatenated with the node attribute matrix.
"""
def model_step(self,
hids=[16],
acts=['relu'],
dropout=0.5,
weight_decay=5e-4,
lr=0.01,
bias=False):
model = get_model("GCNA", self.backend)
model = model(self.graph.num_node_attrs,
self.graph.num_node_classes,
hids=hids,
acts=acts,
dropout=dropout,
weight_decay=weight_decay,
lr=lr,
bias=bias)
return model
```
#### File: nodeclas/tensorflow/fastgcn.py
```python
import tensorflow as tf
from graphgallery.sequence import FastGCNBatchSequence
from graphgallery import functional as gf
from graphgallery.gallery.nodeclas import TensorFlow
from graphgallery.gallery.nodeclas import Trainer
from graphgallery.nn.models import get_model
@TensorFlow.register()
class FastGCN(Trainer):
"""
Implementation of Fast Graph Convolutional Networks (FastGCN).
`FastGCN: Fast Learning with Graph Convolutional Networks via Importance Sampling
<https://arxiv.org/abs/1801.10247>`
Tensorflow 1.x implementation: <https://github.com/matenure/FastGCN>
"""
def custom_setup(self):
cfg = self.cfg.fit
cfg.batch_size = 256
cfg.rank = 100
cfg = self.cfg.evaluate
cfg.batch_size = None
cfg.rank = None
def data_step(self,
adj_transform="normalize_adj",
attr_transform=None):
graph = self.graph
adj_matrix = gf.get(adj_transform)(graph.adj_matrix)
node_attr = gf.get(attr_transform)(graph.node_attr)
node_attr = adj_matrix @ node_attr
X, A = gf.astensor(node_attr, device=self.data_device), adj_matrix
# ``A`` and ``X`` are cached for later use
self.register_cache(X=X, A=A)
def model_step(self,
hids=[32],
acts=['relu'],
dropout=0.5,
weight_decay=5e-4,
lr=0.01,
bias=False):
model = get_model("FastGCN", self.backend)
model = model(self.graph.num_node_attrs,
self.graph.num_node_classes,
hids=hids,
acts=acts,
dropout=dropout,
weight_decay=weight_decay,
lr=lr,
bias=bias)
return model
def train_loader(self, index):
cfg = self.cfg.fit
labels = self.graph.node_label[index]
adj_matrix = self.graph.adj_matrix[index][:, index]
adj_matrix = self.transform.adj_transform(adj_matrix)
X = tf.gather(self.cache.X, index)
sequence = FastGCNBatchSequence([X, adj_matrix],
labels,
batch_size=cfg.batch_size,
rank=cfg.rank,
device=self.data_device)
return sequence
def test_loader(self, index):
cfg = self.cfg.evaluate
labels = self.graph.node_label[index]
A = self.cache.A[index]
sequence = FastGCNBatchSequence([self.cache.X, A],
labels,
batch_size=cfg.batch_size,
rank=cfg.rank,
device=self.data_device)
return sequence
```
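FastGCN follows the same trainer workflow as the GCN used in the example script at the top of this section; only the data and loader steps differ (batched, importance-sampled training). A minimal usage sketch, assuming the TensorFlow backend is active:
```python
import graphgallery as gg

data = gg.datasets.NPZDataset('cora', root='~/GraphData/datasets', transform='standardize')
graph = data.graph
splits = data.split_nodes(random_state=15)

trainer = gg.gallery.nodeclas.FastGCN(device='cpu', seed=42).make_data(graph).build()
trainer.fit(splits.train_nodes, splits.val_nodes, verbose=0, epochs=100)
pred = trainer.predict(int(splits.test_nodes[0]), transform="softmax")
print(pred.argmax())
```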
#### File: tensorflow/conv/arma.py
```python
from tensorflow.keras import activations, constraints, initializers, regularizers
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Layer
class ARMAConv(Layer):
r"""
An Auto-Regressive Moving Average convolutional layer (ARMA) from the paper
[Graph Neural Networks with convolutional ARMA filters](https://arxiv.org/abs/1901.01343)
**Mode**: single, disjoint, mixed, batch.
This layer computes:
$$
\X' = \frac{1}{K} \sum\limits_{k=1}^K \bar\X_k^{(T)},
$$
where \(K\) is the order of the ARMA\(_K\) filter, and where:
$$
\bar \X_k^{(t + 1)} =
\sigma \left(\tilde \A \bar \X^{(t)} \W^{(t)} + \X \V^{(t)} \right)
$$
is a recursive approximation of an ARMA\(_1\) filter, where
\( \bar \X^{(0)} = \X \)
and
$$
\tilde \A = \D^{-1/2} \A \D^{-1/2}.
$$
**Input**
- Node features of shape `([batch], n_nodes, n_node_features)`;
- Normalized and rescaled Laplacian of shape `([batch], n_nodes, n_nodes)`; can be
computed with `spektral.utils.convolution.normalized_laplacian` and
`spektral.utils.convolution.rescale_laplacian`.
**Output**
- Node features with the same shape as the input, but with the last
dimension changed to `units`.
Parameters:
- `units`: number of output units;
- `order`: order of the full ARMA\(_K\) filter, i.e., the number of parallel
stacks in the layer;
- `iterations`: number of iterations to compute each ARMA\(_1\) approximation;
- `share_weights`: share the weights in each ARMA\(_1\) stack.
- `gcn_activation`: gcn_activation function to compute each ARMA\(_1\)
stack;
- `dropout_rate`: dropout rate for skip connection;
- `activation`: activation function;
- `use_bias`: bool, add a bias vector to the output;
- `kernel_initializer`: initializer for the weights;
- `bias_initializer`: initializer for the bias vector;
- `kernel_regularizer`: regularization applied to the weights;
- `bias_regularizer`: regularization applied to the bias vector;
- `activity_regularizer`: regularization applied to the output;
- `kernel_constraint`: constraint applied to the weights;
- `bias_constraint`: constraint applied to the bias vector.
"""
def __init__(
self,
units,
order=1,
iterations=1,
share_weights=False,
gcn_activation="elu",
activation=None,
dropout=0.75,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs
):
super().__init__(**kwargs)
self.units = units
self.iterations = iterations
self.order = order
self.share_weights = share_weights
self.use_bias = use_bias
self.activation = activations.get(activation)
self.gcn_activation = activations.get(gcn_activation)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = Dropout(dropout)
def build(self, input_shape):
assert len(input_shape) >= 2
F = input_shape[0][-1]
# Create weights for parallel stacks
# self.kernels[k][i] refers to the k-th stack, i-th iteration
self.kernels = []
for k in range(self.order):
kernel_stack = []
current_shape = F
for i in range(self.iterations):
kernel_stack.append(
self.create_weights(
current_shape, F, self.units, "ARMA_GCS_{}{}".format(k, i)
)
)
current_shape = self.units
if self.share_weights and i == 1:
# No need to continue because all weights will be shared
break
self.kernels.append(kernel_stack)
self.built = True
def call(self, inputs):
x, a = inputs
output = []
for k in range(self.order):
output_k = x
for i in range(self.iterations):
output_k = self.gcs([output_k, x, a], k, i)
output.append(output_k)
output = K.stack(output, axis=-1)
output = K.mean(output, axis=-1)
output = self.activation(output)
return output
def create_weights(self, input_dim, input_dim_skip, units, name):
"""
Creates a set of weights for a GCN with skip connections.
:param input_dim: dimension of the input space
:param input_dim_skip: dimension of the input space for the skip connection
:param units: dimension of the output space
:param name: name of the layer
:return:
- kernel_1, from input space of the layer to output space
- kernel_2, from input space of the skip connection to output space
- bias, bias vector on the output space if use_bias=True, None otherwise.
"""
kernel_1 = self.add_weight(
shape=(input_dim, units),
name=name + "_kernel_1",
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
kernel_2 = self.add_weight(
shape=(input_dim_skip, units),
name=name + "_kernel_2",
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
bias = None
if self.use_bias:
bias = self.add_weight(
shape=(units,),
name=name + "_bias",
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
)
return kernel_1, kernel_2, bias
def gcs(self, inputs, stack, iteration):
"""
Creates a graph convolutional layer with a skip connection.
:param inputs: list of input Tensors, namely
- input node features
- input node features for the skip connection
- normalized adjacency matrix;
:param stack: int, current stack (used to retrieve kernels);
:param iteration: int, current iteration (used to retrieve kernels);
:return: output node features.
"""
x, x_skip, a = inputs
itr = 1 if self.share_weights and iteration >= 1 else iteration
kernel_1, kernel_2, bias = self.kernels[stack][itr]
output = K.dot(x, kernel_1)
output = K.dot(a, output)
skip = K.dot(x_skip, kernel_2)
skip = self.dropout(skip)
output += skip
if self.use_bias:
output = K.bias_add(output, bias)
output = self.gcn_activation(output)
return output
def get_config(self):
config = {
"units": self.units,
"iterations": self.iterations,
"order": self.order,
"share_weights": self.share_weights,
"activation": activations.serialize(self.activation),
"gcn_activation": activations.serialize(self.gcn_activation),
'kernel_initializer': initializers.serialize(
self.kernel_initializer),
'bias_initializer': initializers.serialize(
self.bias_initializer),
'kernel_regularizer': regularizers.serialize(
self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(
self.bias_regularizer),
'activity_regularizer': regularizers.serialize(
self.activity_regularizer),
'kernel_constraint': constraints.serialize(
self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
}
base_config = super().get_config()
return {**base_config, **config}
```
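A minimal eager-mode sketch of how the `ARMAConv` layer above might be called. The node count, feature size, hyper-parameters and the identity matrix standing in for the rescaled Laplacian are all made up for illustration, and the snippet assumes TensorFlow 2.x with the `ARMAConv` class above in scope.
```python
import tensorflow as tf

n_nodes, n_feats = 4, 8                      # hypothetical graph: 4 nodes, 8 features each
x = tf.random.normal((n_nodes, n_feats))     # node feature matrix X
a = tf.eye(n_nodes)                          # stand-in for the rescaled Laplacian

layer = ARMAConv(units=7, order=2, iterations=2, share_weights=True)
out = layer([x, a])                          # single mode: ([N, F], [N, N]) -> (N, units)
print(out.shape)                             # (4, 7): one 7-dimensional embedding per node
```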
#### File: models/pytorch/gat.py
```python
import torch.nn as nn
from torch import optim
from graphgallery.nn.models import TorchKeras
from graphgallery.nn.layers.pytorch import GATConv, SparseGATConv, Sequential, activations
from graphgallery.nn.metrics.pytorch import Accuracy
class GAT(TorchKeras):
def __init__(self,
in_features,
out_features,
hids=[8],
num_heads=[8],
acts=['elu'],
dropout=0.6,
weight_decay=5e-4,
lr=0.01,
bias=True):
super().__init__()
head = 1
conv = []
conv.append(nn.Dropout(dropout))
for hid, num_head, act in zip(hids, num_heads, acts):
conv.append(SparseGATConv(in_features * head,
hid,
attn_heads=num_head,
reduction='concat',
bias=bias))
conv.append(activations.get(act))
conv.append(nn.Dropout(dropout))
in_features = hid
head = num_head
conv.append(SparseGATConv(in_features * head,
out_features,
attn_heads=1,
reduction='average',
bias=bias))
conv = Sequential(*conv)
self.conv = conv
self.compile(loss=nn.CrossEntropyLoss(),
optimizer=optim.Adam([dict(params=conv[1].parameters(),
weight_decay=weight_decay),
dict(params=conv[2:].parameters(),
weight_decay=0.)], lr=lr),
metrics=[Accuracy()])
def forward(self, x, adj):
return self.conv(x, adj)
```
#### File: models/pytorch/latgcn.py
```python
import torch
import torch.nn as nn
from torch import optim
from graphgallery.nn.models import TorchKeras
from graphgallery.nn.layers.pytorch import GCNConv, Sequential, activations
from graphgallery.nn.metrics.pytorch import Accuracy
class LATGCN(TorchKeras):
def __init__(self,
in_features,
out_features,
num_nodes,
*,
gamma=0.01,
eta=0.1,
hids=[16],
acts=['relu'],
dropout=0.2,
weight_decay=5e-4,
lr=0.01,
bias=False):
super().__init__()
assert hids, "LATGCN requires hidden layers"
conv = []
conv.append(nn.Dropout(dropout))
for hid, act in zip(hids, acts):
conv.append(GCNConv(in_features,
hid,
bias=bias))
conv.append(activations.get(act))
conv.append(nn.Dropout(dropout))
in_features = hid
conv.append(GCNConv(in_features, out_features, bias=bias))
conv = Sequential(*conv)
self.zeta = nn.Parameter(torch.randn(num_nodes, hids[0]))
        self.conv1 = conv[:3]  # the input dropout, the first GCN layer and its activation
        self.conv2 = conv[3:]  # the remaining dropout and the output GCN layer
self.compile(loss=nn.CrossEntropyLoss(),
optimizer=optim.Adam([dict(params=self.conv1.parameters(),
weight_decay=weight_decay),
dict(params=self.conv2.parameters(),
weight_decay=0.)], lr=lr),
metrics=[Accuracy()])
self.zeta_opt = optim.Adam([self.zeta], lr=lr)
self.gamma = gamma
self.eta = eta
def forward(self, x, adj):
h = self.conv1(x, adj)
logit = self.conv2(h, adj)
if self.training:
self.zeta.data = clip_by_norm(self.zeta, self.eta)
hp = h + self.zeta
logitp = self.conv2(hp, adj)
reg_loss = torch.norm(logitp - logit)
return logit, reg_loss
else:
return logit
def train_step_on_batch(self,
x,
y,
out_weight=None,
device="cpu"):
self.train()
optimizer = self.optimizer
loss_fn = self.loss
metrics = self.metrics
if not isinstance(x, (list, tuple)):
x = [x]
x = [_x.to(device) if hasattr(_x, 'to') else _x for _x in x]
y = y.to(device)
zeta_opt = self.zeta_opt
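        # Inner maximization: take 20 gradient-ascent steps on the perturbation ``zeta``
        # (implemented as descent on -reg_loss) so the regularizer approximates the
        # worst-case latent perturbation before the usual descent step on the GCN weights.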
for _ in range(20):
zeta_opt.zero_grad()
_, reg_loss = self(*x)
reg_loss = -reg_loss
reg_loss.backward()
zeta_opt.step()
optimizer.zero_grad()
out, reg_loss = self(*x)
if out_weight is not None:
out = out[out_weight]
loss = loss_fn(out, y) + self.gamma * reg_loss
loss.backward()
optimizer.step()
for metric in metrics:
metric.update_state(y.cpu(), out.detach().cpu())
results = [loss.cpu().detach()] + [metric.result() for metric in metrics]
return dict(zip(self.metrics_names, results))
@torch.no_grad()
def clip_by_norm(tensor, clip_norm):
l2_norm = torch.norm(tensor, p=2, dim=1).view(-1, 1)
tensor = tensor * clip_norm / l2_norm
return tensor
```
#### File: models/pytorch/mlp.py
```python
import torch.nn as nn
from torch import optim
from graphgallery.nn.models import TorchKeras
from graphgallery.nn.metrics.pytorch import Accuracy
from graphgallery.nn.layers.pytorch import activations
class MLP(TorchKeras):
def __init__(self,
in_features,
out_features,
hids=[16],
acts=['relu'],
dropout=0.5,
weight_decay=5e-4,
lr=0.01,
bias=False):
super().__init__()
lin = []
lin.append(nn.Dropout(dropout))
for hid, act in zip(hids, acts):
lin.append(nn.Linear(in_features,
hid,
bias=bias))
lin.append(activations.get(act))
lin.append(nn.Dropout(dropout))
in_features = hid
lin.append(nn.Linear(in_features, out_features, bias=bias))
lin = nn.Sequential(*lin)
self.lin = lin
self.compile(loss=nn.CrossEntropyLoss(),
optimizer=optim.Adam([dict(params=lin[1].parameters(),
weight_decay=weight_decay),
dict(params=lin[2:].parameters(),
weight_decay=0.), ], lr=lr),
metrics=[Accuracy()])
def forward(self, x):
return self.lin(x)
``` |
{
"source": "jieru-hu/hydra",
"score": 2
} |
#### File: hydra/hydra/types.py
```python
import warnings
from dataclasses import dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Callable
from omegaconf import MISSING
TaskFunction = Callable[[Any], Any]
if TYPE_CHECKING:
from hydra._internal.callbacks import Callbacks
from hydra.core.config_loader import ConfigLoader
@dataclass
class HydraContext:
config_loader: "ConfigLoader"
callbacks: "Callbacks"
@dataclass
class TargetConf:
"""
This class is going away in Hydra 1.2.
You should no longer extend it or annotate with it.
instantiate will work correctly if you pass in a DictConfig object or any dataclass that has the
_target_ attribute.
"""
_target_: str = MISSING
def __post_init__(self) -> None:
# DEPRECATED: remove in 1.2
msg = "\nTargetConf is deprecated since Hydra 1.1 and will be removed in Hydra 1.2."
warnings.warn(message=msg, category=UserWarning)
class RunMode(Enum):
RUN = 1
MULTIRUN = 2
class ConvertMode(Enum):
"""ConvertMode for instantiate, controls return type.
A config is either config or instance-like (`_target_` field).
If instance-like, instantiate resolves the callable (class or
function) and returns the result of the call on the rest of the
parameters.
If "none", config-like configs will be kept as is.
If "partial", config-like configs will be converted to native python
containers (list and dict), unless they are structured configs (
dataclasses or attr instances).
If "all", config-like configs will all be converted to native python
containers (list and dict).
"""
# Use DictConfig/ListConfig
NONE = "none"
# Convert the OmegaConf config to primitive container, Structured Configs are preserved
PARTIAL = "partial"
# Fully convert the OmegaConf config to primitive containers (dict, list and primitives).
ALL = "all"
def __eq__(self, other: Any) -> Any:
if isinstance(other, ConvertMode):
return other.value == self.value
elif isinstance(other, str):
return other.upper() == self.name.upper()
else:
return NotImplemented
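# Illustrative note (not part of the original module): these values back the ``_convert_``
# option of ``hydra.utils.instantiate``, and the ``__eq__`` above lets the string and enum
# spellings compare equal:
#
#     instantiate(cfg, _convert_="partial")     # behaves like ConvertMode.PARTIAL
#     assert ConvertMode.PARTIAL == "partial"   # True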
``` |
{
"source": "jieru-hu/hydra-torch",
"score": 2
} |
#### File: hydra-configs-torch/tests/test_instantiate_data.py
```python
import pytest
from hydra.utils import get_class, instantiate
from omegaconf import OmegaConf
import torch.utils.data as data
import torch
from typing import Any
dummy_tensor = torch.tensor((1, 1))
dummy_dataset = data.dataset.TensorDataset(dummy_tensor)
dummy_sampler = data.Sampler(data_source=dummy_dataset)
@pytest.mark.parametrize(
"modulepath, classname, cfg, passthrough_args, passthrough_kwargs, expected",
[
pytest.param(
"utils.data.dataloader",
"DataLoader",
{"batch_size": 4},
[],
{"dataset": dummy_dataset},
data.DataLoader(batch_size=4, dataset=dummy_dataset),
id="DataLoaderConf",
),
pytest.param(
"utils.data.dataset",
"Dataset",
{},
[],
{},
data.Dataset(),
id="DatasetConf",
),
pytest.param(
"utils.data.dataset",
"ChainDataset",
{},
[],
{"datasets": [dummy_dataset, dummy_dataset]},
data.ChainDataset(datasets=[dummy_dataset, dummy_dataset]),
id="ChainDatasetConf",
),
pytest.param(
"utils.data.dataset",
"ConcatDataset",
{},
[],
{"datasets": [dummy_dataset, dummy_dataset]},
data.ConcatDataset(datasets=[dummy_dataset, dummy_dataset]),
id="ConcatDatasetConf",
),
pytest.param(
"utils.data.dataset",
"IterableDataset",
{},
[],
{},
data.IterableDataset(),
id="IterableDatasetConf",
),
# TODO: investigate asterisk in signature instantiation limitation
# pytest.param(
# "utils.data.dataset",
# "TensorDataset",
# {},
# [],
# {"tensors":[dummy_tensor]},
# data.TensorDataset(dummy_tensor),
# id="TensorDatasetConf",
# ),
pytest.param(
"utils.data.dataset",
"Subset",
{},
[],
{"dataset": dummy_dataset, "indices": [0]},
data.Subset(dummy_dataset, 0),
id="SubsetConf",
),
pytest.param(
"utils.data.sampler",
"Sampler",
{},
[],
{"data_source": dummy_dataset},
data.Sampler(data_source=dummy_dataset),
id="SamplerConf",
),
pytest.param(
"utils.data.sampler",
"BatchSampler",
{"batch_size": 4, "drop_last": False},
[],
{"sampler": dummy_sampler},
data.BatchSampler(sampler=dummy_sampler, batch_size=4, drop_last=False),
id="BatchSamplerConf",
),
pytest.param(
"utils.data.sampler",
"RandomSampler",
{},
[],
{"data_source": dummy_dataset},
data.RandomSampler(data_source=dummy_dataset),
id="RandomSamplerConf",
),
pytest.param(
"utils.data.sampler",
"SequentialSampler",
{},
[],
{"data_source": dummy_dataset},
data.SequentialSampler(data_source=dummy_dataset),
id="SequentialSamplerConf",
),
pytest.param(
"utils.data.sampler",
"SubsetRandomSampler",
{"indices": [1]},
[],
{},
data.SubsetRandomSampler(indices=[1]),
id="SubsetRandomSamplerConf",
),
pytest.param(
"utils.data.sampler",
"WeightedRandomSampler",
{"weights": [1], "num_samples": 1},
[],
{},
data.WeightedRandomSampler(weights=[1], num_samples=1),
id="WeightedRandomSamplerConf",
),
# TODO: investigate testing distributed instantiation
# pytest.param(
# "utils.data.distributed",
# "DistributedSampler",
# {},
# [],
# {"dataset": dummy_dataset},
# data.DistributedSampler(group=dummy_group,dataset=dummy_dataset),
# id="DistributedSamplerConf",
# ),
],
)
def test_instantiate_classes(
modulepath: str,
classname: str,
cfg: Any,
passthrough_args: Any,
passthrough_kwargs: Any,
expected: Any,
) -> None:
full_class = f"hydra_configs.torch.{modulepath}.{classname}Conf"
schema = OmegaConf.structured(get_class(full_class))
cfg = OmegaConf.merge(schema, cfg)
obj = instantiate(cfg, *passthrough_args, **passthrough_kwargs)
assert isinstance(obj, type(expected))
``` |
{
"source": "jieru-hu/omegaconf",
"score": 2
} |
#### File: omegaconf/tests/test_basic_ops_list.py
```python
import re
from textwrap import dedent
from typing import Any, List, Optional
from pytest import mark, param, raises
from omegaconf import MISSING, AnyNode, DictConfig, ListConfig, OmegaConf, flag_override
from omegaconf._utils import nullcontext
from omegaconf.errors import (
ConfigTypeError,
InterpolationKeyError,
InterpolationToMissingValueError,
KeyValidationError,
MissingMandatoryValue,
UnsupportedValueType,
ValidationError,
)
from omegaconf.nodes import IntegerNode, StringNode
from tests import Color, IllegalType, User
def test_list_value() -> None:
c = OmegaConf.create("a: [1,2]")
assert {"a": [1, 2]} == c
def test_list_of_dicts() -> None:
v = [dict(key1="value1"), dict(key2="value2")]
c = OmegaConf.create(v)
assert c[0].key1 == "value1"
assert c[1].key2 == "value2"
@mark.parametrize("default", [None, 0, "default"])
@mark.parametrize(
("cfg", "key"),
[
(["???"], 0),
([DictConfig(content="???")], 0),
([ListConfig(content="???")], 0),
],
)
def test_list_get_return_default(cfg: List[Any], key: int, default: Any) -> None:
c = OmegaConf.create(cfg)
val = c.get(key, default_value=default)
assert val is default
@mark.parametrize("default", [None, 0, "default"])
@mark.parametrize(
("cfg", "key", "expected"),
[
(["found"], 0, "found"),
([None], 0, None),
([DictConfig(content=None)], 0, None),
([ListConfig(content=None)], 0, None),
],
)
def test_list_get_do_not_return_default(
cfg: List[Any], key: int, expected: Any, default: Any
) -> None:
c = OmegaConf.create(cfg)
val = c.get(key, default_value=default)
assert val == expected
@mark.parametrize(
"input_, expected, expected_no_resolve, list_key",
[
param([1, 2], [1, 2], [1, 2], None, id="simple"),
param(["${1}", 2], [2, 2], ["${1}", 2], None, id="interpolation"),
param(
[ListConfig(None), ListConfig("${.2}"), [1, 2]],
[None, ListConfig([1, 2]), ListConfig([1, 2])],
[None, ListConfig("${.2}"), ListConfig([1, 2])],
None,
id="iter_over_lists",
),
param(
[DictConfig(None), DictConfig("${.2}"), {"a": 10}],
[None, DictConfig({"a": 10}), DictConfig({"a": 10})],
[None, DictConfig("${.2}"), DictConfig({"a": 10})],
None,
id="iter_over_dicts",
),
param(
["???", ListConfig("???"), DictConfig("???")],
raises(MissingMandatoryValue),
["???", ListConfig("???"), DictConfig("???")],
None,
id="iter_over_missing",
),
param(
{
"defaults": [
{"optimizer": "adam"},
{"dataset": "imagenet"},
{"foo": "${defaults.0.optimizer}_${defaults.1.dataset}"},
]
},
[
OmegaConf.create({"optimizer": "adam"}),
OmegaConf.create({"dataset": "imagenet"}),
OmegaConf.create({"foo": "adam_imagenet"}),
],
[
OmegaConf.create({"optimizer": "adam"}),
OmegaConf.create({"dataset": "imagenet"}),
OmegaConf.create(
{"foo": "${defaults.0.optimizer}_${defaults.1.dataset}"}
),
],
"defaults",
id="str_interpolation",
),
],
)
def test_iterate_list(
input_: Any, expected: Any, expected_no_resolve: Any, list_key: str
) -> None:
c = OmegaConf.create(input_)
if list_key is not None:
lst = c.get(list_key)
else:
lst = c
def test_iter(iterator: Any, expected_output: Any) -> None:
if isinstance(expected_output, list):
items = [x for x in iterator]
assert items == expected_output
for idx in range(len(items)):
assert type(items[idx]) is type(expected_output[idx]) # noqa
else:
with expected_output:
for _ in iterator:
pass
test_iter(iter(lst), expected)
test_iter(lst._iter_ex(resolve=False), expected_no_resolve)
def test_iterate_list_with_missing_interpolation() -> None:
c = OmegaConf.create([1, "${10}"])
itr = iter(c)
assert 1 == next(itr)
with raises(InterpolationKeyError):
next(itr)
def test_iterate_list_with_missing() -> None:
c = OmegaConf.create([1, "???"])
itr = iter(c)
assert 1 == next(itr)
with raises(MissingMandatoryValue):
next(itr)
def test_items_with_interpolation() -> None:
c = OmegaConf.create(["foo", "${0}"])
assert c == ["foo", "foo"]
@mark.parametrize(
["cfg", "key", "expected_out", "expected_cfg"],
[
param([1, 2, 3], 0, 1, [2, 3]),
param([1, 2, 3], None, 3, [1, 2]),
param(["???", 2, 3], 0, None, [2, 3]),
param([1, None, 3], 1, None, [1, 3]),
],
)
def test_list_pop(
cfg: List[Any], key: Optional[int], expected_out: Any, expected_cfg: Any
) -> None:
c = OmegaConf.create(cfg)
val = c.pop() if key is None else c.pop(key)
assert val == expected_out
assert c == expected_cfg
validate_list_keys(c)
@mark.parametrize(
["cfg", "key", "exc"],
[
param([1, 2, 3], 100, IndexError),
param(["${4}", 2, 3], 0, InterpolationKeyError),
param(["${1}", "???", 3], 0, InterpolationToMissingValueError),
],
)
def test_list_pop_errors(cfg: List[Any], key: int, exc: type) -> None:
c = OmegaConf.create(cfg)
with raises(exc):
c.pop(key)
assert c == cfg
validate_list_keys(c)
def test_list_pop_on_unexpected_exception_not_modifying() -> None:
src = [1, 2, 3, 4]
c = OmegaConf.create(src)
with raises(ConfigTypeError):
c.pop("foo") # type: ignore
assert c == src
def test_in_list() -> None:
c = OmegaConf.create([10, 11, {"a": 12}])
assert 10 in c
assert 11 in c
assert {"a": 12} in c
assert "blah" not in c
def test_in_with_interpolation() -> None:
c = OmegaConf.create({"a": ["${b}"], "b": 10})
assert 10 in c.a
@mark.parametrize(
("lst", "expected"),
[
param(
ListConfig(content=None),
raises(
TypeError,
match="Cannot check if an item is in a ListConfig object representing None",
),
id="ListConfig(None)",
),
param(
ListConfig(content="???"),
raises(
MissingMandatoryValue,
match="Cannot check if an item is in missing ListConfig",
),
id="ListConfig(???)",
),
],
)
def test_not_in_special_lists(lst: Any, expected: Any) -> None:
with expected:
"foo" not in lst
def test_list_config_with_list() -> None:
c = OmegaConf.create([])
assert isinstance(c, ListConfig)
def test_list_config_with_tuple() -> None:
c = OmegaConf.create(())
assert isinstance(c, ListConfig)
def test_items_on_list() -> None:
c = OmegaConf.create([1, 2])
with raises(AttributeError):
c.items()
def test_list_enumerate() -> None:
src: List[Optional[str]] = ["a", "b", "c", "d"]
c = OmegaConf.create(src)
for i, v in enumerate(c):
assert src[i] == v
assert v is not None
src[i] = None
for v in src:
assert v is None
def test_list_delitem() -> None:
c = OmegaConf.create([1, 2, 3])
assert c == [1, 2, 3]
del c[0]
assert c == [2, 3]
with raises(IndexError):
del c[100]
validate_list_keys(c)
@mark.parametrize(
"lst,expected",
[
(OmegaConf.create([1, 2]), 2),
(ListConfig(content=None), 0),
(ListConfig(content="???"), 0),
(ListConfig(content="${foo}"), 0),
(ListConfig(content="${foo}", parent=DictConfig({"foo": [1, 2]})), 0),
],
)
def test_list_len(lst: Any, expected: Any) -> None:
assert len(lst) == expected
def test_nested_list_assign_illegal_value() -> None:
c = OmegaConf.create({"a": [None]})
with raises(
UnsupportedValueType,
match=re.escape(
dedent(
"""\
Value 'IllegalType' is not a supported primitive type
full_key: a[0]"""
)
),
):
c.a[0] = IllegalType()
def test_list_append() -> None:
c = OmegaConf.create([])
c.append(1)
c.append(2)
c.append({})
c.append([])
assert c == [1, 2, {}, []]
validate_list_keys(c)
@mark.parametrize(
"lc,element,expected",
[
param(
ListConfig(content=[], element_type=int),
"foo",
raises(
ValidationError,
match=re.escape("Value 'foo' could not be converted to Integer"),
),
id="append_str_to_list[int]",
),
param(
ListConfig(content=[], element_type=int, is_optional=False),
None,
raises(
ValidationError,
match=re.escape("Non optional field cannot be assigned None"),
),
id="append_None_to_list[int]",
),
param(
ListConfig(content=[], element_type=Color),
"foo",
raises(
ValidationError,
match=re.escape(
"Invalid value 'foo', expected one of [RED, GREEN, BLUE]"
),
),
id="append_str_to_list[Color]",
),
param(
ListConfig(content=[], element_type=User),
"foo",
raises(
ValidationError,
match=re.escape(
"Invalid type assigned: str is not a subclass of User. value: foo"
),
),
id="append_str_to_list[User]",
),
param(
ListConfig(content=[], element_type=User),
{"name": "Bond", "age": 7},
raises(
ValidationError,
match=re.escape(
"Invalid type assigned: dict is not a subclass of User. value: {'name': 'Bond', 'age': 7}"
),
),
id="list:convert_dict_to_user",
),
param(
ListConfig(content=[], element_type=User),
{},
raises(
ValidationError,
match=re.escape(
"Invalid type assigned: dict is not a subclass of User. value: {}"
),
),
id="list:convert_empty_dict_to_user",
),
],
)
def test_append_invalid_element_type(
lc: ListConfig, element: Any, expected: Any
) -> None:
with expected:
lc.append(element)
def test_assign_none_to_non_optional() -> None:
cfg = ListConfig(content=["abc"], element_type=str, is_optional=False)
with raises(ValidationError):
cfg[0] = None
@mark.parametrize(
"lc,element,expected",
[
param(
ListConfig(content=[], element_type=int),
"10",
10,
id="list:convert_str_to_int",
),
param(
ListConfig(content=[], element_type=float),
"10",
10.0,
id="list:convert_str_to_float",
),
param(
ListConfig(content=[], element_type=Color),
"RED",
Color.RED,
id="list:convert_str_to_float",
),
],
)
def test_append_convert(lc: ListConfig, element: Any, expected: Any) -> None:
lc.append(element)
value = lc[-1]
assert value == expected
assert type(value) == type(expected)
@mark.parametrize(
"index, expected", [(slice(1, 3), [11, 12]), (slice(0, 3, 2), [10, 12]), (-1, 13)]
)
def test_list_index(index: Any, expected: Any) -> None:
c = OmegaConf.create([10, 11, 12, 13])
assert c[index] == expected
@mark.parametrize(
"cfg, expected",
[
(OmegaConf.create([1, 2, 3]), ["0", "1", "2"]),
(ListConfig(content="???"), []),
(ListConfig(content=None), []),
],
)
def test_list_dir(cfg: Any, expected: Any) -> None:
assert dir(cfg) == expected
def validate_list_keys(c: Any) -> None:
# validate keys are maintained
for i in range(len(c)):
assert c._get_node(i)._metadata.key == i
@mark.parametrize(
"input_, index, value, expected, expected_node_type, expectation",
[
(["a", "b", "c"], 1, 100, ["a", 100, "b", "c"], AnyNode, None),
(
["a", "b", "c"],
1,
IntegerNode(100),
["a", 100, "b", "c"],
IntegerNode,
None,
),
(["a", "b", "c"], 1, "foo", ["a", "foo", "b", "c"], AnyNode, None),
(
["a", "b", "c"],
1,
StringNode("foo"),
["a", "foo", "b", "c"],
StringNode,
None,
),
(
ListConfig(element_type=int, content=[]),
0,
"foo",
None,
None,
ValidationError,
),
(
ListConfig(element_type=int, content=[], is_optional=False),
0,
None,
None,
None,
ValidationError,
),
],
)
def test_insert(
input_: List[str],
index: int,
value: Any,
expected: Any,
expected_node_type: type,
expectation: Any,
) -> None:
c = OmegaConf.create(input_)
if expectation is None:
c.insert(index, value)
assert c == expected
assert type(c._get_node(index)) == expected_node_type
else:
with raises(expectation):
c.insert(index, value)
validate_list_keys(c)
@mark.parametrize(
"lst,idx,value,expectation",
[
(ListConfig(content=None), 0, 10, raises(TypeError)),
(ListConfig(content="???"), 0, 10, raises(MissingMandatoryValue)),
],
)
def test_insert_special_list(lst: Any, idx: Any, value: Any, expectation: Any) -> None:
with expectation:
lst.insert(idx, value)
@mark.parametrize(
"src, append, result",
[
([], [], []),
([1, 2], [3], [1, 2, 3]),
([1, 2], ("a", "b", "c"), [1, 2, "a", "b", "c"]),
],
)
def test_extend(src: List[Any], append: List[Any], result: List[Any]) -> None:
lst = OmegaConf.create(src)
lst.extend(append)
assert lst == result
@mark.parametrize(
"src, remove, result, expectation",
[
([10], 10, [], nullcontext()),
([], "oops", None, raises(ValueError)),
([0, dict(a="blah"), 10], dict(a="blah"), [0, 10], nullcontext()),
([1, 2, 1, 2], 2, [1, 1, 2], nullcontext()),
],
)
def test_remove(src: List[Any], remove: Any, result: Any, expectation: Any) -> None:
with expectation:
lst = OmegaConf.create(src)
assert isinstance(lst, ListConfig)
lst.remove(remove)
assert lst == result
@mark.parametrize("src", [[], [1, 2, 3], [None, dict(foo="bar")]])
@mark.parametrize("num_clears", [1, 2])
def test_clear(src: List[Any], num_clears: int) -> None:
lst = OmegaConf.create(src)
for i in range(num_clears):
lst.clear()
assert lst == []
@mark.parametrize(
"src, item, expected_index, expectation",
[
([], 20, -1, raises(ValueError)),
([10, 20], 10, 0, nullcontext()),
([10, 20], 20, 1, nullcontext()),
],
)
def test_index(
src: List[Any], item: Any, expected_index: int, expectation: Any
) -> None:
with expectation:
lst = OmegaConf.create(src)
assert lst.index(item) == expected_index
def test_index_with_range() -> None:
lst = OmegaConf.create([10, 20, 30, 40, 50])
assert lst.index(x=30) == 2
assert lst.index(x=30, start=1) == 2
assert lst.index(x=30, start=1, end=3) == 2
with raises(ValueError):
lst.index(x=30, start=3)
with raises(ValueError):
lst.index(x=30, end=2)
@mark.parametrize(
"src, item, count",
[([], 10, 0), ([10], 10, 1), ([10, 2, 10], 10, 2), ([10, 2, 10], None, 0)],
)
def test_count(src: List[Any], item: Any, count: int) -> None:
lst = OmegaConf.create(src)
assert lst.count(item) == count
def test_sort() -> None:
c = OmegaConf.create(["bbb", "aa", "c"])
c.sort()
assert ["aa", "bbb", "c"] == c
c.sort(reverse=True)
assert ["c", "bbb", "aa"] == c
c.sort(key=len)
assert ["c", "aa", "bbb"] == c
c.sort(key=len, reverse=True)
assert ["bbb", "aa", "c"] == c
def test_insert_throws_not_changing_list() -> None:
c = OmegaConf.create([])
iv = IllegalType()
with raises(ValueError):
c.insert(0, iv)
assert len(c) == 0
assert c == []
with flag_override(c, "allow_objects", True):
c.insert(0, iv)
assert c == [iv]
def test_append_throws_not_changing_list() -> None:
c = OmegaConf.create([])
iv = IllegalType()
with raises(ValueError):
c.append(iv)
assert len(c) == 0
assert c == []
validate_list_keys(c)
with flag_override(c, "allow_objects", True):
c.append(iv)
assert c == [iv]
def test_hash() -> None:
c1 = OmegaConf.create([10])
c2 = OmegaConf.create([10])
assert hash(c1) == hash(c2)
c2[0] = 20
assert hash(c1) != hash(c2)
@mark.parametrize(
"in_list1, in_list2,in_expected",
[
([], [], []),
([1, 2], [3, 4], [1, 2, 3, 4]),
(["x", 2, "${0}"], [5, 6, 7], ["x", 2, "x", 5, 6, 7]),
],
)
class TestListAdd:
def test_list_plus(
self, in_list1: List[Any], in_list2: List[Any], in_expected: List[Any]
) -> None:
list1 = OmegaConf.create(in_list1)
list2 = OmegaConf.create(in_list2)
expected = OmegaConf.create(in_expected)
ret = list1 + list2
assert ret == expected
def test_list_plus_eq(
self, in_list1: List[Any], in_list2: List[Any], in_expected: List[Any]
) -> None:
list1 = OmegaConf.create(in_list1)
list2 = OmegaConf.create(in_list2)
expected = OmegaConf.create(in_expected)
list1 += list2
assert list1 == expected
def test_deep_add() -> None:
cfg = OmegaConf.create({"foo": [1, 2, "${bar}"], "bar": "xx"})
lst = cfg.foo + [10, 20]
assert lst == [1, 2, "xx", 10, 20]
def test_set_with_invalid_key() -> None:
cfg = OmegaConf.create([1, 2, 3])
with raises(KeyValidationError):
cfg["foo"] = 4 # type: ignore
@mark.parametrize(
"lst,idx,expected",
[
(OmegaConf.create([1, 2]), 0, 1),
(ListConfig(content=None), 0, TypeError),
(ListConfig(content="???"), 0, MissingMandatoryValue),
],
)
def test_getitem(lst: Any, idx: Any, expected: Any) -> None:
if isinstance(expected, type):
with raises(expected):
lst.__getitem__(idx)
else:
assert lst.__getitem__(idx) == expected
@mark.parametrize(
"sli",
[
(slice(None, None, None)),
(slice(1, None, None)),
(slice(-1, None, None)),
(slice(None, 1, None)),
(slice(None, -1, None)),
(slice(None, None, 1)),
(slice(None, None, -1)),
(slice(1, None, -2)),
(slice(None, 1, -2)),
(slice(1, 3, -1)),
(slice(3, 1, -1)),
],
)
def test_getitem_slice(sli: slice) -> None:
lst = [1, 2, 3]
olst = OmegaConf.create([1, 2, 3])
expected = lst[sli.start : sli.stop : sli.step]
assert olst.__getitem__(sli) == expected
@mark.parametrize(
"lst,idx,expected",
[
(OmegaConf.create([1, 2]), 0, 1),
(OmegaConf.create([1, 2]), "foo", KeyValidationError),
(OmegaConf.create([1, "${2}"]), 1, InterpolationKeyError),
(OmegaConf.create(["???", "${0}"]), 1, InterpolationToMissingValueError),
(ListConfig(content=None), 0, TypeError),
(ListConfig(content="???"), 0, MissingMandatoryValue),
],
)
def test_get(lst: Any, idx: Any, expected: Any) -> None:
if isinstance(expected, type):
with raises(expected):
lst.get(idx)
else:
assert lst.__getitem__(idx) == expected
def test_getattr() -> None:
src = ["a", "b", "c"]
cfg = OmegaConf.create(src)
with raises(AttributeError):
getattr(cfg, "foo")
assert getattr(cfg, "0") == src[0]
assert getattr(cfg, "1") == src[1]
assert getattr(cfg, "2") == src[2]
def test_shallow_copy() -> None:
cfg = OmegaConf.create([1, 2])
c = cfg.copy()
assert cfg == c
cfg[0] = 42
assert cfg[0] == 42
assert c[0] == 1
def test_shallow_copy_missing() -> None:
cfg = ListConfig(content=MISSING)
c = cfg.copy()
c._set_value([1])
assert c[0] == 1
assert cfg._is_missing()
def test_shallow_copy_none() -> None:
cfg = ListConfig(content=None)
c = cfg.copy()
c._set_value([1])
assert c[0] == 1
assert cfg._is_none()
@mark.parametrize("flag", ["struct", "readonly"])
def test_listconfig_creation_with_parent_flag(flag: str) -> None:
parent = OmegaConf.create([])
parent._set_flag(flag, True)
d = [1, 2, 3]
cfg = ListConfig(d, parent=parent)
assert cfg == d
``` |
{
"source": "JierunChen/TVConv",
"score": 2
} |
#### File: od_oc_segmentation/dataloaders/fundus_dataloader.py
```python
from __future__ import print_function, division
import os
from PIL import Image
import numpy as np
from torch.utils.data import Dataset
from od_oc_segmentation.utils.Utils import Path
from glob import glob
import random
import copy
class FundusSegmentation(Dataset):
"""
Fundus segmentation dataset
including 5 domain dataset
one for test others for training
"""
def __init__(self,
base_dir=Path.db_root_dir('fundus'),
phase='train',
splitid=[2, 3, 4],
transform=None,
state='train'
):
"""
:param base_dir: path to VOC dataset directory
:param split: train/val
:param transform: transform to apply
"""
# super().__init__()
self.state = state
self._base_dir = base_dir
self.image_list = []
self.phase = phase
self.image_pool = {'DGS':[], 'REF':[], 'RIM':[], 'REF_val':[]}
self.label_pool = {'DGS':[], 'REF':[], 'RIM':[], 'REF_val':[]}
self.img_name_pool = {'DGS':[], 'REF':[], 'RIM':[], 'REF_val':[]}
self.flags_DGS = ['gd', 'nd']
self.flags_REF = ['g', 'n']
self.flags_RIM = ['G', 'N', 'S']
self.flags_REF_val = ['V']
self.splitid = splitid
SEED = 1212
random.seed(SEED)
for id in splitid:
self._image_dir = os.path.join(self._base_dir, 'Domain'+str(id), phase, 'ROIs/image/')
print('==> Loading {} data from: {}'.format(phase, self._image_dir))
imagelist = glob(self._image_dir + '*.png')
for image_path in imagelist:
gt_path = image_path.replace('image', 'mask')
self.image_list.append({'image': image_path, 'label': gt_path})
self.transform = transform
self._read_img_into_memory()
        # Drop any domain whose pool ended up empty (iterate over a copy of the
        # keys so entries can be deleted safely while looping).
        for key in list(self.image_pool.keys()):
            if len(self.image_pool[key]) < 1:
                del self.image_pool[key]
                del self.label_pool[key]
                del self.img_name_pool[key]
# Display stats
print('-----Total number of images in {}: {:d}'.format(phase, len(self.image_list)))
def __len__(self):
max = -1
for key in self.image_pool:
if len(self.image_pool[key])>max:
max = len(self.image_pool[key])
return max
def __getitem__(self, index):
if self.phase != 'test':
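            # Training: the incoming index is ignored and one random sample is drawn per
            # domain, so each item yields a domain-balanced list of samples.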
sample = []
for key in self.image_pool:
domain_code = list(self.image_pool.keys()).index(key)
index = np.random.choice(len(self.image_pool[key]), 1)[0]
_img = self.image_pool[key][index]
_target = self.label_pool[key][index]
_img_name = self.img_name_pool[key][index]
anco_sample = {'image': _img, 'label': _target, 'img_name': _img_name, 'dc': domain_code}
if self.transform is not None:
anco_sample = self.transform(anco_sample)
sample.append(anco_sample)
else:
sample = []
for key in self.image_pool:
domain_code = list(self.image_pool.keys()).index(key)
_img = self.image_pool[key][index]
_target = self.label_pool[key][index]
_img_name = self.img_name_pool[key][index]
anco_sample = {'image': _img, 'label': _target, 'img_name': _img_name, 'dc': domain_code}
if self.transform is not None:
anco_sample = self.transform(anco_sample)
sample=anco_sample
return sample
def _read_img_into_memory(self):
img_num = len(self.image_list)
for index in range(img_num):
basename = os.path.basename(self.image_list[index]['image'])
Flag = "NULL"
if basename[0:2] in self.flags_DGS:
Flag = 'DGS'
elif basename[0] in self.flags_REF:
Flag = 'REF'
elif basename[0] in self.flags_RIM:
Flag = 'RIM'
elif basename[0] in self.flags_REF_val:
Flag = 'REF_val'
else:
print("[ERROR:] Unknown dataset!")
return 0
if self.splitid[0] == '4':
# self.image_pool[Flag].append(Image.open(self.image_list[index]['image']).convert('RGB').resize((256, 256), Image.LANCZOS))
self.image_pool[Flag].append(Image.open(self.image_list[index]['image']).convert('RGB').crop((144, 144, 144+512, 144+512)).resize((256, 256), Image.LANCZOS))
_target = np.asarray(Image.open(self.image_list[index]['label']).convert('L'))
_target = _target[144:144+512, 144:144+512]
_target = Image.fromarray(_target)
else:
self.image_pool[Flag].append(
Image.open(self.image_list[index]['image']).convert('RGB').resize((256, 256), Image.LANCZOS))
# self.image_pool[Flag].append(Image.open(self.image_list[index]['image']).convert('RGB'))
_target = Image.open(self.image_list[index]['label'])
                if _target.mode == 'RGB':
_target = _target.convert('L')
if self.state != 'prediction':
_target = _target.resize((256, 256))
# print(_target.size)
# print(_target.mode)
self.label_pool[Flag].append(_target)
# if self.split[0:4] in 'test':
_img_name = self.image_list[index]['image'].split('/')[-1]
self.img_name_pool[Flag].append(_img_name)
def __str__(self):
# return 'Fundus(phase=' + self.phase+str(args.datasetTest[0]) + ')'
return 'Fundus(phase=' + self.phase + ')'
if __name__ == '__main__':
import custom_transforms as tr
from .utils import decode_segmap
from torch.utils.data import DataLoader
from torchvision import transforms
import matplotlib.pyplot as plt
composed_transforms_tr = transforms.Compose([
tr.RandomHorizontalFlip(),
tr.RandomSized(512),
tr.RandomRotate(15),
tr.ToTensor()])
    voc_train = FundusSegmentation(phase='train',
transform=composed_transforms_tr)
dataloader = DataLoader(voc_train, batch_size=5, shuffle=True, num_workers=2)
for ii, sample in enumerate(dataloader):
for jj in range(sample["image"].size()[0]):
img = sample['image'].numpy()
gt = sample['label'].numpy()
tmp = np.array(gt[jj]).astype(np.uint8)
segmap = tmp
img_tmp = np.transpose(img[jj], axes=[1, 2, 0]).astype(np.uint8)
plt.figure()
plt.title('display')
plt.subplot(211)
plt.imshow(img_tmp)
plt.subplot(212)
plt.imshow(segmap)
break
plt.show(block=True)
```
#### File: od_oc_segmentation/networks/baseline.py
```python
from .baseline_utils import IntermediateLayerGetter
from .baseline_deeplab import DeepLabHead, DeepLabHeadV3Plus, DeepLabV3
from .baseline_mobilenetv2 import mobilenet_v2
def _segm_mobilenet(atom, name, backbone_name, num_classes, output_stride, pretrained_backbone, **kwargs):
if output_stride == 8:
aspp_dilate = [12, 24, 36]
else:
aspp_dilate = [6, 12, 18]
backbone = mobilenet_v2(pretrained=pretrained_backbone, output_stride=output_stride)
# rename layers
backbone.low_level_features = backbone.features[0:4]
backbone.high_level_features = backbone.features[4:-1]
backbone.features = None
backbone.classifier = None
inplanes = 320
low_level_planes = 24
if name == 'deeplabv3plus':
return_layers = {'high_level_features': 'out', 'low_level_features': 'low_level'}
classifier = DeepLabHeadV3Plus(atom, inplanes, low_level_planes, num_classes, aspp_dilate, **kwargs)
elif name == 'deeplabv3':
return_layers = {'high_level_features': 'out'}
classifier = DeepLabHead(inplanes, num_classes, aspp_dilate, **kwargs)
backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
model = DeepLabV3(backbone, classifier)
return model
def _load_model(atom, arch_type, backbone, num_classes, output_stride, pretrained_backbone, **kwargs):
if backbone == 'mobilenetv2':
model = _segm_mobilenet(atom, arch_type, backbone, num_classes, output_stride=output_stride,
pretrained_backbone=pretrained_backbone, **kwargs)
else:
raise NotImplementedError
return model
def deeplabv3plus_mobilenet(atom, num_classes=21, output_stride=8, pretrained_backbone=True, **kwargs):
"""Constructs a DeepLabV3+ model with a MobileNetv2 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model(atom, 'deeplabv3plus', 'mobilenetv2', num_classes, output_stride=output_stride,
pretrained_backbone=pretrained_backbone, **kwargs)
``` |
{
"source": "JieShenAI/jshen",
"score": 3
} |
#### File: src/jspider/JProxy.py
```python
import pickle
import random
from dataclasses import dataclass, field
import datetime
from net import send_request
import requests
import logging
from JTime import now
from pathlib import Path
def format_proxy(ip: str, port: str) -> dict:
ip_port = "%s:%s" % (ip.strip(), port.strip())
    # requests expects plain scheme names ("http"/"https") as the proxy-dict keys
    proxy = {
        "http": ip_port,
        "https": ip_port
    }
return proxy
def is_alive(ip, port):
"""Check if a proxy is alive or not
@return: True if alive, False otherwise
    This is slow: a proxy counts as usable only if the request does not time out.
"""
proxy = format_proxy(ip, port)
try:
requests.get('http://www.baidu.com', proxies=proxy, timeout=3)
return True
except:
return False
@dataclass(order=True)
class JProxy:
"""
    The proxy pool is refreshed every morning at 9 o'clock.
    Each entry in the pool records:
    * ip
    * port
    * number of times it has been used
    Features:
    * periodically re-check whether dead proxies have come back to life
    * check that a proxy is still alive right before crawling with it
    * get_proxy returns a living proxy.
"""
# ip: str = field(default=None, compare=False)
port: str = field(default=None, compare=False)
cnt: int = field(default=0, compare=True)
born: datetime = field(default=None, compare=False)
def get_proxy_66ip(url_, proxy_impl):
"""
    Scrape the proxy IPs listed on a single page of the 66ip site:
http://www.66ip.cn/index.html
"""
soup = send_request(url_)
center_tag = soup.select('div[align="center"]')[1]
tr_tag = center_tag.find_all('tr')
for tr in tr_tag[1::]:
tmp = tr.find_all('td')[:2:]
ip = tmp[0].string.strip()
port = tmp[1].string.strip()
if is_alive(ip, port):
proxy_impl.add(ip, JProxy(port, 0, now))
def get_proxys(proxy_impl, end_page):
"""
    Scrape proxy IPs from the 66ip site.
    The site may go down or its page layout may change, so the scraping is wrapped in try/except and failures are logged.
"""
_66ip = "http://www.66ip.cn/index.html"
try:
get_proxy_66ip(_66ip, proxy_impl)
for p in range(2, end_page + 1):
url_ = f"http://www.66ip.cn/{p}.html"
get_proxy_66ip(url_, proxy_impl)
except Exception as e:
logging.error(e.args)
class ProxyImpl:
def __init__(self, grave_obj_path: str):
        self.__item: dict[str, JProxy] = dict()
self.__grave_obj_path = grave_obj_path
self.grave: ProxyGrave = load_proxy_grave(grave_obj_path)
def __getitem__(self, ip):
return self.__item.get(ip)
def __iter__(self):
        return iter(self.__item)
def add(self, ip: str, proxy_: JProxy) -> None:
        # A proxy that is already in the pool is left untouched
if ip in self.__item:
return
        # Register the new proxy; no check is made against dead proxies in the grave
self.__item[ip] = proxy_
def get_random_proxy(self) -> dict:
"""
        Return a randomly chosen proxy.
        TODO: handle the case where the proxy pool is empty.
"""
ip = random.choice(list(self.__item.keys()))
return self.get(ip)
def get(self, ip) -> dict:
"""
        Return the proxy registered under the given ip.
"""
tmp: JProxy = self.__item[ip]
return format_proxy(ip, tmp.port)
def push_grave(self, ip):
"""
        A proxy is not checked against the grave when it is added, so when it dies
        the grave has to reconcile it: if the port differs, the new port overwrites
        the old one. The entry is then removed from the pool dict.
        """
        # only delete entries that actually exist
        if ip in self.__item.keys():
            self.grave.receive(ip, self.__item[ip])
            # persist the grave to disk immediately
pickle.dump(self.grave, open(self.__grave_obj_path, 'wb'))
del self.__item[ip]
def __repr__(self):
return self.__item.__repr__()
def __len__(self):
return len(self.__item)
class ProxyGrave:
"""
    Not strictly needed; it only records which proxies have been used and how often.
    Dead proxies are removed from memory immediately, so the grave must be written
    to disk right away to avoid losing data if the program crashes.
    Future work: resurrect proxies from the grave, since some may become usable again.
"""
def __init__(self):
        self.__items: dict[str, JProxy] = dict()
def __repr__(self):
return self.__items.__repr__()
def receive(self, ip: str, jp: JProxy):
if ip not in self.__items:
self.__items[ip] = jp
else:
            # the proxy may have switched to a new port
            self.__items[ip].port = jp.port
            # accumulate the request counts
self.__items[ip].cnt += jp.cnt
def load_proxy_grave(obj_path: str) -> ProxyGrave:
"""
    If the file does not exist yet, create it and return an empty grave.
"""
p = Path(obj_path)
if not p.exists():
p.parent.mkdir(parents=True, exist_ok=True)
p.touch()
return ProxyGrave()
try:
return pickle.load(open(p, 'rb'))
except:
return ProxyGrave()
if __name__ == '__main__':
grave_obj_path = r"D:\github\work\spider\gov_bx\code\obj\grave.obj"
proxy_impl = ProxyImpl(grave_obj_path)
get_proxys(proxy_impl, 5)
print(proxy_impl)
``` |
{
"source": "JieShenAI/leetcode",
"score": 3
} |
#### File: leetcode/jshen/leetcode.py
```python
class leetcode_test():
def __init__(self, func):
self.func = func
self.data_set = []
def add_parameter(self, result, **kwargs):
self.data_set.append((result, kwargs))
def test(self):
for (result, kwargs) in self.data_set:
ans = self.func(**kwargs)
if result == ans:
continue
else:
print(f"error: input: {kwargs}")
print(f"expect: {result}, but get: {ans}")
if __name__ == '__main__':
pass
```
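A minimal sketch of how the `leetcode_test` harness above might be used. The `add` function and the expected values are invented purely for illustration, and the import path is an assumption about the package layout.
```python
from jshen.leetcode import leetcode_test  # assumed import path; adjust to your layout

def add(a, b):
    return a + b

checker = leetcode_test(add)
checker.add_parameter(3, a=1, b=2)   # expected result first, then the inputs as keyword args
checker.add_parameter(0, a=5, b=-5)
checker.test()                       # silent when every case passes, prints a diff otherwise
```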
#### File: leetcode/jshen/spider.py
```python
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
import time
import os
def sleep(t=0.3):
time.sleep(t)
class jselenium:
def __init__(self, driver_path):
self.driver = webdriver.Chrome(driver_path)
def hover_by_xpath(self, xpath_str):
element_to_hover_over = self.driver.find_element_by_xpath(xpath_str)
ActionChains(self.driver).move_to_element(element_to_hover_over).perform()
sleep()
def hover_by_css(self, css_selector):
element_to_hover_over = self.driver.find_element_by_css_selector(css_selector)
ActionChains(self.driver).move_to_element(element_to_hover_over).perform()
sleep()
def click_by_xpath(self, xpath_str):
self.driver.find_element_by_xpath(xpath_str).click()
sleep()
def main_windows_scroll(self, y, x=0):
js = f"window.scrollTo({x}, {y})"
self.driver.execute_script(js)
def windows_scroll_by_class(self, class_name, id, offset):
"""
        Scroll the panel selected by class name and index down to ``offset``.
:param offset:
:return:
"""
js = f'document.getElementsByClassName("{class_name}")[{id}].scrollTop={offset}'
self.driver.execute_script(js)
sleep()
def __del__(self):
        # self.driver.quit() cannot reliably close chromedriver, which leaks memory:
        # the chromedriver process keeps holding memory even after the program exits.
        # bug report: https://github.com/SeleniumHQ/selenium/issues/8571
# pass
sleep(2)
try:
self.driver.quit()
except:
            # fall back to killing the chromedriver process from the command line
os.system("taskkill /F /im chromedriver.exe")
``` |
{
"source": "jieshenboy/HeadTogetherOfKOG",
"score": 3
} |
#### File: com/getPictures/getHeroHeader.py
```python
import urllib.request
import json
import os
def getHeroHeader():
response = urllib.request.urlopen("http://pvp.qq.com/web201605/js/herolist.json")
hero_json = json.loads(response.read())
hero_num = len(hero_json)
# print(hero_num)
save_dir = './heroavatar/'
if not os.path.exists(save_dir):
os.mkdir(save_dir)
for i in range(hero_num):
# avatar_name = hero_json[i]['ename']
save_file_name = save_dir + hero_json[i]['cname'] + '-' + str(hero_json[i]['ename']) + '.jpg'
# print(save_file_name)
avatar_url = 'http://game.gtimg.cn/images/yxzj/img201606/heroimg/' + str(hero_json[i]['ename']) + '/' + str(hero_json[i]['ename']) + '.jpg'
# print(avatar_url)
if not os.path.exists(save_file_name):
urllib.request.urlretrieve(avatar_url, save_file_name)
```
#### File: com/joinPictures/joinHeroHeader.py
```python
import math
import PIL.Image as Image
import os
# Build the collage canvas and paste every avatar onto it
def joinPictures():
    ls = os.listdir('./heroavatar')
    each_size = int(math.sqrt(float(640*640)/len(ls)))  # side length of each tile, in pixels
    lines = int(640/each_size)  # number of tiles per row
image = Image.new('RGBA', (640, 640))
x = 0
y = 0
for i in range(0,len(ls)):
try:
img = Image.open('./heroavatar'+'/'+ls[i])
except IOError:
print("Error")
else:
img = img.resize((each_size, each_size), Image.ANTIALIAS)
image.paste(img, (x * each_size, y * each_size))
x += 1
if x == lines:
x = 0
y += 1
image.save('./heroavatar' + "/" + "all.png")
``` |
{
"source": "jieshenboy/jeckstockpick",
"score": 3
} |
#### File: jeckstockpick/lMechanize/getHeaders.py
```python
def getHeaders(fileName):
headers = []
headerList = ['User-Agent','Cookie']
with open(fileName, 'r') as fp:
for line in fp.readlines():
name, value = line.split(':', 1)
if name in headerList:
headers.append((name.strip(), value.strip()))
return headers
if __name__=="__main__":
headers = getHeaders('headersRaw.txt')
print(headers)
```
#### File: jeckstockpick/lsys/testSys.py
```python
import sys
class ShowSysModule(object):
"""
    This class demonstrates the sys module from the Python standard library.
"""
def __init__(self):
        print(u'The most common use of the sys module is reading the program arguments')
        self.getArg()
        print(u'It can also report the platform the program runs on')
        self.getOS()
    def getArg(self):
        print(u'Counting the command-line arguments')
        print(u'There are %d arguments' %len(sys.argv))
        print(u'They are: %s' %sys.argv)
    def getOS(self):
        print(u'The current platform is: %s' %sys.platform)
if __name__=="__main__":
ssm = ShowSysModule()
```
#### File: qiushi/middlewares/customMiddlewares.py
```python
from scrapy.contrib.downloadermiddleware.useragent import UserAgentMiddleware
class CustomUserAgent(UserAgentMiddleware):
def process_request(self, request, spider):
ua = 'Mozilla/5.0 (Windows NT 6.1;WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/563.3'
request.headers.setdefault('User-Agent', ua)
class CustomProxy(object):
def process_request(self, request, spider):
request.meta['proxy'] = '172.16.58.3:8123'
```
#### File: weather/weather/pipelines.py
```python
import time
import os.path
import urllib2
class WeatherPipeline(object):
def process_item(self, item, spider):
today = time.strftime('%Y-%m-%d', time.localtime())
fileName = today+'.txt'
with open(fileName, 'a') as fp:
fp.write(item['cityDate'].encode('utf8') + '\t')
fp.write(item['week'].encode('utf8')+ '\t')
imgName = os.path.basename(item['img'])
            fp.write(imgName + '\t')
            if os.path.exists(imgName):
                pass
            else:
                # use a separate handle so the outer report file stays open
                with open(imgName, 'wb') as imgFp:
                    response = urllib2.urlopen(item['img'])
                    imgFp.write(response.read())
fp.write(item['weather'].encode('utf8') + '\t')
fp.write(item['wind'].encode('utf8') + '\n\n')
time.sleep(1)
return item
``` |
{
"source": "Jie-su/WGAN-GP",
"score": 3
} |
#### File: WGAN-GP/model/wgan_64.py
```python
import torch
import torch.nn as nn
from common_net import LeakyReLUINSConv2d, ReLUBNNConvTranspose2d
from configuration import *
# Generator for 64x64 input size image
class Generator(nn.Module):
def __init__(self, hidden_dim=BASE_CHANNAL_DIM, noise_size=100):
super(Generator, self).__init__()
self.hidden_dim = hidden_dim
self.noise_size = noise_size
# Project random noise to input size
self.projector = nn.Sequential(
nn.Linear(self.noise_size, 4 * 4 * 8 * self.hidden_dim),
nn.BatchNorm1d(4 * 4 * 8 * self.hidden_dim),
nn.ReLU(True),
)
# Deconvolution block
self.deconv = nn.Sequential(
# 128 * 8 x 4 x 4 -> 128 * 4 x 8 x 8
ReLUBNNConvTranspose2d(8 * self.hidden_dim, 4 * self.hidden_dim,
kernel_size=5, stride=2, padding=2),
# 128 * 4 x 8 x 8 -> 128 * 2 x 16 x 16
ReLUBNNConvTranspose2d(4 * self.hidden_dim, 2 * self.hidden_dim,
kernel_size=5, stride=2, padding=2),
# 128 * 2 x 16 x 16 -> 128 x 32 x 32
ReLUBNNConvTranspose2d(2 * self.hidden_dim, 1 * self.hidden_dim,
kernel_size=5, stride=2, padding=2),
# 128 x 32 x 32 -> 3 x 64 x 64
nn.ConvTranspose2d(self.hidden_dim, 3, 2, stride=2),
nn.Tanh(),
)
def forward(self, input):
# Feedforward process
output = self.projector(input)
output = output.view(-1, 8 * self.hidden_dim, 4, 4)
output = self.deconv(output)
return output
# Discriminator for 64x64 input images
class Discriminator(nn.Module):
def __init__(self, hidden_dim=BASE_CHANNAL_DIM, input_channel=3):
super(Discriminator, self).__init__()
self.hidden_dim = hidden_dim
self.input_channel = input_channel
# Convolutional block
self.conv_block = nn.Sequential(
# 3 x 64 x 64 -> 128 x 32 x 32
nn.Conv2d(self.input_channel, self.hidden_dim, 5, 2, padding=2),
nn.LeakyReLU(0.2),
# 128 x 32 x 32 -> 256 x 16 x 16
LeakyReLUINSConv2d(self.hidden_dim,
2 * self.hidden_dim, 5, 2, padding=2),
# 256 x 16 x 16 -> 512 x 8 x 8
LeakyReLUINSConv2d(2 * self.hidden_dim,
4 * self.hidden_dim, 5, 2, padding=2),
# 512 x 8 x 8 -> 1024 x 4 x 4
LeakyReLUINSConv2d(4 * self.hidden_dim,
8 * self.hidden_dim, 5, 2, padding=2),
# 1024 x 4 x 4 -> 1 x 1 x 1
nn.Conv2d(8 * self.hidden_dim, 1, 4, 1, padding=0),
)
def forward(self, input):
output = self.conv_block(input)
return output.view(-1)
```
#### File: Jie-su/WGAN-GP/utils.py
```python
import torch
import torch.nn as nn
import time
import os
def time2str():
time_id = str(int(time.time()))
return time_id
def build_dirs(path):
try:
os.makedirs(path)
except OSError as e:
print(e)
```
#### File: Jie-su/WGAN-GP/wgan_gp_train.py
```python
from model.wgan_64 import Discriminator, Generator
import configuration as conf
from utils import build_dirs, time2str
from init import gaussian_weights_init
import torch
import torchvision
from torchvision import transforms
from torch.autograd import Variable
from torch.autograd import grad
import os
from os.path import join
import argparse
import tensorboardX
# Parameter setting
parser = argparse.ArgumentParser(description='WGAN-GP Training')
parser.add_argument('--workers', default=0, type=int, metavar='N',
help='number of data loading workers (default: 0)')
parser.add_argument('--epochs', default=conf.NUM_EPOCHS, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-b', '--batch-size', default=conf.TRAIN_BATCH_SIZE,
type=int, metavar='N',
help='mini-batch size (default: {})'.format(conf.TRAIN_BATCH_SIZE), dest='batch_size')
parser.add_argument('--lr', '--learning-rate', default=conf.LEARNING_RATE,
type=float, metavar='LR', help='initial learning rate',
dest='learning_rate')
parser.add_argument('--mi', '--max-iterations', default=conf.MAX_ITERATIONS,
type=int, metavar='MI', help='Maximum iteration number',
dest='max_iterations')
parser.add_argument('--resume', default=0, type=int, metavar='check',
help='0 set as no resume and 1 set as resume')
parser.add_argument('--n_critic', default=5, type=int, metavar='NC',
help='number of critic training', dest='number_critic')
parser.add_argument('--gp-lambda', default=10, type=int, metavar='gp',
help='weight of gradient penalty', dest='gp_lambda')
# Main control script
def main():
args = parser.parse_args()
# initialize global path
global_path = os.path.dirname(os.path.realpath(__file__))
conf.global_path = global_path
    print('the global path: {}'.format(global_path))
# configure the logging path.
conf.time_id = time2str()
conf.logging_path = join(global_path, './logs', conf.time_id)
conf.writting_path = join(conf.logging_path, './logging')
# configure checkpoint for images and models.
conf.image_directory = join(
conf.logging_path, conf.IMAGE_SAVING_DIRECTORY)
conf.model_directory = join(
conf.logging_path, conf.MODEL_SAVEING_DIRECTORY)
build_dirs(conf.image_directory)
build_dirs(conf.model_directory)
build_dirs(conf.writting_path)
conf.writer = tensorboardX.SummaryWriter(conf.writting_path)
# Setting parameters
conf.max_epochs = args.epochs
print('number epochs: {}'.format(conf.max_epochs))
conf.num_data_workers = args.workers
print('number of workers: {}'.format(conf.num_data_workers))
conf.lr = args.learning_rate
print('learning rate: {}'.format(conf.lr))
conf.batch_size = args.batch_size
print('batch size: {}'.format(conf.batch_size))
conf.max_iterations = args.max_iterations
print('max number of iterations: {}'.format(conf.max_iterations))
conf.n_critic = args.number_critic
print('number of critic training: {}'.format(conf.n_critic))
conf.gp_lambda = args.gp_lambda
print('gradient penalty weight: {}'.format(conf.gp_lambda))
train(conf)
# Dataset loading
def loading_data(conf):
print("Loading dataset....")
# Dataset = AWA_Dataset(join(conf.global_path, conf.DATA_ROOT))
transformation = torchvision.transforms.Compose(
[transforms.Resize(64),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5] * 3, std=[0.5] * 3)])
Dataset = torchvision.datasets.CIFAR10('./dataset/cifar10', train=True,
transform=transformation,
download=True)
train_loader = torch.utils.data.DataLoader(
dataset=Dataset, batch_size=conf.batch_size,
shuffle=True, num_workers=conf.num_data_workers)
print("Dataset length: {}".format(len(Dataset)))
conf.data_set_len = len(Dataset)
return train_loader
# Training initialization(Model Initialization)
def init_models(conf):
# Setup Models
print("Setup Models ......")
Generator_Model = Generator()
Discriminator_Model = Discriminator()
# Model weight initialization
print("Weight initialization......")
Generator_Model.apply(gaussian_weights_init)
Discriminator_Model.apply(gaussian_weights_init)
model = Generator_Model, Discriminator_Model
# Uploading the model into GPU
if torch.cuda.is_available():
Generator_Model.cuda()
Discriminator_Model.cuda()
# Setup the optimizers
print("Optimization Setup.......")
GeneratorOptimizor = torch.optim.Adam(
Generator_Model.parameters(), lr=conf.lr, betas=(0.5, 0.999))
DiscriminatorOptimizor = torch.optim.Adam(
Discriminator_Model.parameters(), lr=conf.lr, betas=(0.5, 0.999))
optimizer = GeneratorOptimizor, DiscriminatorOptimizor
# Fixing noise
fix_noise = torch.randn(conf.batch_size, conf.NOISE_SIZE)
return model, optimizer, fix_noise
def train(conf):
# Loading datasets
train_loader = loading_data(conf)
# Initialize Model
model, optimizer, fix_noise = init_models(conf)
Generator_Model, Discriminator_Model = model
GeneratorOptimizor, DiscriminatorOptimizor = optimizer
conf.iterations = 0
# Begin training Process
print("Begin Training Process........\n")
for epoch in range(0, conf.max_epochs):
Discriminator_Model.train()
for it, (image, _) in enumerate(train_loader):
Generator_Model.train()
if image.size(0) != conf.batch_size:
continue
conf.iterations += 1
if conf.iterations >= conf.max_iterations:
return
stop_flag = train_one_iteration(conf, image,
Generator_Model,
GeneratorOptimizor,
Discriminator_Model,
DiscriminatorOptimizor,
fix_noise)
if (it + 1) % 1 == 0:
print("Epoch: (%3d) (%5d/%5d)" % (epoch, it + 1, len(train_loader)))
if stop_flag:
return
def train_one_iteration(conf, image, Generator_Model, GeneratorOptimizor,
Discriminator_Model,
DiscriminatorOptimizor, fix_noise):
# Copy data to the gpu
if torch.cuda.is_available():
image = Variable(image.cuda())
fix_noise = Variable(fix_noise.cuda())
else:
image = Variable(image)
fix_noise = Variable(fix_noise)
# Discriminator Update
dis_update(conf, Generator_Model, Discriminator_Model,
DiscriminatorOptimizor, image)
# Generator Update
if conf.iterations % conf.n_critic == 0:
gen_update(conf, Generator_Model, Discriminator_Model,
GeneratorOptimizor, DiscriminatorOptimizor)
if (conf.iterations + 1) % 100 == 0:
Generator_Model.eval()
gen_image = Generator_Model(fix_noise)
# Save the output images
img_name = conf.image_directory + "/gen_image_" + str(conf.iterations + 1) + ".jpg"
torchvision.utils.save_image((gen_image.data + 1) / 2, img_name)
# # Save the input images
# img_name = conf.image_directory + "/input_image.jpg"
        # torchvision.utils.save_image((image.data), img_name)
    # Return an explicit stop flag for the caller; this version never requests a stop.
    return False
# Discriminator update
def dis_update(conf, Generator_Model, Discriminator_Model,
DiscriminatorOptimizor, image):
# Discriminator Update function
DiscriminatorOptimizor.zero_grad()
# Generate random noise
noise = torch.randn(conf.batch_size, conf.NOISE_SIZE)
if torch.cuda.is_available():
noise = Variable(noise.cuda())
else:
noise = Variable(noise)
# Get output from Generator
output = Generator_Model(noise)
# Feed forward Discriminator
fake_image_output = Discriminator_Model(output.detach())
real_image_output = Discriminator_Model(image)
# Calculate Wasserstein-1 Distance
w_distance = real_image_output.mean() - fake_image_output.mean()
# Calculate Gradient Penalty
g_penalty = gradient_penalty(image.data,
output.data,
Discriminator_Model)
dis_loss = -w_distance + g_penalty * conf.gp_lambda
    # loss backpropagation
dis_loss.backward(retain_graph=True)
DiscriminatorOptimizor.step()
conf.writer.add_scalar('D/wd', w_distance.data.cpu().numpy(), global_step=conf.iterations)
conf.writer.add_scalar('D/gp', g_penalty.data.cpu().numpy(), global_step=conf.iterations)
conf.writer.add_scalar('D/total', w_distance.data.cpu().numpy() +
g_penalty.data.cpu().numpy(),
global_step=conf.iterations)
# Generator update
def gen_update(conf, Generator_Model, Discriminator_Model,
GeneratorOptimizor, DiscriminatorOptimizor):
# Generator Update function
    # Optimizer setup
GeneratorOptimizor.zero_grad()
DiscriminatorOptimizor.zero_grad()
# Generate random noise
noise = torch.randn(conf.batch_size, conf.NOISE_SIZE)
if torch.cuda.is_available():
noise = Variable(noise.cuda())
else:
noise = Variable(noise)
# Get output from Generator
output = Generator_Model(noise)
# Feed Forward to Discriminator
fake_image_output = Discriminator_Model(output)
# Generator loss Calculation
gen_loss = -fake_image_output.mean()
    # Loss backpropagation
gen_loss.backward()
GeneratorOptimizor.step()
conf.writer.add_scalars('G',
{"g_loss": gen_loss.data.cpu().numpy()},
global_step=conf.iterations)
# Gradient Penalty Calculation
def gradient_penalty(x, y, f):
# interpolation
shape = [x.size(0)] + [1] * (x.dim() - 1)
if torch.cuda.is_available():
alpha = torch.rand(shape).cuda()
else:
alpha = torch.rand(shape)
z = x + alpha * (y - x)
# gradient penalty
if torch.cuda.is_available():
z = Variable(z, requires_grad=True).cuda()
else:
z = Variable(z, requires_grad=True)
o = f(z)
if torch.cuda.is_available():
g = grad(o, z, grad_outputs=torch.ones(o.size()).cuda(), create_graph=True)[0].view(z.size(0), -1)
else:
g = grad(o, z, grad_outputs=torch.ones(o.size()), create_graph=True)[0].view(z.size(0), -1)
gp = ((g.norm(p=2, dim=1) - 1) ** 2).mean()
return gp
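# A minimal standalone sketch of exercising the penalty term (illustrative only;
# the critic and tensor shapes below are assumptions, not part of this script):
#
#     critic = Discriminator()
#     real = torch.randn(8, 3, 64, 64)
#     fake = torch.randn(8, 3, 64, 64)
#     gp = gradient_penalty(real, fake, critic)
#     # gp is a scalar: the mean of (||grad_z critic(z)||_2 - 1)^2 over random
#     # interpolates z between the real and fake batches.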
# Main Running control
if __name__ == '__main__':
main()
``` |
{
"source": "jietang1750/GuessWords",
"score": 4
} |
#### File: jietang1750/GuessWords/quadWords.py
```python
import json
def print_hi(name):
# Use a breakpoint in the code line below to debug your script.
print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
print_hi('PyCharm')
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
def replaceCharacterInString(strWord,strChar,n):
tmpWord = ''
if n <= len(strWord) and len(strChar) == 1:
for i in range(0,len(strWord)):
if i != n - 1:
tmpWord = tmpWord + strWord[i]
else:
tmpWord = tmpWord + strChar
    else:
        # n is out of range; append the character to the end of the word instead
        tmpWord = strWord + strChar
return(tmpWord)
def isWord(word,dictionary):
if word in dictionary[word[0]]:
return True
else:
return False
def clue(pool,confirmedChars,dictionary):
    # Enumerate every five-letter combination from the per-position pools and keep
    # those that contain all confirmed characters and appear in the dictionary.
    word = ''
    wordList = []
i = 0
for i0 in range(0, len(pool[0])):
word0 = pool[0][i0]
# print("1",i1,word)
for i1 in range(0, len(pool[1])):
word1 = word0 + pool[1][i1]
# print("2", i2, word)
for i2 in range(0, len(pool[2])):
word2 = word1 + pool[2][i2]
# print("3", i3, word)
for i3 in range(0, len(pool[3])):
word3 = word2 + pool[3][i3]
# print("4", i4, word)
for i4 in range(0, len(pool[4])):
word4 = word3 + pool[4][i4]
word = word4
bConfirm = True
for k in range(0, len(confirmedChars)):
tmpChar = confirmedChars[k]
if (tmpChar not in word) and bConfirm:
bConfirm = False
# print("no", i1, i2, i3, i4, i5, word)
break
if bConfirm:
                            bWord = isWord(word, dictionary)
# print("yes", i1, i2, i3, i4, i5, word)
# print(wordList,word)
if bWord:
i += 1
wordList.append(word)
return(wordList)
def list2dict(wordList):
wordDict = {}
for word in wordList:
if word[0] not in wordDict.keys():
wordDict[word[0]] = []
wordDict[word[0]].append(word)
return wordDict
def formPool(whiteChar,yellowChar,greenChar):
    # Build, for each of the five letter positions, the pool of characters that may
    # still occupy it, plus the string of confirmed (yellow/green) characters.
    pool=[]
    conChar = ''
yPool = decompChar(yellowChar)
gPool = decompChar(greenChar)
# print ("gPool",gPool)
# print ("yPool",yPool)
for k in range (1,6):
tmpPool1 = whiteChar
for n in yPool.keys():
for tmpChar in yPool[n]:
if tmpChar not in tmpPool1:
tmpPool1 = tmpPool1 + tmpChar
if tmpChar not in conChar:
conChar = conChar + tmpChar
for n in gPool.keys():
for tmpChar in gPool[n]:
if tmpChar not in tmpPool1:
tmpPool1 = tmpPool1 + tmpChar
if tmpChar not in conChar:
conChar = conChar + tmpChar
if k in gPool.keys():
tmpPool1 = gPool[k]
if tmpPool1 not in conChar:
conChar = conChar + tmpPool1
if k in yPool.keys():
for tmpChar in yPool[k]:
if tmpChar in tmpPool1:
tmpPool2 = tmpPool1.replace(tmpChar,'')
tmpPool1 = tmpPool2
pool.append(tmpPool1)
return (pool,conChar)
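# Worked example (values obtained by tracing the function; illustrative only):
# with the full alphabet as whiteChar, yellowChar "2u5s" and greenChar "1s4l5k",
#     pool[0] == 's'                  (green 's' is locked on position 1)
#     pool[1] == alphabet minus 'u'   (yellow 'u' cannot stay on position 2)
#     pool[2] == the full alphabet
#     pool[3] == 'l', pool[4] == 'k'  (green letters on positions 4 and 5)
#     conChar == 'uslk'               (letters every candidate must contain)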
def decompChar(strColChar):
colChar ={}
for i in range(1,len(strColChar)+1):
tmpChar = strColChar[i-1]
if tmpChar.isnumeric():
nCol = int(tmpChar)
#print (nCol)
if nCol not in colChar.keys():
colChar[nCol] = ''
# print("numb", tmpChar)
else:
if tmpChar not in colChar[nCol]:
colChar[nCol] = colChar[nCol] + tmpChar
# print("alpha", tmpChar)
return (colChar)
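# For example (traced, illustrative): decompChar("2u5s") returns {2: 'u', 5: 's'}
# and decompChar("1s4l5k") returns {1: 's', 4: 'l', 5: 'k'} -- each digit selects
# a tile position and the letters that follow are the characters seen there.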
def removeCharFromWChar(wChar,word):
tmpPool = wChar
for tmpChar in word:
if tmpChar in tmpPool:
tmpPool1 = tmpPool.replace(tmpChar,'')
tmpPool = tmpPool1
# print(tmpChar,tmpPool,tmpPool1)
return(tmpPool)
def scoreWord(word,wChar):
tmpWord = ''
score = 0
for tmpChar in word:
if tmpChar not in tmpWord:
tmpWord = tmpWord + tmpChar
if tmpChar in wChar:
score += 1
if tmpChar in 'etaionshr':
score += 1
return score
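# Example (traced, illustrative): with every letter still untried,
# scoreWord("slate", "abcdefghijklmnopqrstuvwxyz") == 9 -- each distinct letter
# scores 1 for being untried plus 1 more if it is one of the common letters 'etaionshr'.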
def inChar (msg,myChar):
tmpChar = input(msg)
tmpLen = len(tmpChar)
if tmpLen > 0:
if tmpChar[0].isnumeric() or tmpChar[0] == 'z':
if tmpChar[0:2] == 'zz':
if tmpLen > 2:
                    myChar = tmpChar[2:]  # keep everything after the "zz" prefix
else:
myChar = ''
else:
myChar = myChar + tmpChar
return (myChar)
else:
return(myChar)
else:
return(myChar)
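# inChar is interactive; illustrative behaviour (traced, not from the author):
# with myChar == "2u", typing "5s" returns "2u5s" (append), typing "zz" returns ""
# (reset), typing "zz3a" returns "3a" (replace), and input that does not start
# with a digit or 'z' leaves myChar unchanged.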
wCharDefault = 'qypfgjzxbn'
yCharDefault = '2u5s'
gCharDefault = '1s4l5k'
filename = "dictionary5.json"
with open(filename, 'r') as file:
    dict = json.load(file)
# the with-statement closes the file automatically
n = int(input("Your Quordle Round\n"))
wChar = 'abcdefghijklmnopqrstuvwxyz'
if n > 1:
wChar = input("White Keys, like: "+ wChar +"\n")
yChar = {}
gChar = {}
pool = {}
confirmedChars = {}
for nQuard in range (1,5):
yChar[nQuard] = ''
gChar[nQuard] = ''
bSuccess = {}
for nQuard in range(1, 5):
bSuccess[nQuard] = False
for k in range (n,10):
newList = []
for nQuard in range (1,5):
if not bSuccess[nQuard]:
if k > 1:
msg = str(nQuard) + ", Yellow Tiles, like, " + yChar[nQuard] + ":"
yCharIn = inChar(msg, yChar[nQuard])
msg = str(nQuard) + ", Green Tiles, like, " + gChar[nQuard] + ":"
gCharIn = inChar(msg, gChar[nQuard])
else:
yCharIn = ''
gCharIn = ''
yChar[nQuard] = yCharIn
gChar[nQuard] = gCharIn
#print('yChar', yChar[nQuard])
#print('gChar', gChar[nQuard])
#if wChar == '':
# wChar = wCharDefault
#if yChar == '':
# yChar = yCharDefault
#if gChar == '':
# gChar = gCharDefault
(pool[nQuard],confirmedChars[nQuard]) = formPool(wChar,yChar[nQuard],gChar[nQuard])
# print(k, nQuard,pool[nQuard])
# print(k, nQuard, confirmedChars[nQuard])
for nQuard in range(1, 5):
if not bSuccess[nQuard]:
if k > 1:
# print(pool[nQuard])
# print(confirmedChars[nQuard])
bGuess = True
for n in range(1,nQuard):
if pool[n] == pool[nQuard] and confirmedChars[n] == confirmedChars[nQuard]:
bGuess = False
wordList = []
break
if bGuess:
print("Guessing " + str(nQuard) + "...")
wordList = clue(pool[nQuard],confirmedChars[nQuard],dict)
#dict = list2dict(wordList)
nLen = len(wordList)
print (nLen, "words found.")
i=0
tmpWord = {}
for word in wordList:
i += 1
score = scoreWord(word,wChar)
if nLen <= 2:
score = 51 - nLen
# print (word, score)
tmpWord = {"score": score,"guess":word}
newList.append(tmpWord)
i=0
for singleWord in sorted(newList,key = lambda i:(-i['score'],i['guess'])):
print(i+1,singleWord["guess"], singleWord["score"])
i = i+1
if i >= 5:
break
if k <= 9:
guessWord = input ("Round " + str(k) +" Guess: ")
        nSuccess = int(input("Did this guess solve one of the boards? Enter its number (1-4), or 0 if none: "))
if nSuccess >= 1 and nSuccess <= 4:
bSuccess[nSuccess] = True
bSuccessCombined = True
for nQuard in range (1,5):
bSuccessCombined = bSuccessCombined and bSuccess[nQuard]
if bSuccessCombined:
break
else:
wChar = removeCharFromWChar(wChar,guessWord)
# print(wChar)
``` |
{
"source": "jieter/black",
"score": 2
} |
#### File: black/tests/test_black.py
```python
import asyncio
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from functools import partial
from io import BytesIO, TextIOWrapper
import os
from pathlib import Path
import re
import sys
from tempfile import TemporaryDirectory
from typing import Any, BinaryIO, Generator, List, Tuple, Iterator
import unittest
from unittest.mock import patch, MagicMock
from click import unstyle
from click.testing import CliRunner
import black
ll = 88
ff = partial(black.format_file_in_place, line_length=ll, fast=True)
fs = partial(black.format_str, line_length=ll)
THIS_FILE = Path(__file__)
THIS_DIR = THIS_FILE.parent
EMPTY_LINE = "# EMPTY LINE WITH WHITESPACE" + " (this comment will be removed)"
def dump_to_stderr(*output: str) -> str:
return "\n" + "\n".join(output) + "\n"
def read_data(name: str, data: bool = True) -> Tuple[str, str]:
"""read_data('test_name') -> 'input', 'output'"""
if not name.endswith((".py", ".pyi", ".out", ".diff")):
name += ".py"
_input: List[str] = []
_output: List[str] = []
base_dir = THIS_DIR / "data" if data else THIS_DIR
with open(base_dir / name, "r", encoding="utf8") as test:
lines = test.readlines()
result = _input
for line in lines:
line = line.replace(EMPTY_LINE, "")
if line.rstrip() == "# output":
result = _output
continue
result.append(line)
if _input and not _output:
# If there's no output marker, treat the entire file as already pre-formatted.
_output = _input[:]
return "".join(_input).strip() + "\n", "".join(_output).strip() + "\n"
@contextmanager
def cache_dir(exists: bool = True) -> Iterator[Path]:
with TemporaryDirectory() as workspace:
cache_dir = Path(workspace)
if not exists:
cache_dir = cache_dir / "new"
with patch("black.CACHE_DIR", cache_dir):
yield cache_dir
@contextmanager
def event_loop(close: bool) -> Iterator[None]:
policy = asyncio.get_event_loop_policy()
old_loop = policy.get_event_loop()
loop = policy.new_event_loop()
asyncio.set_event_loop(loop)
try:
yield
finally:
policy.set_event_loop(old_loop)
if close:
loop.close()
class BlackRunner(CliRunner):
"""Modify CliRunner so that stderr is not merged with stdout.
This is a hack that can be removed once we depend on Click 7.x"""
def __init__(self, stderrbuf: BinaryIO) -> None:
self.stderrbuf = stderrbuf
super().__init__()
@contextmanager
def isolation(self, *args: Any, **kwargs: Any) -> Generator[BinaryIO, None, None]:
with super().isolation(*args, **kwargs) as output:
try:
hold_stderr = sys.stderr
sys.stderr = TextIOWrapper(self.stderrbuf, encoding=self.charset)
yield output
finally:
sys.stderr = hold_stderr
class BlackTestCase(unittest.TestCase):
maxDiff = None
def assertFormatEqual(self, expected: str, actual: str) -> None:
if actual != expected and not os.environ.get("SKIP_AST_PRINT"):
bdv: black.DebugVisitor[Any]
black.out("Expected tree:", fg="green")
try:
exp_node = black.lib2to3_parse(expected)
bdv = black.DebugVisitor()
list(bdv.visit(exp_node))
except Exception as ve:
black.err(str(ve))
black.out("Actual tree:", fg="red")
try:
exp_node = black.lib2to3_parse(actual)
bdv = black.DebugVisitor()
list(bdv.visit(exp_node))
except Exception as ve:
black.err(str(ve))
self.assertEqual(expected, actual)
@patch("black.dump_to_file", dump_to_stderr)
def test_empty(self) -> None:
source = expected = ""
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
def test_empty_ff(self) -> None:
expected = ""
tmp_file = Path(black.dump_to_file())
try:
self.assertFalse(ff(tmp_file, write_back=black.WriteBack.YES))
with open(tmp_file, encoding="utf8") as f:
actual = f.read()
finally:
os.unlink(tmp_file)
self.assertFormatEqual(expected, actual)
@patch("black.dump_to_file", dump_to_stderr)
def test_self(self) -> None:
source, expected = read_data("test_black", data=False)
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
self.assertFalse(ff(THIS_FILE))
@patch("black.dump_to_file", dump_to_stderr)
def test_black(self) -> None:
source, expected = read_data("../black", data=False)
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
self.assertFalse(ff(THIS_DIR / ".." / "black.py"))
def test_piping(self) -> None:
source, expected = read_data("../black", data=False)
stderrbuf = BytesIO()
result = BlackRunner(stderrbuf).invoke(
black.main, ["-", "--fast", f"--line-length={ll}"], input=source
)
self.assertEqual(result.exit_code, 0)
self.assertFormatEqual(expected, result.output)
black.assert_equivalent(source, result.output)
black.assert_stable(source, result.output, line_length=ll)
def test_piping_diff(self) -> None:
diff_header = re.compile(
rf"(STDIN|STDOUT)\t\d\d\d\d-\d\d-\d\d "
rf"\d\d:\d\d:\d\d\.\d\d\d\d\d\d \+\d\d\d\d"
)
source, _ = read_data("expression.py")
expected, _ = read_data("expression.diff")
config = THIS_DIR / "data" / "empty_pyproject.toml"
stderrbuf = BytesIO()
args = ["-", "--fast", f"--line-length={ll}", "--diff", f"--config={config}"]
result = BlackRunner(stderrbuf).invoke(black.main, args, input=source)
self.assertEqual(result.exit_code, 0)
actual = diff_header.sub("[Deterministic header]", result.output)
actual = actual.rstrip() + "\n" # the diff output has a trailing space
self.assertEqual(expected, actual)
@patch("black.dump_to_file", dump_to_stderr)
def test_setup(self) -> None:
source, expected = read_data("../setup", data=False)
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
self.assertFalse(ff(THIS_DIR / ".." / "setup.py"))
@patch("black.dump_to_file", dump_to_stderr)
def test_function(self) -> None:
source, expected = read_data("function")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_function2(self) -> None:
source, expected = read_data("function2")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_expression(self) -> None:
source, expected = read_data("expression")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
def test_expression_ff(self) -> None:
source, expected = read_data("expression")
tmp_file = Path(black.dump_to_file(source))
try:
self.assertTrue(ff(tmp_file, write_back=black.WriteBack.YES))
with open(tmp_file, encoding="utf8") as f:
actual = f.read()
finally:
os.unlink(tmp_file)
self.assertFormatEqual(expected, actual)
with patch("black.dump_to_file", dump_to_stderr):
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
def test_expression_diff(self) -> None:
source, _ = read_data("expression.py")
expected, _ = read_data("expression.diff")
tmp_file = Path(black.dump_to_file(source))
diff_header = re.compile(
rf"{re.escape(str(tmp_file))}\t\d\d\d\d-\d\d-\d\d "
rf"\d\d:\d\d:\d\d\.\d\d\d\d\d\d \+\d\d\d\d"
)
stderrbuf = BytesIO()
try:
result = BlackRunner(stderrbuf).invoke(
black.main, ["--diff", str(tmp_file)]
)
self.assertEqual(result.exit_code, 0)
finally:
os.unlink(tmp_file)
actual = result.output
actual = diff_header.sub("[Deterministic header]", actual)
actual = actual.rstrip() + "\n" # the diff output has a trailing space
if expected != actual:
dump = black.dump_to_file(actual)
msg = (
f"Expected diff isn't equal to the actual. If you made changes "
f"to expression.py and this is an anticipated difference, "
f"overwrite tests/expression.diff with {dump}"
)
self.assertEqual(expected, actual, msg)
@patch("black.dump_to_file", dump_to_stderr)
def test_fstring(self) -> None:
source, expected = read_data("fstring")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_string_quotes(self) -> None:
source, expected = read_data("string_quotes")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
mode = black.FileMode.NO_STRING_NORMALIZATION
not_normalized = fs(source, mode=mode)
self.assertFormatEqual(source, not_normalized)
black.assert_equivalent(source, not_normalized)
black.assert_stable(source, not_normalized, line_length=ll, mode=mode)
@patch("black.dump_to_file", dump_to_stderr)
def test_slices(self) -> None:
source, expected = read_data("slices")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments(self) -> None:
source, expected = read_data("comments")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments2(self) -> None:
source, expected = read_data("comments2")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments3(self) -> None:
source, expected = read_data("comments3")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments4(self) -> None:
source, expected = read_data("comments4")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_comments5(self) -> None:
source, expected = read_data("comments5")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_cantfit(self) -> None:
source, expected = read_data("cantfit")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_import_spacing(self) -> None:
source, expected = read_data("import_spacing")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_composition(self) -> None:
source, expected = read_data("composition")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_empty_lines(self) -> None:
source, expected = read_data("empty_lines")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_string_prefixes(self) -> None:
source, expected = read_data("string_prefixes")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_python2(self) -> None:
source, expected = read_data("python2")
actual = fs(source)
self.assertFormatEqual(expected, actual)
# black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_python2_unicode_literals(self) -> None:
source, expected = read_data("python2_unicode_literals")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_stable(source, actual, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_stub(self) -> None:
mode = black.FileMode.PYI
source, expected = read_data("stub.pyi")
actual = fs(source, mode=mode)
self.assertFormatEqual(expected, actual)
black.assert_stable(source, actual, line_length=ll, mode=mode)
@patch("black.dump_to_file", dump_to_stderr)
def test_fmtonoff(self) -> None:
source, expected = read_data("fmtonoff")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_fmtonoff2(self) -> None:
source, expected = read_data("fmtonoff2")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_remove_empty_parentheses_after_class(self) -> None:
source, expected = read_data("class_blank_parentheses")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
@patch("black.dump_to_file", dump_to_stderr)
def test_new_line_between_class_and_code(self) -> None:
source, expected = read_data("class_methods_new_line")
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, line_length=ll)
def test_report_verbose(self) -> None:
report = black.Report(verbose=True)
out_lines = []
err_lines = []
def out(msg: str, **kwargs: Any) -> None:
out_lines.append(msg)
def err(msg: str, **kwargs: Any) -> None:
err_lines.append(msg)
with patch("black.out", out), patch("black.err", err):
report.done(Path("f1"), black.Changed.NO)
self.assertEqual(len(out_lines), 1)
self.assertEqual(len(err_lines), 0)
self.assertEqual(out_lines[-1], "f1 already well formatted, good job.")
self.assertEqual(unstyle(str(report)), "1 file left unchanged.")
self.assertEqual(report.return_code, 0)
report.done(Path("f2"), black.Changed.YES)
self.assertEqual(len(out_lines), 2)
self.assertEqual(len(err_lines), 0)
self.assertEqual(out_lines[-1], "reformatted f2")
self.assertEqual(
unstyle(str(report)), "1 file reformatted, 1 file left unchanged."
)
report.done(Path("f3"), black.Changed.CACHED)
self.assertEqual(len(out_lines), 3)
self.assertEqual(len(err_lines), 0)
self.assertEqual(
out_lines[-1], "f3 wasn't modified on disk since last run."
)
self.assertEqual(
unstyle(str(report)), "1 file reformatted, 2 files left unchanged."
)
self.assertEqual(report.return_code, 0)
report.check = True
self.assertEqual(report.return_code, 1)
report.check = False
report.failed(Path("e1"), "boom")
self.assertEqual(len(out_lines), 3)
self.assertEqual(len(err_lines), 1)
self.assertEqual(err_lines[-1], "error: cannot format e1: boom")
self.assertEqual(
unstyle(str(report)),
"1 file reformatted, 2 files left unchanged, "
"1 file failed to reformat.",
)
self.assertEqual(report.return_code, 123)
report.done(Path("f3"), black.Changed.YES)
self.assertEqual(len(out_lines), 4)
self.assertEqual(len(err_lines), 1)
self.assertEqual(out_lines[-1], "reformatted f3")
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 2 files left unchanged, "
"1 file failed to reformat.",
)
self.assertEqual(report.return_code, 123)
report.failed(Path("e2"), "boom")
self.assertEqual(len(out_lines), 4)
self.assertEqual(len(err_lines), 2)
self.assertEqual(err_lines[-1], "error: cannot format e2: boom")
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 2 files left unchanged, "
"2 files failed to reformat.",
)
self.assertEqual(report.return_code, 123)
report.path_ignored(Path("wat"), "no match")
self.assertEqual(len(out_lines), 5)
self.assertEqual(len(err_lines), 2)
self.assertEqual(out_lines[-1], "wat ignored: no match")
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 2 files left unchanged, "
"2 files failed to reformat.",
)
self.assertEqual(report.return_code, 123)
report.done(Path("f4"), black.Changed.NO)
self.assertEqual(len(out_lines), 6)
self.assertEqual(len(err_lines), 2)
self.assertEqual(out_lines[-1], "f4 already well formatted, good job.")
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 3 files left unchanged, "
"2 files failed to reformat.",
)
self.assertEqual(report.return_code, 123)
report.check = True
self.assertEqual(
unstyle(str(report)),
"2 files would be reformatted, 3 files would be left unchanged, "
"2 files would fail to reformat.",
)
def test_report_quiet(self) -> None:
report = black.Report(quiet=True)
out_lines = []
err_lines = []
def out(msg: str, **kwargs: Any) -> None:
out_lines.append(msg)
def err(msg: str, **kwargs: Any) -> None:
err_lines.append(msg)
with patch("black.out", out), patch("black.err", err):
report.done(Path("f1"), black.Changed.NO)
self.assertEqual(len(out_lines), 0)
self.assertEqual(len(err_lines), 0)
self.assertEqual(unstyle(str(report)), "1 file left unchanged.")
self.assertEqual(report.return_code, 0)
report.done(Path("f2"), black.Changed.YES)
self.assertEqual(len(out_lines), 0)
self.assertEqual(len(err_lines), 0)
self.assertEqual(
unstyle(str(report)), "1 file reformatted, 1 file left unchanged."
)
report.done(Path("f3"), black.Changed.CACHED)
self.assertEqual(len(out_lines), 0)
self.assertEqual(len(err_lines), 0)
self.assertEqual(
unstyle(str(report)), "1 file reformatted, 2 files left unchanged."
)
self.assertEqual(report.return_code, 0)
report.check = True
self.assertEqual(report.return_code, 1)
report.check = False
report.failed(Path("e1"), "boom")
self.assertEqual(len(out_lines), 0)
self.assertEqual(len(err_lines), 1)
self.assertEqual(err_lines[-1], "error: cannot format e1: boom")
self.assertEqual(
unstyle(str(report)),
"1 file reformatted, 2 files left unchanged, "
"1 file failed to reformat.",
)
self.assertEqual(report.return_code, 123)
report.done(Path("f3"), black.Changed.YES)
self.assertEqual(len(out_lines), 0)
self.assertEqual(len(err_lines), 1)
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 2 files left unchanged, "
"1 file failed to reformat.",
)
self.assertEqual(report.return_code, 123)
report.failed(Path("e2"), "boom")
self.assertEqual(len(out_lines), 0)
self.assertEqual(len(err_lines), 2)
self.assertEqual(err_lines[-1], "error: cannot format e2: boom")
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 2 files left unchanged, "
"2 files failed to reformat.",
)
self.assertEqual(report.return_code, 123)
report.path_ignored(Path("wat"), "no match")
self.assertEqual(len(out_lines), 0)
self.assertEqual(len(err_lines), 2)
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 2 files left unchanged, "
"2 files failed to reformat.",
)
self.assertEqual(report.return_code, 123)
report.done(Path("f4"), black.Changed.NO)
self.assertEqual(len(out_lines), 0)
self.assertEqual(len(err_lines), 2)
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 3 files left unchanged, "
"2 files failed to reformat.",
)
self.assertEqual(report.return_code, 123)
report.check = True
self.assertEqual(
unstyle(str(report)),
"2 files would be reformatted, 3 files would be left unchanged, "
"2 files would fail to reformat.",
)
def test_report_normal(self) -> None:
report = black.Report()
out_lines = []
err_lines = []
def out(msg: str, **kwargs: Any) -> None:
out_lines.append(msg)
def err(msg: str, **kwargs: Any) -> None:
err_lines.append(msg)
with patch("black.out", out), patch("black.err", err):
report.done(Path("f1"), black.Changed.NO)
self.assertEqual(len(out_lines), 0)
self.assertEqual(len(err_lines), 0)
self.assertEqual(unstyle(str(report)), "1 file left unchanged.")
self.assertEqual(report.return_code, 0)
report.done(Path("f2"), black.Changed.YES)
self.assertEqual(len(out_lines), 1)
self.assertEqual(len(err_lines), 0)
self.assertEqual(out_lines[-1], "reformatted f2")
self.assertEqual(
unstyle(str(report)), "1 file reformatted, 1 file left unchanged."
)
report.done(Path("f3"), black.Changed.CACHED)
self.assertEqual(len(out_lines), 1)
self.assertEqual(len(err_lines), 0)
self.assertEqual(out_lines[-1], "reformatted f2")
self.assertEqual(
unstyle(str(report)), "1 file reformatted, 2 files left unchanged."
)
self.assertEqual(report.return_code, 0)
report.check = True
self.assertEqual(report.return_code, 1)
report.check = False
report.failed(Path("e1"), "boom")
self.assertEqual(len(out_lines), 1)
self.assertEqual(len(err_lines), 1)
self.assertEqual(err_lines[-1], "error: cannot format e1: boom")
self.assertEqual(
unstyle(str(report)),
"1 file reformatted, 2 files left unchanged, "
"1 file failed to reformat.",
)
self.assertEqual(report.return_code, 123)
report.done(Path("f3"), black.Changed.YES)
self.assertEqual(len(out_lines), 2)
self.assertEqual(len(err_lines), 1)
self.assertEqual(out_lines[-1], "reformatted f3")
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 2 files left unchanged, "
"1 file failed to reformat.",
)
self.assertEqual(report.return_code, 123)
report.failed(Path("e2"), "boom")
self.assertEqual(len(out_lines), 2)
self.assertEqual(len(err_lines), 2)
self.assertEqual(err_lines[-1], "error: cannot format e2: boom")
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 2 files left unchanged, "
"2 files failed to reformat.",
)
self.assertEqual(report.return_code, 123)
report.path_ignored(Path("wat"), "no match")
self.assertEqual(len(out_lines), 2)
self.assertEqual(len(err_lines), 2)
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 2 files left unchanged, "
"2 files failed to reformat.",
)
self.assertEqual(report.return_code, 123)
report.done(Path("f4"), black.Changed.NO)
self.assertEqual(len(out_lines), 2)
self.assertEqual(len(err_lines), 2)
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 3 files left unchanged, "
"2 files failed to reformat.",
)
self.assertEqual(report.return_code, 123)
report.check = True
self.assertEqual(
unstyle(str(report)),
"2 files would be reformatted, 3 files would be left unchanged, "
"2 files would fail to reformat.",
)
def test_is_python36(self) -> None:
node = black.lib2to3_parse("def f(*, arg): ...\n")
self.assertFalse(black.is_python36(node))
node = black.lib2to3_parse("def f(*, arg,): ...\n")
self.assertTrue(black.is_python36(node))
node = black.lib2to3_parse("def f(*, arg): f'string'\n")
self.assertTrue(black.is_python36(node))
source, expected = read_data("function")
node = black.lib2to3_parse(source)
self.assertTrue(black.is_python36(node))
node = black.lib2to3_parse(expected)
self.assertTrue(black.is_python36(node))
source, expected = read_data("expression")
node = black.lib2to3_parse(source)
self.assertFalse(black.is_python36(node))
node = black.lib2to3_parse(expected)
self.assertFalse(black.is_python36(node))
def test_get_future_imports(self) -> None:
node = black.lib2to3_parse("\n")
self.assertEqual(set(), black.get_future_imports(node))
node = black.lib2to3_parse("from __future__ import black\n")
self.assertEqual({"black"}, black.get_future_imports(node))
node = black.lib2to3_parse("from __future__ import multiple, imports\n")
self.assertEqual({"multiple", "imports"}, black.get_future_imports(node))
node = black.lib2to3_parse("from __future__ import (parenthesized, imports)\n")
self.assertEqual({"parenthesized", "imports"}, black.get_future_imports(node))
node = black.lib2to3_parse(
"from __future__ import multiple\nfrom __future__ import imports\n"
)
self.assertEqual({"multiple", "imports"}, black.get_future_imports(node))
node = black.lib2to3_parse("# comment\nfrom __future__ import black\n")
self.assertEqual({"black"}, black.get_future_imports(node))
node = black.lib2to3_parse('"""docstring"""\nfrom __future__ import black\n')
self.assertEqual({"black"}, black.get_future_imports(node))
node = black.lib2to3_parse("some(other, code)\nfrom __future__ import black\n")
self.assertEqual(set(), black.get_future_imports(node))
node = black.lib2to3_parse("from some.module import black\n")
self.assertEqual(set(), black.get_future_imports(node))
def test_debug_visitor(self) -> None:
source, _ = read_data("debug_visitor.py")
expected, _ = read_data("debug_visitor.out")
out_lines = []
err_lines = []
def out(msg: str, **kwargs: Any) -> None:
out_lines.append(msg)
def err(msg: str, **kwargs: Any) -> None:
err_lines.append(msg)
with patch("black.out", out), patch("black.err", err):
black.DebugVisitor.show(source)
actual = "\n".join(out_lines) + "\n"
log_name = ""
if expected != actual:
log_name = black.dump_to_file(*out_lines)
self.assertEqual(
expected,
actual,
f"AST print out is different. Actual version dumped to {log_name}",
)
def test_format_file_contents(self) -> None:
empty = ""
with self.assertRaises(black.NothingChanged):
black.format_file_contents(empty, line_length=ll, fast=False)
just_nl = "\n"
with self.assertRaises(black.NothingChanged):
black.format_file_contents(just_nl, line_length=ll, fast=False)
same = "l = [1, 2, 3]\n"
with self.assertRaises(black.NothingChanged):
black.format_file_contents(same, line_length=ll, fast=False)
different = "l = [1,2,3]"
expected = same
actual = black.format_file_contents(different, line_length=ll, fast=False)
self.assertEqual(expected, actual)
invalid = "return if you can"
with self.assertRaises(ValueError) as e:
black.format_file_contents(invalid, line_length=ll, fast=False)
self.assertEqual(str(e.exception), "Cannot parse: 1:7: return if you can")
def test_endmarker(self) -> None:
n = black.lib2to3_parse("\n")
self.assertEqual(n.type, black.syms.file_input)
self.assertEqual(len(n.children), 1)
self.assertEqual(n.children[0].type, black.token.ENDMARKER)
@unittest.skipIf(os.environ.get("SKIP_AST_PRINT"), "user set SKIP_AST_PRINT")
def test_assertFormatEqual(self) -> None:
out_lines = []
err_lines = []
def out(msg: str, **kwargs: Any) -> None:
out_lines.append(msg)
def err(msg: str, **kwargs: Any) -> None:
err_lines.append(msg)
with patch("black.out", out), patch("black.err", err):
with self.assertRaises(AssertionError):
self.assertFormatEqual("l = [1, 2, 3]", "l = [1, 2, 3,]")
out_str = "".join(out_lines)
self.assertTrue("Expected tree:" in out_str)
self.assertTrue("Actual tree:" in out_str)
self.assertEqual("".join(err_lines), "")
def test_cache_broken_file(self) -> None:
mode = black.FileMode.AUTO_DETECT
with cache_dir() as workspace:
cache_file = black.get_cache_file(black.DEFAULT_LINE_LENGTH, mode)
with cache_file.open("w") as fobj:
fobj.write("this is not a pickle")
self.assertEqual(black.read_cache(black.DEFAULT_LINE_LENGTH, mode), {})
src = (workspace / "test.py").resolve()
with src.open("w") as fobj:
fobj.write("print('hello')")
result = CliRunner().invoke(black.main, [str(src)])
self.assertEqual(result.exit_code, 0)
cache = black.read_cache(black.DEFAULT_LINE_LENGTH, mode)
self.assertIn(src, cache)
def test_cache_single_file_already_cached(self) -> None:
mode = black.FileMode.AUTO_DETECT
with cache_dir() as workspace:
src = (workspace / "test.py").resolve()
with src.open("w") as fobj:
fobj.write("print('hello')")
black.write_cache({}, [src], black.DEFAULT_LINE_LENGTH, mode)
result = CliRunner().invoke(black.main, [str(src)])
self.assertEqual(result.exit_code, 0)
with src.open("r") as fobj:
self.assertEqual(fobj.read(), "print('hello')")
@event_loop(close=False)
def test_cache_multiple_files(self) -> None:
mode = black.FileMode.AUTO_DETECT
with cache_dir() as workspace, patch(
"black.ProcessPoolExecutor", new=ThreadPoolExecutor
):
one = (workspace / "one.py").resolve()
with one.open("w") as fobj:
fobj.write("print('hello')")
two = (workspace / "two.py").resolve()
with two.open("w") as fobj:
fobj.write("print('hello')")
black.write_cache({}, [one], black.DEFAULT_LINE_LENGTH, mode)
result = CliRunner().invoke(black.main, [str(workspace)])
self.assertEqual(result.exit_code, 0)
with one.open("r") as fobj:
self.assertEqual(fobj.read(), "print('hello')")
with two.open("r") as fobj:
self.assertEqual(fobj.read(), 'print("hello")\n')
cache = black.read_cache(black.DEFAULT_LINE_LENGTH, mode)
self.assertIn(one, cache)
self.assertIn(two, cache)
def test_no_cache_when_writeback_diff(self) -> None:
mode = black.FileMode.AUTO_DETECT
with cache_dir() as workspace:
src = (workspace / "test.py").resolve()
with src.open("w") as fobj:
fobj.write("print('hello')")
result = CliRunner().invoke(black.main, [str(src), "--diff"])
self.assertEqual(result.exit_code, 0)
cache_file = black.get_cache_file(black.DEFAULT_LINE_LENGTH, mode)
self.assertFalse(cache_file.exists())
def test_no_cache_when_stdin(self) -> None:
mode = black.FileMode.AUTO_DETECT
with cache_dir():
result = CliRunner().invoke(black.main, ["-"], input="print('hello')")
self.assertEqual(result.exit_code, 0)
cache_file = black.get_cache_file(black.DEFAULT_LINE_LENGTH, mode)
self.assertFalse(cache_file.exists())
def test_read_cache_no_cachefile(self) -> None:
mode = black.FileMode.AUTO_DETECT
with cache_dir():
self.assertEqual(black.read_cache(black.DEFAULT_LINE_LENGTH, mode), {})
def test_write_cache_read_cache(self) -> None:
mode = black.FileMode.AUTO_DETECT
with cache_dir() as workspace:
src = (workspace / "test.py").resolve()
src.touch()
black.write_cache({}, [src], black.DEFAULT_LINE_LENGTH, mode)
cache = black.read_cache(black.DEFAULT_LINE_LENGTH, mode)
self.assertIn(src, cache)
self.assertEqual(cache[src], black.get_cache_info(src))
def test_filter_cached(self) -> None:
with TemporaryDirectory() as workspace:
path = Path(workspace)
uncached = (path / "uncached").resolve()
cached = (path / "cached").resolve()
cached_but_changed = (path / "changed").resolve()
uncached.touch()
cached.touch()
cached_but_changed.touch()
cache = {cached: black.get_cache_info(cached), cached_but_changed: (0.0, 0)}
todo, done = black.filter_cached(
cache, {uncached, cached, cached_but_changed}
)
self.assertEqual(todo, {uncached, cached_but_changed})
self.assertEqual(done, {cached})
def test_write_cache_creates_directory_if_needed(self) -> None:
mode = black.FileMode.AUTO_DETECT
with cache_dir(exists=False) as workspace:
self.assertFalse(workspace.exists())
black.write_cache({}, [], black.DEFAULT_LINE_LENGTH, mode)
self.assertTrue(workspace.exists())
@event_loop(close=False)
def test_failed_formatting_does_not_get_cached(self) -> None:
mode = black.FileMode.AUTO_DETECT
with cache_dir() as workspace, patch(
"black.ProcessPoolExecutor", new=ThreadPoolExecutor
):
failing = (workspace / "failing.py").resolve()
with failing.open("w") as fobj:
fobj.write("not actually python")
clean = (workspace / "clean.py").resolve()
with clean.open("w") as fobj:
fobj.write('print("hello")\n')
result = CliRunner().invoke(black.main, [str(workspace)])
self.assertEqual(result.exit_code, 123)
cache = black.read_cache(black.DEFAULT_LINE_LENGTH, mode)
self.assertNotIn(failing, cache)
self.assertIn(clean, cache)
def test_write_cache_write_fail(self) -> None:
mode = black.FileMode.AUTO_DETECT
with cache_dir(), patch.object(Path, "open") as mock:
mock.side_effect = OSError
black.write_cache({}, [], black.DEFAULT_LINE_LENGTH, mode)
@event_loop(close=False)
def test_check_diff_use_together(self) -> None:
with cache_dir():
# Files which will be reformatted.
src1 = (THIS_DIR / "data" / "string_quotes.py").resolve()
result = CliRunner().invoke(black.main, [str(src1), "--diff", "--check"])
self.assertEqual(result.exit_code, 1, result.output)
# Files which will not be reformatted.
src2 = (THIS_DIR / "data" / "composition.py").resolve()
result = CliRunner().invoke(black.main, [str(src2), "--diff", "--check"])
self.assertEqual(result.exit_code, 0, result.output)
# Multi file command.
result = CliRunner().invoke(
black.main, [str(src1), str(src2), "--diff", "--check"]
)
self.assertEqual(result.exit_code, 1, result.output)
def test_no_files(self) -> None:
with cache_dir():
# Without an argument, black exits with error code 0.
result = CliRunner().invoke(black.main, [])
self.assertEqual(result.exit_code, 0)
def test_broken_symlink(self) -> None:
with cache_dir() as workspace:
symlink = workspace / "broken_link.py"
try:
symlink.symlink_to("nonexistent.py")
except OSError as e:
self.skipTest(f"Can't create symlinks: {e}")
result = CliRunner().invoke(black.main, [str(workspace.resolve())])
self.assertEqual(result.exit_code, 0)
def test_read_cache_line_lengths(self) -> None:
mode = black.FileMode.AUTO_DETECT
with cache_dir() as workspace:
path = (workspace / "file.py").resolve()
path.touch()
black.write_cache({}, [path], 1, mode)
one = black.read_cache(1, mode)
self.assertIn(path, one)
two = black.read_cache(2, mode)
self.assertNotIn(path, two)
def test_single_file_force_pyi(self) -> None:
reg_mode = black.FileMode.AUTO_DETECT
pyi_mode = black.FileMode.PYI
contents, expected = read_data("force_pyi")
with cache_dir() as workspace:
path = (workspace / "file.py").resolve()
with open(path, "w") as fh:
fh.write(contents)
result = CliRunner().invoke(black.main, [str(path), "--pyi"])
self.assertEqual(result.exit_code, 0)
with open(path, "r") as fh:
actual = fh.read()
# verify cache with --pyi is separate
pyi_cache = black.read_cache(black.DEFAULT_LINE_LENGTH, pyi_mode)
self.assertIn(path, pyi_cache)
normal_cache = black.read_cache(black.DEFAULT_LINE_LENGTH, reg_mode)
self.assertNotIn(path, normal_cache)
self.assertEqual(actual, expected)
@event_loop(close=False)
def test_multi_file_force_pyi(self) -> None:
reg_mode = black.FileMode.AUTO_DETECT
pyi_mode = black.FileMode.PYI
contents, expected = read_data("force_pyi")
with cache_dir() as workspace:
paths = [
(workspace / "file1.py").resolve(),
(workspace / "file2.py").resolve(),
]
for path in paths:
with open(path, "w") as fh:
fh.write(contents)
result = CliRunner().invoke(black.main, [str(p) for p in paths] + ["--pyi"])
self.assertEqual(result.exit_code, 0)
for path in paths:
with open(path, "r") as fh:
actual = fh.read()
self.assertEqual(actual, expected)
# verify cache with --pyi is separate
pyi_cache = black.read_cache(black.DEFAULT_LINE_LENGTH, pyi_mode)
normal_cache = black.read_cache(black.DEFAULT_LINE_LENGTH, reg_mode)
for path in paths:
self.assertIn(path, pyi_cache)
self.assertNotIn(path, normal_cache)
def test_pipe_force_pyi(self) -> None:
source, expected = read_data("force_pyi")
result = CliRunner().invoke(black.main, ["-", "-q", "--pyi"], input=source)
self.assertEqual(result.exit_code, 0)
actual = result.output
self.assertFormatEqual(actual, expected)
def test_single_file_force_py36(self) -> None:
reg_mode = black.FileMode.AUTO_DETECT
py36_mode = black.FileMode.PYTHON36
source, expected = read_data("force_py36")
with cache_dir() as workspace:
path = (workspace / "file.py").resolve()
with open(path, "w") as fh:
fh.write(source)
result = CliRunner().invoke(black.main, [str(path), "--py36"])
self.assertEqual(result.exit_code, 0)
with open(path, "r") as fh:
actual = fh.read()
# verify cache with --py36 is separate
py36_cache = black.read_cache(black.DEFAULT_LINE_LENGTH, py36_mode)
self.assertIn(path, py36_cache)
normal_cache = black.read_cache(black.DEFAULT_LINE_LENGTH, reg_mode)
self.assertNotIn(path, normal_cache)
self.assertEqual(actual, expected)
@event_loop(close=False)
def test_multi_file_force_py36(self) -> None:
reg_mode = black.FileMode.AUTO_DETECT
py36_mode = black.FileMode.PYTHON36
source, expected = read_data("force_py36")
with cache_dir() as workspace:
paths = [
(workspace / "file1.py").resolve(),
(workspace / "file2.py").resolve(),
]
for path in paths:
with open(path, "w") as fh:
fh.write(source)
result = CliRunner().invoke(
black.main, [str(p) for p in paths] + ["--py36"]
)
self.assertEqual(result.exit_code, 0)
for path in paths:
with open(path, "r") as fh:
actual = fh.read()
self.assertEqual(actual, expected)
# verify cache with --py36 is separate
pyi_cache = black.read_cache(black.DEFAULT_LINE_LENGTH, py36_mode)
normal_cache = black.read_cache(black.DEFAULT_LINE_LENGTH, reg_mode)
for path in paths:
self.assertIn(path, pyi_cache)
self.assertNotIn(path, normal_cache)
def test_pipe_force_py36(self) -> None:
source, expected = read_data("force_py36")
result = CliRunner().invoke(black.main, ["-", "-q", "--py36"], input=source)
self.assertEqual(result.exit_code, 0)
actual = result.output
self.assertFormatEqual(actual, expected)
def test_include_exclude(self) -> None:
path = THIS_DIR / "data" / "include_exclude_tests"
include = re.compile(r"\.pyi?$")
exclude = re.compile(r"/exclude/|/\.definitely_exclude/")
report = black.Report()
sources: List[Path] = []
expected = [
Path(path / "b/dont_exclude/a.py"),
Path(path / "b/dont_exclude/a.pyi"),
]
this_abs = THIS_DIR.resolve()
sources.extend(
black.gen_python_files_in_dir(path, this_abs, include, exclude, report)
)
self.assertEqual(sorted(expected), sorted(sources))
def test_empty_include(self) -> None:
path = THIS_DIR / "data" / "include_exclude_tests"
report = black.Report()
empty = re.compile(r"")
sources: List[Path] = []
expected = [
Path(path / "b/exclude/a.pie"),
Path(path / "b/exclude/a.py"),
Path(path / "b/exclude/a.pyi"),
Path(path / "b/dont_exclude/a.pie"),
Path(path / "b/dont_exclude/a.py"),
Path(path / "b/dont_exclude/a.pyi"),
Path(path / "b/.definitely_exclude/a.pie"),
Path(path / "b/.definitely_exclude/a.py"),
Path(path / "b/.definitely_exclude/a.pyi"),
]
this_abs = THIS_DIR.resolve()
sources.extend(
black.gen_python_files_in_dir(
path, this_abs, empty, re.compile(black.DEFAULT_EXCLUDES), report
)
)
self.assertEqual(sorted(expected), sorted(sources))
def test_empty_exclude(self) -> None:
path = THIS_DIR / "data" / "include_exclude_tests"
report = black.Report()
empty = re.compile(r"")
sources: List[Path] = []
expected = [
Path(path / "b/dont_exclude/a.py"),
Path(path / "b/dont_exclude/a.pyi"),
Path(path / "b/exclude/a.py"),
Path(path / "b/exclude/a.pyi"),
Path(path / "b/.definitely_exclude/a.py"),
Path(path / "b/.definitely_exclude/a.pyi"),
]
this_abs = THIS_DIR.resolve()
sources.extend(
black.gen_python_files_in_dir(
path, this_abs, re.compile(black.DEFAULT_INCLUDES), empty, report
)
)
self.assertEqual(sorted(expected), sorted(sources))
def test_invalid_include_exclude(self) -> None:
for option in ["--include", "--exclude"]:
result = CliRunner().invoke(black.main, ["-", option, "**()(!!*)"])
self.assertEqual(result.exit_code, 2)
def test_preserves_line_endings(self) -> None:
with TemporaryDirectory() as workspace:
test_file = Path(workspace) / "test.py"
for nl in ["\n", "\r\n"]:
contents = nl.join(["def f( ):", " pass"])
test_file.write_bytes(contents.encode())
ff(test_file, write_back=black.WriteBack.YES)
updated_contents: bytes = test_file.read_bytes()
self.assertIn(nl.encode(), updated_contents) # type: ignore
if nl == "\n":
self.assertNotIn(b"\r\n", updated_contents) # type: ignore
def test_assert_equivalent_different_asts(self) -> None:
with self.assertRaises(AssertionError):
black.assert_equivalent("{}", "None")
def test_symlink_out_of_root_directory(self) -> None:
path = MagicMock()
root = THIS_DIR
child = MagicMock()
include = re.compile(black.DEFAULT_INCLUDES)
exclude = re.compile(black.DEFAULT_EXCLUDES)
report = black.Report()
# `child` should behave like a symlink which resolved path is clearly
# outside of the `root` directory.
path.iterdir.return_value = [child]
child.resolve.return_value = Path("/a/b/c")
child.is_symlink.return_value = True
try:
list(black.gen_python_files_in_dir(path, root, include, exclude, report))
except ValueError as ve:
            self.fail(f"`get_python_files_in_dir()` failed: {ve}")
path.iterdir.assert_called_once()
child.resolve.assert_called_once()
child.is_symlink.assert_called_once()
# `child` should behave like a strange file which resolved path is clearly
# outside of the `root` directory.
child.is_symlink.return_value = False
with self.assertRaises(ValueError):
list(black.gen_python_files_in_dir(path, root, include, exclude, report))
path.iterdir.assert_called()
self.assertEqual(path.iterdir.call_count, 2)
child.resolve.assert_called()
self.assertEqual(child.resolve.call_count, 2)
child.is_symlink.assert_called()
self.assertEqual(child.is_symlink.call_count, 2)
def test_shhh_click(self) -> None:
try:
from click import _unicodefun # type: ignore
except ModuleNotFoundError:
self.skipTest("Incompatible Click version")
if not hasattr(_unicodefun, "_verify_python3_env"):
self.skipTest("Incompatible Click version")
# First, let's see if Click is crashing with a preferred ASCII charset.
with patch("locale.getpreferredencoding") as gpe:
gpe.return_value = "ASCII"
with self.assertRaises(RuntimeError):
_unicodefun._verify_python3_env()
# Now, let's silence Click...
black.patch_click()
# ...and confirm it's silent.
with patch("locale.getpreferredencoding") as gpe:
gpe.return_value = "ASCII"
try:
_unicodefun._verify_python3_env()
except RuntimeError as re:
self.fail(f"`patch_click()` failed, exception still raised: {re}")
if __name__ == "__main__":
unittest.main(module="test_black")
``` |
{
"source": "jieter/python-loradecrypt",
"score": 3
} |
#### File: python-loradecrypt/tests/test_crypto.py
```python
import unittest
from lora.crypto import generate_appskey, loramac_decrypt
class TestCrypto(unittest.TestCase):
def test_loramac_decrypt(self):
key = "271E403DF4225EEF7E90836494A5B345"
dev_addr = "000015E4"
payloads = ((0, "73100b90"), (1, "68d388f0"), (2, "0a12e808"), (3, "e3413bee"))
expected = "cafebabe"
for sequence_counter, payload_hex in payloads:
plaintext_ints = loramac_decrypt(payload_hex, sequence_counter, key, dev_addr)
plaintext_hex = "".join("{:02x}".format(x) for x in plaintext_ints)
            self.assertEqual(plaintext_hex, expected)
def test_appskey(self):
key = generate_appskey()
        self.assertEqual(len(key), 32)
        self.assertNotEqual(key, generate_appskey())
        self.assertNotEqual(generate_appskey(), generate_appskey())
``` |
{
"source": "JieTrancender/crawl_code",
"score": 3
} |
#### File: JieTrancender/crawl_code/crawl_story.py
```python
import re
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
from cn2an import cn2an
import time
def get_content(url):
    # Download a chapter page and return its text split into paragraphs
    # (the site indents each paragraph with four non-breaking spaces).
    r = requests.get(url = url)
html = r.text
bs = BeautifulSoup(html, 'lxml')
texts = bs.find('div', id = 'content')
content = texts.text.strip().split('\xa0'*4)
return content
if __name__ == '__main__':
book_name = '诡秘之主.txt'
host = 'https://www.xinbiquge.org'
url = host + '/xiaoshuo/1bm.html'
r = requests.get(url = url)
html = r.text
bs = BeautifulSoup(html, 'lxml')
listmain = bs.find('div', class_ = 'listmain')
chapters = listmain.find_all('a')
chapterList = []
multi = {}
for chapter in chapters:
url = host + chapter.get('href')
if chapter.string:
            result = re.match(r'(\d+)(.*)', chapter.string)
            chapter_id = 1
            if result is None:
                # Titles such as "第一百二十三章 ..." use Chinese numerals;
                # cn2an converts them to an integer chapter number.
                result = re.match(r'第(.*?)章(.*)', chapter.string)
                chapter_id = cn2an(result.group(1))
                chapter_id = str(chapter_id)
            else:
                chapter_id = result.group(1)
            if chapter_id not in multi:
chapter_name = result.group(2)
multi[chapter_id] = True
chapterList.append([chapter_id, chapter_name, url])
chapterList.sort(key=lambda chapter: int(chapter[0]))
for chapter in tqdm(chapterList):
content = get_content(chapter[2])
with open(book_name, 'a', encoding='utf-8') as f:
f.write(chapter[1])
f.write('\n'.join(content))
time.sleep(10)
``` |
{
"source": "jietui/django_project",
"score": 3
} |
#### File: practice/booktest/models.py
```python
from django.db import models
from django.db.models import F, Q
from django.db.models import Sum, Min, Max, Avg, Count
# Create your models here.
# Custom book manager
class BookInfoManager(models.Manager):
def all(self):
return super(BookInfoManager, self).all().filter(is_delete=False)
def create_book(self, title, pub_date, read=0, comment=0, is_delete=False):
book = self.model()
book.btitle = title
book.bpub_date = pub_date
book.bread = read
book.bcomment = comment
book.is_delete = is_delete
book.save()
return book
class BookInfo(models.Model):
    # Define the custom manager on the model
books = BookInfoManager()
btitle = models.CharField(max_length=20, verbose_name='名称')
bpub_date = models.DateField(verbose_name='发布日期')
bread = models.IntegerField(default=0, verbose_name='阅读量')
bcomment = models.IntegerField(default=0, verbose_name='评论量')
is_delete = models.BooleanField(default=False, verbose_name='逻辑删除')
class Meta:
db_table = 'tb_books'
verbose_name = '图书'
verbose_name_plural = verbose_name
def __str__(self):
return self.btitle
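# Illustrative usage of the custom manager (hypothetical, not part of the app):
#
#     from datetime import date
#     BookInfo.books.all()                                  # hides is_delete=True rows
#     BookInfo.books.create_book('Django', date(2023, 1, 1))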
class HeroInfo(models.Model):
GENDER_CHOICES = (
        (0, 'female'),
(1, 'male')
)
hname = models.CharField(max_length=20, verbose_name='名称')
hgender = models.SmallIntegerField(choices=GENDER_CHOICES, default=0, verbose_name='性别')
hcomment = models.CharField(max_length=200, null=True, verbose_name='描述信息')
hbook = models.ForeignKey(BookInfo, on_delete=models.CASCADE, verbose_name='图书')
is_delete = models.BooleanField(default=False, verbose_name='逻辑删除')
class Meta:
db_table = 'tb_heroes'
verbose_name = "英雄"
verbose_name_plural = verbose_name
def __str__(self):
return self.hname
```
#### File: practice/users/views.py
```python
from django.http import HttpResponse
from django.shortcuts import render
from django.urls import reverse
# Create your views here.
def index(request):
url = reverse('users:index')
print(url)
return HttpResponse('hello world')
``` |
{
"source": "JiEumKim/github",
"score": 3
} |
#### File: github/gnlse/envelopes.py
```python
import numpy as np
class Envelope(object):
    def A(self, T):
raise NotImplementedError()
class SechEnvelope(Envelope):
"""Amplitude envelope of hyperbolic secant pulse.
Attributes
----------
Pmax : float
Peak power, [W].
FWHM : float
Pulse duration Full-Width Half-Maximum.
"""
def __init__(self, Pmax, FWHM):
self.name = 'Hyperbolic secant envelope'
self.Pmax = Pmax
self.FWHM = FWHM
def A(self, T):
"""
Parameters
----------
T : ndarray, (n, )
Time vector
Returns
-------
ndarray, (n, )
Amplitude envelope of hyperbolic secant pulse in time.
"""
m = 2 * np.log(1 + np.sqrt(2))
return np.sqrt(self.Pmax) * 2 / (np.exp(m * T / self.FWHM) +
np.exp(-m * T / self.FWHM))
class GaussianEnvelope(Envelope):
"""Amplitude envelope of gaussian pulse.
Attributes
----------
Pmax : float
Peak power [W].
FWHM : float
Pulse duration Full-Width Half-Maximum.
"""
def __init__(self, Pmax, FWHM):
self.name = 'Gaussian envelope'
self.Pmax = Pmax
self.FWHM = FWHM
def A(self, T):
"""
Parameters
----------
T : ndarray, (n, )
Time vector.
Returns
-------
ndarray, (n, )
Amplitude envelope of gaussian pulse in time.
"""
m = 4 * np.log(2)
return np.sqrt(self.Pmax) * np.exp(-m * .5 * T**2 / self.FWHM**2)
class LorentzianEnvelope(Envelope):
"""Amplitude envelope of lorentzian pulse.
Attributes
----------
Pmax : float
Peak power [W].
FWHM : float
Pulse duration Full-Width Half-Maximum.
"""
def __init__(self, Pmax, FWHM):
self.name = 'Lorentzian envelope'
self.Pmax = Pmax
self.FWHM = FWHM
def A(self, T):
"""
Parameters
----------
T : ndarray, (n, )
Time vector.
Returns
-------
ndarray, (n, )
Amplitude envelope of lorentzian pulse in time.
"""
m = 2 * np.sqrt(np.sqrt(2) - 1)
return np.sqrt(self.Pmax) / (1 + (m * T / self.FWHM)**2)
class CWEnvelope(Envelope):
"""Amplitude envelope of continious wave
with or without some temporal noise.
Attributes
----------
Pmax : float
Peak power [W].
Pn : float, optional
Peak power for noise [W].
"""
def __init__(self, Pmax, Pn=0):
        self.name = 'Continuous Wave'
self.Pmax = Pmax
self.Pn = Pn
def A(self, T):
"""
Parameters
----------
T : ndarray, (n, )
Time vector.
Returns
-------
ndarray, (n, )
            Amplitude envelope of continuous wave in time.
"""
cw = np.fft.ifft(np.sqrt(self.Pmax) * np.ones(np.size(T)))
noise = 0
if self.Pn:
noise = np.sqrt(self.Pn
) * np.exp(
1j * 2 * np.pi * np.random.rand(np.size(T)))
return np.fft.fft(cw + noise)
```
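A short sketch evaluating the envelopes above on a time grid; the `gnlse.envelopes` import path is inferred from the file location and the numbers are placeholder values. At `T = 0` every pulse envelope returns its peak amplitude `sqrt(Pmax)`.

```python
import numpy as np
from gnlse.envelopes import SechEnvelope, GaussianEnvelope  # assumed import path

T = np.linspace(-10, 10, 2**12)          # time grid in ps (placeholder values)
sech = SechEnvelope(Pmax=100, FWHM=0.05)
gauss = GaussianEnvelope(Pmax=100, FWHM=0.05)

A_sech = sech.A(T)                        # amplitude envelope, shape (4096,)
A_gauss = gauss.A(T)

# At T = 0 both return the peak amplitude sqrt(Pmax) = 10.
print(sech.A(np.array([0.0])), gauss.A(np.array([0.0])))
```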
#### File: github/gnlse/gnlse.py
```python
import numpy as np
import scipy.integrate
import pyfftw
import tqdm
from gnlse.common import c
from gnlse.import_export import write_mat, read_mat
class GNLSESetup:
"""
Model inputs for the ``GNLSE`` class.
Attributes
----------
resolution : int
Number of points on the computational grid. Determines time resolution
and bandwidth. Avoid numbers with large prime factors.
time_window : float [ps]
Width of the time window.
wavelength : float [nm]
Central wavelength of the input impulse.
fiber_length : float [m]
Length of the simulated optical fiber.
z_saves : int
Number of snapshots to save along the fiber. Larger numbers require
more memory to store the result.
nonlinearity : float [1/W/m]
Effective nonlinearity.
impulse_model : Envelope
Input impulse envelope model.
dispersion_model : Dispersion, optional
Fiber dispersion model or ``None`` to model a dispersionless fiber.
raman_model : function, optional
Raman scattering model or ``None`` if the effect is to be neglected.
    self_steepening : bool, optional
Whether to include the effect of self-steepening. Disabled by default.
rtol : float, optional
Relative tolerance passed to the ODE solver.
atol : float, optional
Absolute tolerance passed to the ODE solver.
method : str, optional
Integration method passed to the ODE solver.
"""
def __init__(self):
self.resolution = None
self.time_window = None
self.wavelength = None
self.fiber_length = None
self.z_saves = 200
self.nonlinearity = 0
self.impulse_model = None
self.dispersion_model = None
self.raman_model = None
self.self_steepening = False
self.rtol = 1e-3
self.atol = 1e-4
self.method = 'RK45'
class Solution:
"""
Represents a solution to a GNLSE problem.
Attributes
----------
t : ndarray, (n,)
Time domain grid.
W : ndarray, (n,)
Absolute angular frequency grid.
Z : ndarray (m,)
Points at which intermediate steps were saved.
At : ndarray, (n, m)
Intermediate steps in the time domain.
AW : ndarray, (n, m)
Intermediate steps in the frequency domain.
"""
def __init__(self, t=None, W=None, Z=None, At=None, AW=None,
Aty=None, AWy=None):
self.t = t
self.W = W
self.Z = Z
self.At = At
self.AW = AW
        # additional solutions in case of the two-mode example
self.Aty = Aty
self.AWy = AWy
def to_file(self, path):
"""
Saves a solution to a file.
Parameters
----------
path : str
Path to file.
"""
data = {'t': self.t, 'W': self.W, 'Z': self.Z, 'At': self.At,
'AW': self.AW}
write_mat(data, path)
def from_file(self, path):
"""
Load a solution from file.
Parameters
----------
path : str
Path to file.
"""
data = read_mat(path)
self.t = data['t']
self.W = data['W']
self.Z = data['Z']
self.At = data['At']
self.AW = data['AW']
class GNLSE:
"""
Models propagation of an optical impulse in a fiber by integrating
the generalized non-linear Schrödinger equation.
Attributes
----------
setup : GNLSESetup
Model inputs in the form of a ``GNLSESetup`` object.
"""
def __init__(self, setup):
if not isinstance(setup, GNLSESetup):
raise TypeError("setup is not an instance of GNLSESetup")
if setup.resolution is None:
raise ValueError("'resolution' not set")
if setup.time_window is None:
raise ValueError("'time_window' not set")
if setup.wavelength is None:
raise ValueError("'wavelength' not set")
if setup.fiber_length is None:
raise ValueError("'fiber_length' not set")
if setup.impulse_model is None:
raise ValueError("'impulse_model' not set")
# simulation parameters
self.fiber_length = setup.fiber_length
self.z_saves = setup.z_saves
self.rtol = setup.rtol
self.atol = setup.atol
self.method = setup.method
self.N = setup.resolution
# Time domain grid
self.t = np.linspace(-setup.time_window / 2,
setup.time_window / 2,
self.N)
# Relative angular frequency grid
self.V = 2 * np.pi * np.arange(-self.N / 2,
self.N / 2
) / (self.N * (self.t[1] - self.t[0]))
# Central angular frequency [10^12 rad]
w_0 = (2.0 * np.pi * c) / setup.wavelength
self.Omega = self.V + w_0
# Absolute angular frequency grid
if setup.self_steepening and np.abs(w_0) > np.finfo(float).eps:
W = self.V + w_0
else:
W = np.full(self.V.shape, w_0)
self.W = np.fft.fftshift(W)
# Nonlinearity
if hasattr(setup.nonlinearity, 'gamma'):
            # in case of frequency-dependent nonlinearity
gamma, self.scale = setup.nonlinearity.gamma(self.V + w_0)
self.gamma = gamma / w_0
self.gamma = np.fft.fftshift(self.gamma)
self.scale = np.fft.fftshift(self.scale)
else:
            # in case of a directly provided value
self.gamma = setup.nonlinearity / w_0
self.scale = 1
# Raman scattering
self.RW = None
if setup.raman_model:
self.fr, RT = setup.raman_model(self.t)
if np.abs(self.fr) < np.finfo(float).eps:
self.RW = None
else:
self.RW = self.N * np.fft.ifft(
np.fft.fftshift(np.transpose(RT)))
# Dispersion operator
if setup.dispersion_model:
self.D = setup.dispersion_model.D(self.V)
else:
self.D = np.zeros(self.V.shape)
# Input impulse
if hasattr(setup.impulse_model, 'A'):
self.A = setup.impulse_model.A(self.t)
else:
self.A = setup.impulse_model
def run(self):
"""
Solve one mode GNLSE equation described by the given
``GNLSESetup`` object.
Returns
-------
setup : Solution
Simulation results in the form of a ``Solution`` object.
"""
dt = self.t[1] - self.t[0]
self.D = np.fft.fftshift(self.D)
x = pyfftw.empty_aligned(self.N, dtype="complex128")
X = pyfftw.empty_aligned(self.N, dtype="complex128")
plan_forward = pyfftw.FFTW(x, X)
plan_inverse = pyfftw.FFTW(X, x, direction="FFTW_BACKWARD")
progress_bar = tqdm.tqdm(total=self.fiber_length, unit='m')
def rhs(z, AW):
"""
The right hand side of the differential equation to integrate.
"""
progress_bar.n = round(z, 3)
progress_bar.update(0)
x[:] = AW * np.exp(self.D * z)
At = plan_forward().copy()
IT = np.abs(At)**2
if self.RW is not None:
X[:] = IT
plan_inverse()
x[:] *= self.RW
plan_forward()
RS = dt * self.fr * X
X[:] = At * ((1 - self.fr) * IT + RS)
M = plan_inverse()
else:
X[:] = At * IT
M = plan_inverse()
rv = 1j * self.gamma * self.W * M * np.exp(
-self.D * z)
return rv
Z = np.linspace(0, self.fiber_length, self.z_saves)
solution = scipy.integrate.solve_ivp(
rhs,
t_span=(0, self.fiber_length),
y0=np.fft.ifft(self.A) * self.scale,
t_eval=Z,
rtol=self.rtol,
atol=self.atol,
method=self.method)
AW = solution.y.T
progress_bar.close()
# Transform the results into the time domain
At = np.zeros(AW.shape, dtype=AW.dtype)
for i in range(len(AW[:, 0])):
AW[i, :] *= np.exp(np.transpose(
self.D) * Z[i]) / self.scale
At[i, :] = np.fft.fft(AW[i, :])
AW[i, :] = np.fft.fftshift(AW[i, :]) * self.N * dt
return Solution(self.t, self.Omega, Z, At, AW)
```
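A minimal sketch of wiring the classes above together, assuming the package imports as `gnlse` and that `SechEnvelope` from `envelopes.py` is available; dispersion and Raman models are left at their `None` defaults and the numeric values are placeholders, not a tuned simulation.

```python
from gnlse.gnlse import GNLSE, GNLSESetup      # assumed import paths
from gnlse.envelopes import SechEnvelope

setup = GNLSESetup()
setup.resolution = 2**13        # grid points
setup.time_window = 12.5        # ps
setup.wavelength = 835          # nm
setup.fiber_length = 0.15       # m
setup.nonlinearity = 0.11       # 1/W/m
setup.impulse_model = SechEnvelope(Pmax=1000, FWHM=0.05)
# dispersion_model and raman_model stay None in this sketch.

solution = GNLSE(setup).run()   # Solution with t, W, Z, At, AW
solution.to_file('example_solution.mat')
```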
#### File: github/gnlse/nonlinearity.py
```python
import numpy as np
from scipy import interpolate
from gnlse.common import c
class Nonlinearity(object):
    def gamma(self, V):
"""Calculate nonlinear coefficient
for given frequency grid created during simulation
Parameters
----------
V : ndarray, (N)
Frequency vector
Returns
-------
ndarray, (N)
Nonlinear coefficient in frequency domain
"""
raise NotImplementedError('Nonlinearity not implemented')
class NonlinearityFromEffectiveArea(Nonlinearity):
"""Calculate the nonlinearity coefficient in
    frequency domain based on a modified gnlse example from
<NAME>, "Mode profile dispersion in the generalized
nonlinear Schrödinger equation,"
Opt. Express 15, 16110-16123 (2007).
Attributes
----------
neff : ndarray (N)
Effective refractive index
Aeff : ndarray (N)
Effective mode area
lambdas : ndarray (N)
Wavelength corresponding to refractive index
central_wavelength : float
Wavelength corresponding to pump wavelength in nm
n2 : float
Nonlinear index of refraction in m^2/W
"""
def __init__(self, neff, Aeff, lambdas, central_wavelength,
n2=2.7e-20):
# refractive indices
self.neff = neff
        # effective mode area in m^2
self.Aeff = Aeff
# wavelengths for neffs in nm
self.lambdas = lambdas
# central frequency in 1/ps [THz]
self.w0 = (2.0 * np.pi * c) / central_wavelength
# nonlinear index of refraction in m^2/W
self.n2 = n2
def gamma(self, V):
        # Angular frequency grid for the tabulated wavelengths [1/ps]
omega = 2 * np.pi * c / self.lambdas
# Extrapolate effective mode area for a frequency vector
Aeff_interp = interpolate.interp1d(omega,
self.Aeff,
kind='cubic',
fill_value="extrapolate")
        # Interpolate effective refractive index for a frequency vector
neff_interp = interpolate.interp1d(omega,
self.neff,
kind='cubic',
fill_value="extrapolate")
# Refractive index
neff = neff_interp(V + self.w0)
# and at central frequency
n0 = neff_interp(self.w0)
        # Effective mode area
        Aeff = Aeff_interp(V + self.w0)
        # and at the central frequency [m^2]
Aeff0 = Aeff_interp(self.w0)
gamma = self.n2 * self.w0 \
* n0 / c / 1e-9 / neff / np.sqrt(Aeff * Aeff0)
return gamma, np.power(Aeff0 / Aeff, 1. / 4)
```
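The frequency-dependent gamma can be exercised on its own with made-up fiber data; the arrays below are placeholders and the import paths are assumptions inferred from the file layout.

```python
import numpy as np
from gnlse.nonlinearity import NonlinearityFromEffectiveArea  # assumed import path
from gnlse.common import c

# Placeholder fiber data: wavelengths [nm], effective indices, mode areas [m^2].
lambdas = np.linspace(600, 1400, 50)
neff = 1.45 - 1e-5 * (lambdas - 1000)
Aeff = 2e-12 * (1 + 1e-4 * (lambdas - 1000))

nl = NonlinearityFromEffectiveArea(neff, Aeff, lambdas, central_wavelength=1000)

# Relative angular frequency grid, as GNLSE builds it (V = Omega - w0).
w0 = 2 * np.pi * c / 1000
V = np.linspace(-0.3, 0.3, 2**10) * w0
gamma, scale = nl.gamma(V)
print(gamma.shape, scale.shape)   # both (1024,)
```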
{
"source": "Jieun-Enna/KoreanRecipeGPT",
"score": 4
} |
#### File: ratsnlp/nlpbook/data_utils.py
```python
import torch
def data_collator(features):
"""
Very simple data collator that:
- simply collates batches of dict-like objects
- Performs special handling for potential keys named:
- `label`: handles a single value (int or float) per object
- `label_ids`: handles a list of values per object
- does not do any additional preprocessing
i.e., Property names of the input object will be used as corresponding inputs to the model.
See glue and ner for example of how it's useful.
"""
# In this function we'll make the assumption that all `features` in the batch
# have the same attributes.
# So we will look at the first element as a proxy for what attributes exist
# on the whole batch.
if not isinstance(features[0], dict):
features = [vars(f) for f in features]
first = features[0]
batch = {}
# Special handling for labels.
# Ensure that tensor is created with the correct type
# (it should be automatically the case, but let's make sure of it.)
if "label" in first and first["label"] is not None:
label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
dtype = torch.long if isinstance(label, int) else torch.float
batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
elif "label_ids" in first and first["label_ids"] is not None:
if isinstance(first["label_ids"], torch.Tensor):
batch["labels"] = torch.stack([f["label_ids"] for f in features])
else:
dtype = torch.long if type(first["label_ids"][0]) is int else torch.float
batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)
# Handling of all other possible keys.
# Again, we will use the first element to figure out which key/values are not None for this model.
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([f[k] for f in features])
else:
batch[k] = torch.tensor([f[k] for f in features], dtype=torch.long)
return batch
```
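A toy batch shows what the collator returns; the `ratsnlp.nlpbook.data_utils` import path is taken from the file header above and everything else is standard PyTorch.

```python
import torch
from ratsnlp.nlpbook.data_utils import data_collator  # import path from the file header

features = [
    {"input_ids": [101, 2023, 102], "attention_mask": [1, 1, 1], "label": 0},
    {"input_ids": [101, 2003, 102], "attention_mask": [1, 1, 1], "label": 1},
]
batch = data_collator(features)
print(batch["labels"])                # tensor([0, 1])
print(batch["input_ids"].shape)       # torch.Size([2, 3])
print(batch["attention_mask"].dtype)  # torch.int64
```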
#### File: ratsnlp/nlpbook/metrics.py
```python
import torch
def accuracy(preds, labels, ignore_index=None):
with torch.no_grad():
assert preds.shape[0] == len(labels)
correct = torch.sum(preds == labels)
total = torch.sum(torch.ones_like(labels))
if ignore_index is not None:
            # Exclude correct predictions that equal ignore_index
correct -= torch.sum(torch.logical_and(preds == ignore_index, preds == labels))
# accuracy의 분모 가운데 ignore index에 해당하는 것 제외
total -= torch.sum(labels == ignore_index)
return correct.to(dtype=torch.float) / total.to(dtype=torch.float)
```
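A worked toy case for the metric above: with `ignore_index=0`, the one correct prediction on the ignored class is removed from both the numerator and the denominator, so the score drops from 3/4 to 2/3. The import path mirrors the file header.

```python
import torch
from ratsnlp.nlpbook.metrics import accuracy  # import path from the file header

preds  = torch.tensor([1, 2, 0, 2])
labels = torch.tensor([1, 2, 0, 1])

print(accuracy(preds, labels))                  # 3 correct of 4 -> 0.75
print(accuracy(preds, labels, ignore_index=0))  # -> 2/3 ~= 0.6667
```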
#### File: ratsnlp/nlpbook/utils.py
```python
import os
import sys
import tqdm
import logging
import requests
from transformers import HfArgumentParser
REMOTE_DATA_MAP = {
"nsmc": {
"train": {
"web_url": "https://github.com/e9t/nsmc/raw/master/ratings_train.txt",
"fname": "train.txt",
},
"val": {
"web_url": "https://github.com/e9t/nsmc/raw/master/ratings_test.txt",
"fname": "val.txt",
},
},
"klue-nli": {
"train": {
"googledrive_file_id": "18LhrHaPEW0VITMPfnwKXJ6bNuklBdi4U",
"fname": "klue_nli_train.json",
},
"val": {
"googledrive_file_id": "1UKIDAFOFuDSah7A66FZXSA8XUWUHhBAd",
"fname": "klue_nli_dev.json",
}
},
"ner": {
"train": {
"googledrive_file_id": "1RP764owqs1kZeHcjFnCX7zXt2EcjGY1i",
"fname": "train.txt",
},
"val": {
"googledrive_file_id": "1bEPNWT5952rD3xjg0LfJBy3hLHry3yUL",
"fname": "val.txt",
},
},
"korquad-v1": {
"train": {
"web_url": "https://korquad.github.io/dataset/KorQuAD_v1.0_train.json",
"fname": "KorQuAD_v1.0_train.json",
},
"val": {
"web_url": "https://korquad.github.io/dataset/KorQuAD_v1.0_dev.json",
"fname": "KorQuAD_v1.0_dev.json",
}
}
}
REMOTE_MODEL_MAP = {
"kogpt2": {
"merges": {
"googledrive_file_id": "19-vpk-RAPhmIM1pPJ66F2Kbj4dW5V5sV",
"fname": "merges.txt",
},
"vocab": {
"googledrive_file_id": "19vjuxYOmlNTfg8kYKOPOUlZERm-QoTnj",
"fname": "vocab.json",
},
"model": {
"googledrive_file_id": "1dDGtsMy1NsfpuvgX8XobBsCYyctn5Xex",
"fname": "pytorch_model.bin",
},
"config": {
"googledrive_file_id": "1z6obNRWPHoVrMzT9THElblebdovuDLUZ",
"fname": "config.json",
},
},
}
GOOGLE_DRIVE_URL = "https://docs.google.com/uc?export=download"
logger = logging.getLogger("ratsnlp") # pylint: disable=invalid-name
def save_response_content(response, save_path):
with open(save_path, "wb") as f:
content_length = response.headers.get("Content-Length")
total = int(content_length) if content_length is not None else None
progress = tqdm.tqdm(
unit="B",
unit_scale=True,
total=total,
initial=0,
desc="Downloading",
disable=bool(logger.getEffectiveLevel() == logging.NOTSET),
)
for chunk in response.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
f.write(chunk)
progress.close()
def get_valid_path(cache_dir, save_fname, make_dir=True):
    # Resolve the cache directory to an absolute path
if cache_dir.startswith("~"):
cache_dir = os.path.expanduser(cache_dir)
else:
cache_dir = os.path.abspath(cache_dir)
if make_dir:
os.makedirs(cache_dir, exist_ok=True)
valid_save_path = os.path.join(cache_dir, save_fname)
return valid_save_path
def google_download(file_id,
save_fname,
cache_dir="~/cache",
force_download=False):
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
valid_save_path = get_valid_path(cache_dir, save_fname)
    # Use the cached file if it already exists
if os.path.exists(valid_save_path) and not force_download:
logger.info(f"cache file({valid_save_path}) exists, using cache!")
return valid_save_path
# init a HTTP session
session = requests.Session()
# make a request
response = session.get(GOOGLE_DRIVE_URL, params={'id': file_id}, stream=True)
# get confirmation token
token = get_confirm_token(response)
if token:
params = {'id': file_id, 'confirm': token}
response = session.get(GOOGLE_DRIVE_URL, params=params, stream=True)
# download to disk
save_response_content(response, valid_save_path)
return valid_save_path
def web_download(url,
save_fname,
cache_dir="~/cache",
proxies=None,
etag_timeout=10,
force_download=False):
"""
    Download function, adapted from the Hugging Face and SK T-Brain download utilities:
https://github.com/huggingface/transformers/blob/master/src/transformers/file_utils.py
https://github.com/SKTBrain/KoBERT/blob/master/kobert/utils.py
"""
valid_save_path = get_valid_path(cache_dir, save_fname)
    # Use the cached file if it already exists
if os.path.exists(valid_save_path) and not force_download:
logger.info(f"cache file({valid_save_path}) exists, using cache!")
return valid_save_path
    # Check that the URL is valid
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
etag = None
try:
response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
if response.status_code == 200:
etag = response.headers.get("ETag")
except (EnvironmentError, requests.exceptions.Timeout):
pass
if etag is None:
raise ValueError(f"not valid URL({url}), cannot download resources")
response = requests.get(url, stream=True)
save_response_content(response, valid_save_path)
return valid_save_path
def download_downstream_dataset(args):
data_name = args.downstream_corpus_name.lower()
if data_name in REMOTE_DATA_MAP.keys():
cache_dir = os.path.join(args.downstream_corpus_root_dir, data_name)
for value in REMOTE_DATA_MAP[data_name].values():
if "web_url" in value.keys():
web_download(
url=value["web_url"],
save_fname=value["fname"],
cache_dir=cache_dir,
force_download=args.force_download,
)
else:
google_download(
file_id=value["googledrive_file_id"],
save_fname=value["fname"],
cache_dir=cache_dir,
force_download=args.force_download
)
else:
raise ValueError(f"not valid data name({data_name}), cannot download resources")
def download_pretrained_model(args, config_only=False):
pretrained_model_name = args.pretrained_model_name.lower()
if pretrained_model_name in REMOTE_MODEL_MAP.keys():
for key, value in REMOTE_MODEL_MAP[pretrained_model_name].items():
if not config_only or (config_only and key == "config"):
if "web_url" in value.keys():
web_download(
url=value["web_url"],
save_fname=value["fname"],
cache_dir=args.pretrained_model_cache_dir,
force_download=args.force_download,
)
else:
google_download(
file_id=value["googledrive_file_id"],
save_fname=value["fname"],
cache_dir=args.pretrained_model_cache_dir,
force_download=args.force_download,
)
else:
raise ValueError(f"not valid model name({pretrained_model_name}), cannot download resources")
def set_logger(args):
import torch
if torch.cuda.is_available():
stream_handler = logging.StreamHandler()
formatter = logging.Formatter(
fmt="%(levelname)s:%(name)s:%(message)s",
)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.setLevel(logging.INFO)
logger.info("Training/evaluation parameters %s", args)
def set_seed(args):
if args.seed is not None:
        # TODO: extend to pytorch-lightning's seed_everything in the future
from transformers import set_seed
set_seed(args.seed)
print(f"set seed: {args.seed}")
else:
print("not fixed seed")
def load_arguments(argument_class, json_file_path=None):
parser = HfArgumentParser(argument_class)
if json_file_path is not None:
args, = parser.parse_json_file(json_file=json_file_path)
elif len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
args, = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
args, = parser.parse_args_into_dataclasses()
return args
```
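The cache-path handling can be checked without any network access; a tiny sketch (import path taken from the file header, `make_dir=False` avoids creating directories as a side effect):

```python
from ratsnlp.nlpbook.utils import get_valid_path  # import path from the file header

# Expands '~' and returns an absolute path without touching the network.
path = get_valid_path("~/cache", "train.txt", make_dir=False)
print(path)  # e.g. /home/<user>/cache/train.txt
```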
{
"source": "jieunyoo/directional-pixel-detectors",
"score": 2
} |
#### File: directional-pixel-detectors/2D_models/angleOnlyModel.py
```python
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
import os
#import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas import read_csv
import math
import seaborn as sns
from keras.callbacks import CSVLogger
from keras.callbacks import EarlyStopping, ModelCheckpoint
df1 = pd.read_csv('recon650k.csv')
df2 = pd.read_csv('labels650k.csv')
X = df1.values
y = df2.values
n = 650000
X = np.reshape(X, (n,13,21,1))
#df2.head()
df2.drop('x-entry', axis=1, inplace=True)
df2.drop('y-entry', axis=1, inplace=True)
df2.drop('z-entry', axis=1, inplace=True)
df2.drop('n_x', axis=1, inplace=True)
df2.drop('n_y', axis=1, inplace=True)
df2.drop('n_z', axis=1, inplace=True)
df2.drop('number_eh_pairs', axis=1, inplace=True)
#df2.drop('cotAlpha', axis=1, inplace=True)
#df2.drop('cotBeta', axis=1, inplace=True)
print(df2.head())
#df2.shape
#reset y since you dropped columns
y = df2.values
df2.head()
#https://keras.io/api/callbacks/#csvlogger
#from https://keras.io/guides/writing_your_own_callbacks/
class CustomCallback(keras.callbacks.Callback):
def on_train_begin(self, logs=None):
keys = list(logs.keys())
print("Starting training; got log keys: {}".format(keys))
def on_train_end(self, logs=None):
keys = list(logs.keys())
print("Stop training; got log keys: {}".format(keys))
def on_epoch_begin(self, epoch, logs=None):
keys = list(logs.keys())
print("Start epoch {} of training; got log keys: {}".format(epoch, keys))
def on_epoch_end(self, epoch, logs=None):
keys = list(logs.keys())
print("End epoch {} of training; got log keys: {}".format(epoch, keys))
def on_test_begin(self, logs=None):
keys = list(logs.keys())
print("Start testing; got log keys: {}".format(keys))
def on_test_end(self, logs=None):
keys = list(logs.keys())
print("Stop testing; got log keys: {}".format(keys))
def on_predict_begin(self, logs=None):
keys = list(logs.keys())
print("Start predicting; got log keys: {}".format(keys))
def on_predict_end(self, logs=None):
keys = list(logs.keys())
print("Stop predicting; got log keys: {}".format(keys))
def on_train_batch_begin(self, batch, logs=None):
keys = list(logs.keys())
print("...Training: start of batch {}; got log keys: {}".format(batch, keys))
def on_train_batch_end(self, batch, logs=None):
keys = list(logs.keys())
print("...Training: end of batch {}; got log keys: {}".format(batch, keys))
def on_test_batch_begin(self, batch, logs=None):
keys = list(logs.keys())
print("...Evaluating: start of batch {}; got log keys: {}".format(batch, keys))
def on_test_batch_end(self, batch, logs=None):
keys = list(logs.keys())
print("...Evaluating: end of batch {}; got log keys: {}".format(batch, keys))
# def on_predict_batch_begin(self, batch, logs=None):
# keys = list(logs.keys())
# print("...Predicting: start of batch {}; got log keys: {}".format(batch, keys))
# def on_predict_batch_end(self, batch, logs=None):
# keys = list(logs.keys())
# print("...Predicting: end of batch {}; got log keys: {}".format(batch, keys))
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size = 0.20, random_state = 0)
print(X.shape, y.shape)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
#scale input data
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train.reshape(-1, X_train.shape[-1])).reshape(X_train.shape)
X_test = scaler.transform(X_test.reshape(-1, X_test.shape[-1])).reshape(X_test.shape)
#make a simple model
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), strides=(2, 2), activation='relu', input_shape=(13, 21, 1)))
model.add(layers.MaxPooling2D(2, 2))
model.add(layers.Conv2D(64, (3, 3), strides=(2, 2), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dropout(0.1))
model.add(layers.Dense(2, activation='linear'))
model.summary()
checkpoint_path = "cp.ckpt"
# Create a callback that saves the model's weights
# currently, model weights are saved for each training
# to do - update for early stopping
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
save_weights_only=True,
verbose=1)
#https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/EarlyStopping
#patience: Number of epochs with no improvement after which training will be stopped
earlyStop_callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=20)
csv_logger = CSVLogger('log.csv', append=True, separator=';')
batch_size = 64
epochs = 200
model.compile(loss=keras.losses.MeanSquaredError(),
optimizer='adam',
metrics=['mean_squared_error'])
history = model.fit(
X_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(X_test, y_test),
callbacks=[cp_callback, csv_logger, earlyStop_callback],
)
res = model.evaluate(
X_test, y_test, batch_size=batch_size,
)
predictions = model.predict(X_test, batch_size=batch_size, callbacks=[CustomCallback()])
#save model
# serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
#save full model in hd5 format
model.save('my_model.h5')
print(len(predictions))
df_predict = pd.DataFrame(predictions, columns=['cotAlpha', 'cotBeta'])
print(df_predict)
df_predict.head()
trueLabels = pd.DataFrame(y_test, columns=['cotAlpha', 'cotBeta'])
plt.plot(history.history['loss'], label='train_loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.savefig('loss.png')
plt.plot(history.history['mean_squared_error'], label = 'MSE')
plt.savefig('mse.png')
df_predict.to_csv('predictions.csv')
trueLabels.to_csv('trueLabels.csv')
```
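Since the script persists the full model as `my_model.h5`, inference can be sketched with the standard Keras loader; note that the fitted `StandardScaler` is not saved by the script, so the same scaling must be reproduced in practice (e.g. by persisting the scaler with joblib), which this sketch only flags in a comment.

```python
import numpy as np
import tensorflow as tf

# Load the full model written by the training script above.
model = tf.keras.models.load_model('my_model.h5')

# Inputs must be shaped (n, 13, 21, 1) and scaled exactly like the training
# data; the StandardScaler is not persisted above, so save it alongside the
# model in practice. The zeros below are only a shape placeholder.
X_new = np.zeros((1, 13, 21, 1), dtype=np.float32)
pred = model.predict(X_new)
print(pred.shape)  # (1, 2) -> predicted (cotAlpha, cotBeta)
```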
#### File: directional-pixel-detectors/3D_NN_cotBeta_models/bigDataGenTest.py
```python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import math
from numpy import asarray
from numpy import savetxt
import os
def parseFile(filein,nevents):
with open(filein) as f:
lines = f.readlines()
header = lines.pop(0).strip()
pixelstats = lines.pop(0).strip()
print("Header: ", header)
print("Pixelstats: ", pixelstats)
clusterctr = 0
b_getclusterinfo = False
cluster_truth =[]
timeslice = 0
# instantiate 4-d np array [cluster number, time slice, pixel row, pixel column]
cur_slice = []
cur_cluster = []
events = []
for line in lines:
#uncomment line.strip() to get the print out of each one
#print(line.strip())
## Get cluster truth information
if "<cluster>" in line:
# save the last time slice too
if timeslice > 0: cur_cluster.append(cur_slice)
cur_slice = []
timeslice = 0
b_getclusterinfo = True
# save the last cluster
if clusterctr > 0:
# print("len of cur_cluster = ", len(cur_cluster))
events.append(cur_cluster)
cur_cluster = []
#print("New cluster ",clusterctr)
clusterctr += 1
# Let's just look at the first 10 clusters for now
if clusterctr > nevents: break
continue
if b_getclusterinfo:
cluster_truth.append(line.strip().split())
b_getclusterinfo = False
## Put cluster information into np array
if "time slice" in line:
print("time slice ", timeslice, ", ", line.strip())
print("Length of cur_slice = ", len(cur_slice))
if timeslice > 0 and timeslice < 8: cur_cluster.append(cur_slice)
cur_slice = []
timeslice += 1
continue
if timeslice > 0 and b_getclusterinfo == False:
cur_row = line.strip().split()
# print(len(cur_row))
cur_slice.append([10*float(item) for item in cur_row])
print("Number of clusters = ", clusterctr)
print(cluster_truth)
print("Number of events = ",len(events))
print("Number of time slices in cluster = ", len(events[0]))
arr_truth = np.array(cluster_truth)
arr_events = np.array( events )
#convert into pandas DF
df = {}
#truth quantities - all are dumped to DF
df = pd.DataFrame(arr_truth, columns = ['x-entry', 'y-entry','z-entry', 'n_x', 'n_y', 'n_z', 'number_eh_pairs'])
df['n_x']=df['n_x'].astype(float)
df['n_y']=df['n_y'].astype(float)
df['n_z']=df['n_z'].astype(float)
df['cotBeta'] = df['n_y']/df['n_z']
df.drop('x-entry', axis=1, inplace=True)
df.drop('y-entry', axis=1, inplace=True)
df.drop('z-entry', axis=1, inplace=True)
df.drop('n_x', axis=1, inplace=True)
df.drop('n_y', axis=1, inplace=True)
df.drop('n_z', axis=1, inplace=True)
df.drop('number_eh_pairs', axis=1, inplace=True)
df.to_csv("labelsTest.csv", index=False)
return arr_events, arr_truth
def main():
#arr_events, arr_truth = parseFile(filein="pixel_clusters_d00000.out", nevents=10)
#use for test
arr_events, arr_truth = parseFile(filein="pixel_clusters_d16112.out", nevents=10)
print("The shape of the event array: ", arr_events.shape)
print("The ndim of the event array: ", arr_events.ndim)
print("The dtype of the event array: ", arr_events.dtype)
print("The size of the event array: ", arr_events.size)
print("The max value in the array is: ", np.amax(arr_events))
# print("The shape of the truth array: ", arr_truth.shape)
for i, e in enumerate(arr_events):
#integrated_cluster = np.sum(e,axis=0)
print("event number = ", i)
print("event array shape = ", e.shape)
os.chdir('/Users/jieunyoo/april12_3dCNN/figures-test')
path=os.getcwd()
os.mkdir('cluster'+str(i))
os.chdir('cluster'+str(i))
max_val = np.amax(e)
for j,s in enumerate(e):
np.save('event{0:006d}_frame{1:02d}.npy'.format(i,j),s)
path_parent = os.path.dirname(os.getcwd())
os.chdir(path_parent)
if __name__ == "__main__":
main()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
```
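The per-frame `.npy` files written by `main()` can be read back and stacked into a single (time, row, column) array; a small sketch assuming the `clusterN` directory layout produced above:

```python
import glob
import os
import numpy as np

cluster_dir = 'cluster0'  # one directory written by main()
frame_files = sorted(glob.glob(os.path.join(cluster_dir, 'event*_frame*.npy')))

# Stack the saved 2D time slices into a (n_frames, n_rows, n_cols) array.
frames = np.stack([np.load(f) for f in frame_files])
print(frames.shape)
```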
{
"source": "jieunyoo/hangulapp",
"score": 2
} |
#### File: migrations/versions/4c744e3adbf0_.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4c744e3adbf0'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('quizquestions1',
sa.Column('question', sa.String(), nullable=True),
sa.Column('option1', sa.String(), nullable=True),
sa.Column('option2', sa.String(), nullable=True),
sa.Column('option3', sa.String(), nullable=True),
sa.Column('option4', sa.String(), nullable=True),
sa.Column('answer', sa.String(), nullable=True),
sa.Column('category', sa.String(), nullable=True),
sa.Column('questionid', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('questionid')
)
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('default', sa.Boolean(), nullable=True),
sa.Column('permissions', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_index(op.f('ix_roles_default'), 'roles', ['default'], unique=False)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=64), nullable=True),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.Column('confirmed', sa.Boolean(), nullable=True),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('location', sa.String(length=64), nullable=True),
sa.Column('about_me', sa.Text(), nullable=True),
sa.Column('member_since', sa.DateTime(), nullable=True),
sa.Column('last_seen', sa.DateTime(), nullable=True),
sa.Column('avatar_hash', sa.String(length=32), nullable=True),
sa.Column('memberlevel', sa.Integer(), nullable=True),
sa.Column('memberexpirationdate', sa.DateTime(), nullable=True),
sa.Column('quizcount', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
op.create_table('quizzes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('quizscore', sa.Integer(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('answered', sa.PickleType(), nullable=True),
sa.Column('countquestions', sa.Integer(), nullable=True),
sa.Column('quizname', sa.String(length=128), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_quizzes_timestamp'), 'quizzes', ['timestamp'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_quizzes_timestamp'), table_name='quizzes')
op.drop_table('quizzes')
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
op.drop_index(op.f('ix_roles_default'), table_name='roles')
op.drop_table('roles')
op.drop_table('quizquestions1')
# ### end Alembic commands ###
```
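This revision is applied with Alembic's normal upgrade machinery; a hedged sketch via the Python API, where the `alembic.ini` location is an assumption (a Flask-Migrate project would typically run `flask db upgrade` instead):

```python
# Sketch only: assumes an alembic.ini that points at the migrations/ directory
# containing this revision file.
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")
command.upgrade(cfg, "head")    # runs upgrade() above
# command.downgrade(cfg, "-1")  # would run downgrade() above
```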
{
"source": "jievince/nebula-python",
"score": 2
} |
#### File: nebula-python/nebula/AsyncClient.py
```python
import asyncio
from .Common import *
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from graph import AsyncGraphService
class AsyncGraphClient(object):
def __init__(self, ip, port, loop = None):
"""Initializer
Arguments: empty
Returns: empty
"""
self._loop = loop or asyncio.get_event_loop()
transport = TSocket.TSocket(ip, port)
transport = TTransport.TBufferedTransport(transport)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
transport.open()
self._client = AsyncGraphService.Client(protocol)
self._iprot = protocol
self._session_id = 0
self._reqid_callback = {}
def get_loop(self):
return self._loop
def authenticate(self, user, password):
"""authenticate to graph server
Arguments:
- user: the user name
- password: the <PASSWORD>
Returns:
AuthResponse: the response of graph
AuthResponse's attributes:
- error_code
- session_id
- error_msg
"""
if self._client is None:
raise AuthException("No client")
try:
fut = self._client.authenticate(user, password)
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
self._client.recv_authenticate(self._iprot, mtype, rseqid)
resp = fut.result()
if resp.error_code == 0:
self._session_id = resp.session_id
return resp
except Exception as x:
raise AuthException("Auth failed: {}".format(x))
@asyncio.coroutine
def async_execute(self, statement, callback=None):
"""execute statement to graph server
Arguments:
- statement: the statement
Returns:
SimpleResponse: the response of graph
SimpleResponse's attributes:
- error_code
- error_msg
"""
if self._client is None:
raise ExecutionException("No client")
try:
fut = self._client.execute(self._session_id, statement)
if callback is None:
return
self._reqid_callback[self._client._seqid] = callback
yield from (asyncio.sleep(0))
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
self._client.recv_execute(self._iprot, mtype, rseqid)
resp = fut.result()
cb = self._reqid_callback.get(rseqid)
if cb is not None:
callback(SimpleResponse(resp.error_code, resp.error_msg))
self._reqid_callback.pop(rseqid)
except Exception as x:
raise ExecutionException("Execute `{}' failed: {}".format(statement, x))
@asyncio.coroutine
def async_execute_query(self, statement, callback=None):
"""execute query statement to graph server
Arguments:
- statement: the statement
Returns:
ExecutionResponse: the response of graph
ExecutionResponse's attributes:
- error_code
- latency_in_us
- error_msg
- column_names
- rows
- space_name
"""
if self._client is None:
raise ExecutionException("No client")
try:
fut = self._client.execute(self._session_id, statement)
if callback is None:
return
self._reqid_callback[self._client._seqid] = callback
yield from (asyncio.sleep(0))
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
self._client.recv_execute(self._iprot, mtype, rseqid)
resp = fut.result()
cb = self._reqid_callback.get(rseqid)
if cb is not None:
callback(resp)
self._reqid_callback.pop(rseqid)
except Exception as x:
raise ExecutionException("Execute `{}' failed: {}".format(statement, x))
def sign_out(self):
"""sign out: Users should call sign_out when catch the exception or exit
"""
if self._client is None:
return
try:
if self._session_id != 0:
self._client.signout(self._session_id)
except Exception as x:
raise Exception("SignOut failed: {}".format(x))
def is_none(self):
"""is_none: determine if the client creation was successful
Returns:
True or False
"""
return self._client is None
```
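A hedged usage sketch for the async client above; it assumes a nebula-graph server listening at the given address and only calls methods defined in this file.

```python
import asyncio
from nebula.AsyncClient import AsyncGraphClient  # assumed import path

def on_result(resp):
    # Receives the ExecutionResponse once recv_execute completes.
    print(resp.error_code, resp.error_msg)

client = AsyncGraphClient('127.0.0.1', 3699)   # assumes a running graphd
auth_resp = client.authenticate('user', 'password')
assert auth_resp.error_code == 0

loop = client.get_loop()
loop.run_until_complete(
    client.async_execute_query('SHOW SPACES', callback=on_result))
client.sign_out()
```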
#### File: nebula-python/nebula/Common.py
```python
class AuthException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
class ExecutionException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
class SimpleResponse:
"""
Attributes:
- error_code
- error_msg
"""
def __init__(self, code, msg):
self.error_code = code
self.error_msg = msg
```
#### File: nebula-python/tests/test_client.py
```python
import pytest
import sys
import os
import time
import threading
sys.path.insert(0, '../')
from graph import ttypes
from nebula.ConnectionPool import ConnectionPool
from nebula.Client import GraphClient
from nebula.Common import *
def get_port():
return 3699
def create_pool(port):
return ConnectionPool('127.0.0.1', port)
@pytest.fixture(scope='module')
def get_pool():
return create_pool(get_port())
@pytest.fixture(scope='function')
def get_client(get_pool):
return GraphClient(get_pool)
def check_result(rows, expect):
if len(rows) != len(expect):
print('len(rows)[%d] != len(expect)[%d]' % (len(rows), len(expect)))
return False
for row, i in zip(rows, range(0, len(expect))):
for col, j in zip(row.columns, range(0, len(expect[i]))):
if col.getType() == ttypes.ColumnValue.__EMPTY__:
print('ERROR: type is empty')
return False
if col.getType() == ttypes.ColumnValue.BOOL_VAL:
if col.get_bool_val() != expect[i][j]:
print('result: %d, expect: %d' % (col.get_bool_val(), expect[i][j]))
return False
continue
if col.getType() == ttypes.ColumnValue.INTEGER:
if col.get_integer() != expect[i][j]:
print('result: %d, expect: %d' % (col.get_integer(), expect[i][j]))
return False
continue
if col.getType() == ttypes.ColumnValue.ID:
if col.get_id() != expect[i][j]:
print('result: %d, expect: %d' % (col.get_id(), expect[i][j]))
return False
continue
if col.getType() == ttypes.ColumnValue.STR:
if col.get_str().decode('utf-8') != expect[i][j]:
print('result: %s, expect: %s' % (col.get_str().decode('utf-8'),
expect[i][j]))
return False
continue
if col.getType() == ttypes.ColumnValue.DOUBLE_PRECISION:
if col.get_double_precision() != expect[i][j]:
print('result: %d, expect: %d' % (col.get_double_precision(),
expect[i][j]))
return False
continue
if col.getType() == ttypes.ColumnValue.TIMESTAMP:
if col.get_timestamp() != expect[i][j]:
print('result: %d, expect: %d' % (col.get_timestamp(),
expect[i][j]))
return False
continue
print('ERROR: Type unsupported')
return False
return True
def test_create_schema(get_client):
try:
client = get_client
if client is None:
assert False
return
resp = client.authenticate('user', 'password')
assert resp.error_code == 0, resp.error_msg
client.execute('DROP SPACE space1')
resp = client.execute('CREATE SPACE space1')
assert resp.error_code == 0, resp.error_msg
time.sleep(5)
count = 0
while count < 100:
resp = client.execute('USE space1')
if resp.error_code == 0:
break
print(resp.error_msg)
count += 1
resp = client.execute('CREATE TAG person(name string, age int)')
assert resp.error_code == 0, resp.error_msg
client.execute('CREATE EDGE like(likeness double)')
assert resp.error_code == 0, resp.error_msg
time.sleep(12)
resp = client.execute_query('SHOW TAGS')
assert resp.error_code == 0, resp.error_msg
assert len(resp.rows) == 1, resp.error_msg
assert resp.rows[0].columns[1].get_str().decode('utf-8') == 'person', resp.error_msg
resp = client.execute_query('SHOW EDGES')
assert resp.error_code == 0, resp.error_msg
assert len(resp.rows) == 1, resp.error_msg
assert resp.rows[0].columns[1].get_str().decode('utf-8') == 'like', resp.error_msg
time.sleep(10)
client.sign_out()
except Exception as ex:
print(ex)
client.sign_out()
assert False
def test_insert_data(get_client):
try:
client = get_client
if client is None:
assert False, "client is None"
return
resp = client.authenticate('user', 'password')
assert resp.error_code == 0, resp.error_msg
resp = client.execute('USE space1')
assert resp.error_code == 0, resp.error_msg
time.sleep(1)
resp = client.execute('INSERT VERTEX person(name, age) VALUES 1:(\'Bob\', 10)')
assert resp.error_code == 0, resp.error_msg
resp = client.execute('INSERT VERTEX person(name, age) VALUES 2:(\'Lily\', 9)')
assert resp.error_code == 0, resp.error_msg
resp = client.execute('INSERT VERTEX person(name, age) VALUES 3:(\'Tom\', 10)')
assert resp.error_code == 0, resp.error_msg
resp = client.execute('INSERT EDGE like(likeness) VALUES 1->2:(80.0)')
assert resp.error_code == 0, resp.error_msg
resp = client.execute('INSERT EDGE like(likeness) VALUES 1->3:(90.0)')
assert resp.error_code == 0, resp.error_msg
client.sign_out()
except Exception as ex:
print(ex)
client.sign_out()
assert False
def test_query_data(get_client):
try:
client = get_client
if client is None:
assert False
return
resp = client.authenticate('user', 'password')
if resp.error_code != 0:
print("ERROR: %s" % resp.error_msg)
assert resp.error_code == 0
time.sleep(1)
resp = client.execute('USE space1')
assert resp.error_code == 0
resp = client.execute_query('GO FROM 1 OVER like YIELD $$.person.name, '
'$$.person.age, like.likeness')
assert resp.error_code == 0
assert len(resp.rows) == 2
expect_result = [['Lily', 9, 80.0], ['Tom', 10, 90.0]]
assert check_result(resp.rows, expect_result)
client.sign_out()
except Exception as ex:
print(ex)
client.sign_out()
assert False
def test_multi_thread():
# Test multi thread
connection_pool = ConnectionPool('127.0.0.1', get_port())
global success_flag
success_flag = True
def main_test():
client = None
global success_flag
try:
client = GraphClient(connection_pool)
if client.is_none():
print("ERROR: None client")
success_flag = False
return
space_name = 'space_' + threading.current_thread().getName()
resp = client.authenticate('user', 'password')
if resp.error_code != 0:
raise AuthException('Auth failed')
client.execute('DROP SPACE %s' % space_name)
resp = client.execute('CREATE SPACE %s' % space_name)
if resp.error_code != 0:
raise ExecutionException('CREATE SPACE failed')
time.sleep(3)
count = 0
while count < 100:
resp = client.execute('USE %s' % space_name)
if resp.error_code == 0:
break
print(resp.error_msg)
count += 1
resp = client.execute('USE %s' % space_name)
if resp.error_code != 0:
raise ExecutionException('USE SPACE failed')
client.sign_out()
except Exception as x:
print(x)
client.sign_out()
success_flag = False
return
thread1 = threading.Thread(target=main_test, name='thread1')
thread2 = threading.Thread(target=main_test, name='thread2')
thread3 = threading.Thread(target=main_test, name='thread3')
thread4 = threading.Thread(target=main_test, name='thread4')
thread1.start()
thread2.start()
thread3.start()
thread4.start()
thread1.join()
thread2.join()
thread3.join()
thread4.join()
connection_pool.close()
assert success_flag
```
{
"source": "jiewufd/Quant",
"score": 3
} |
#### File: jiewufd/Quant/data_prepare.py
```python
import numpy as np
import pandas as pd
import collect as clct
import constants
import db_operations as dbop
def _check_int(arg):
if type(arg) != int:
raise ValueError("{} is not a int".format(arg))
def _check_iterable(arg):
if not hasattr(arg, "__iter__"):
raise ValueError("{} is not iterable".format(arg))
def _make_iterable(arg):
if type(arg) == str or not hasattr(arg, "__iter__"):
return [arg]
else:
return arg
def _prefix(prefix, df: pd.DataFrame, copy=False):
if copy:
df = df.copy()
df.columns = list(map(lambda col: str(prefix) + "_" + col, df.columns))
return df
def _move(days, df: pd.DataFrame, cols=None, prefix=True):
_check_int(days)
if cols is None:
cols = df.columns
cols = _make_iterable(cols)
if days > 0:
pre = "p{}mv".format(abs(days))
df_mv = df[cols].iloc[days:].copy()
df_mv.index = df.index[:-days]
else:
pre = "f{}mv".format(abs(days))
df_mv = df[cols].iloc[:days].copy()
df_mv.index = df.index[-days:]
if prefix:
return _prefix(pre, df_mv)
else:
return df_mv
def _rolling(rolling_type, days, df: pd.DataFrame, cols, move=0,
has_prefix=True):
_check_int(days)
cols = _make_iterable(cols)
period = abs(days)
if rolling_type == "max":
df_rolling = df[cols].rolling(window=abs(days)).max()
elif rolling_type == "min":
df_rolling = df[cols].rolling(window=abs(days)).min()
elif rolling_type == "mean":
        df_rolling = df[cols].rolling(window=abs(days)).mean()
else:
raise ValueError(
"rolling_type='{}' is not supported.".format(rolling_type))
if move != 0:
df_rolling = _move(move, df_rolling)
n = len(df_rolling)
idxes = df_rolling.index
if days > 0:
pre = "f" + str(abs(days)) + rolling_type
df_rolling = df_rolling.iloc[period - 1:n]
df_rolling.index = idxes[period - 1:n]
else:
pre = "p" + str(abs(days)) + rolling_type
df_rolling = df_rolling.iloc[period - 1:n]
if n - period + 1 >= 0:
df_rolling.index = idxes[:n - period + 1]
if has_prefix:
return _prefix(pre, df_rolling)
else:
return df_rolling
def _rolling_max(days, df: pd.DataFrame, cols, move=0, has_prefix=True):
_check_int(days)
cols = _make_iterable(cols)
period = abs(days)
df_rolling = df[cols].rolling(window=abs(days)).max()
if move != 0:
# print("--------",move)
# print(df_rolling[df["code"] == "600887.SH"]["high"].iloc[:30])
df_rolling = _move(move,
df_rolling) # print(df_rolling[df["code"] == "600887.SH"]["f1mv_high"].iloc[:30])
n = len(df_rolling)
idxes = df_rolling.index
if days > 0:
pre = "f" + str(abs(days)) + "max"
df_rolling = df_rolling.iloc[period - 1:n]
df_rolling.index = idxes[
period - 1:n] # df_rolling = df_rolling.iloc[period-1:n+move] # df_rolling.index = df.index[period-1-move:n]
else:
pre = "p" + str(abs(days)) + "max"
df_rolling = df_rolling.iloc[period - 1:n]
if n - period + 1 >= 0:
df_rolling.index = idxes[:n - period + 1]
# df_rolling = df_rolling.iloc[period-1+move:n] # df_rolling.index = df.index[:n-period+1-move]
if has_prefix:
return _prefix(pre, df_rolling)
else:
return df_rolling
def _rolling_min(days, df: pd.DataFrame, cols, move=0, has_prefix=True):
_check_int(days)
cols = _make_iterable(cols)
period = abs(days)
df_rolling = df[cols].rolling(window=abs(days)).min()
if move != 0:
# print("--------",move)
# print(df_rolling[df["code"] == "600887.SH"]["high"].iloc[:30])
df_rolling = _move(move,
df_rolling) # print(df_rolling[df["code"] == "600887.SH"]["f1mv_high"].iloc[:30])
n = len(df_rolling)
idxes = df_rolling.index
if days > 0:
pre = "f" + str(abs(days)) + "min"
df_rolling = df_rolling.iloc[period - 1:n]
df_rolling.index = idxes[period - 1:n]
else:
pre = "p" + str(abs(days)) + "min"
df_rolling = df_rolling.iloc[period - 1:n]
if n - period + 1 >= 0:
df_rolling.index = idxes[:n - period + 1]
if has_prefix:
return _prefix(pre, df_rolling)
else:
return df_rolling
def _rolling_mean(days, df: pd.DataFrame, cols, move=0, has_prefix=True):
_check_int(days)
cols = _make_iterable(cols)
period = abs(days)
df_rolling = df[cols].rolling(window=abs(days)).mean()
if move != 0:
df_rolling = _move(move, df_rolling)
n = len(df_rolling)
idxes = df_rolling.index
if days > 0:
pre = "f" + str(abs(days)) + "mean"
df_rolling = df_rolling.iloc[period - 1:n]
df_rolling.index = idxes[period - 1:n]
else:
pre = "p" + str(abs(days)) + "mean"
df_rolling = df_rolling.iloc[period - 1:n]
if n - period + 1 >= 0:
df_rolling.index = idxes[:n - period + 1]
if has_prefix:
return _prefix(pre, df_rolling)
else:
return df_rolling
def change_rate(df1: pd.DataFrame, df2: pd.DataFrame, cols1=None, cols2=None):
if cols1:
df1 = df1[cols1].copy()
if cols2:
df2 = df2[cols2].copy()
if df1.shape[1] != df2.shape[1]:
raise ValueError(
"Column length not the same:{0}!={1}".format(df1.shape[1],
df2.shape[1]))
df1 = df1.copy()
df1.columns = df2.columns
df3 = (df2 - df1) / df1
df3 = _prefix("change_rate", df3)
return df3
def create_df(cursor, table_name, start=None):
if start:
sql_select = "select * from {0} where date>='{1}'".format(table_name,
start)
else:
sql_select = "select * from {0}".format(table_name)
cursor.execute(sql_select)
df = pd.DataFrame(cursor.fetchall())
df.columns = dbop.cols_from_cur(cursor)
return df
def prepare_stck_d(df_stck_d):
df_stck_d = df_stck_d.set_index(["date"]).sort_index(ascending=False)
df_stck_d = df_stck_d[
["code", "open", "high", "low", "close", "vol", "amt", "adj_factor"]]
return df_stck_d
def prepare_idx_d(df_idx_d):
df_idx_d = df_idx_d.set_index("date").sort_index(ascending=False)
return df_idx_d
def prepare_each_stck(df_stck, qfq_type="hfq"):
if qfq_type and qfq_type not in ["hfq","qfq"]:
raise ValueError("qfq_type {} is not supported".format(qfq_type))
df_stck = df_stck.copy()
fq_cols = ["open", "high", "low", "close"]
for col in fq_cols:
df_stck[col+"0"] = df_stck[col]
    # Price adjustment using the adjustment factor
if qfq_type=="qfq":
qfq_factor = np.array(df_stck["adj_factor"]
/ df_stck["adj_factor"].iloc[0])
# print(qfq_factor.shape)
qfq_factor = np.array(df_stck["adj_factor"]).reshape(-1, 1) * np.ones(
(1, len(fq_cols)))
# print(df_stck[fq_cols].dtypes)
# print(qfq_factor.shape, qfq_factor.dtype)
# print(df_stck[fq_cols]/qfq_factor)
df_stck.loc[:, fq_cols] = df_stck[fq_cols] * qfq_factor
return df_stck
def proc_stck_d(df_stck_d, pred_period=10):
df_stck_d = prepare_stck_d(df_stck_d)
df_stck_list = []
cols_move = ["open", "high", "low", "close", "amt"]
cols_roll = ["open", "high", "low", "close", "amt"]
fq_cols = ["open", "high", "low", "close"]
cols_future = None
for code, df in df_stck_d.groupby("code"):
df = df.sort_index(ascending=False)
df = prepare_each_stck(df)
df_label_min = _rolling_min(pred_period, df, "low", move=-1)
df_label_max = _rolling_max(pred_period - 1, df, "high", move=-2)
p1 = (pred_period - 1) // 3
p2 = p1
p3 = pred_period - 1 - p1 - p2
df_label_mean1 = _rolling_mean(p1, df, "open", move=-2)
df_label_mean2 = _rolling_mean(p2, df, "open", move=-2 - p1)
df_label_mean3 = _rolling_mean(p3, df, "open", move=-2 - p1 - p2)
# print(df_label_min.columns)
df_tomorrow = _move(-1, df, ["open", "high", "low", "close"])
# df_label_min = _rolling_min(pred_period,df,"low")
# if code == "000002.SZ":
# tmp = _rolling_min(-5,df,cols_roll).loc["2018-08-07"]
# print(tmp)
df_move_list = [change_rate(df[cols_move], _move(i, df, cols_move)) for
i in range(1, 6)]
df_qfq = df[fq_cols] / df["adj_factor"].iloc[0]
qfq_cols = ["qfq_"+col for col in fq_cols]
df_tomorrow_qfq = _move(-1, df_qfq)
df_rolling_list = [(change_rate(df[cols_roll],
_rolling_max(i, df, cols_roll)),
change_rate(df[cols_roll],
_rolling_min(i, df, cols_roll)),
change_rate(df[cols_roll],
_rolling_mean(i, df, cols_roll))) for i
in [-5, -10, -20, -60, -120, -250]]
df_roll_flat_list = []
for df_rolling_group in df_rolling_list:
df_roll_flat_list.extend(df_rolling_group)
df_labels = pd.concat(
[df_tomorrow,df_tomorrow_qfq, df_label_max, df_label_min, df_label_mean1,
df_label_mean2, df_label_mean3], axis=1, sort=False)
df_stck = pd.concat(
[df,df_qfq] + df_move_list + df_roll_flat_list + [df_labels], axis=1,
sort=False)
df_stck_list.append(df_stck)
if not cols_future:
cols_future = list(df_labels)
# print(tmp.shape)
# print(tmp[tmp[col_label].isnull()])
# if code == "002217.SZ":
# print(df[df.index == "2018-01-02"])
# print(df_stck[df_stck.index == "2018-01-02"])
df_stck_d_all = pd.concat(df_stck_list, sort=False)
# for df in df_stck_list:
# print(df["code"].unique(), df.shape)
# print(df["code"].unique(), df[df.index >= "2018-01-01"].shape)
print("count stck", len(
df_stck_d_all["code"][df_stck_d_all.index >= "2018-01-01"].unique()))
print(df_stck_d_all.shape)
return df_stck_d_all, cols_future
def proc_idx_d(df_idx_d: pd.DataFrame):
df_idx_d = prepare_idx_d(df_idx_d)
cols_move = ["open", "high", "low", "close", "vol"]
cols_roll = cols_move
df_idx_list = []
for name, group in df_idx_d.groupby("code"):
group = group.sort_index(ascending=False)
del group["code"]
df_move_list = [
change_rate(group[cols_move], _move(i, group, cols_move)) for i in
range(1, 6)]
df_rolling_list = [(change_rate(group[["high", "vol"]],
_rolling_max(i, group,
["high", "vol"])),
change_rate(group[["low", "vol"]],
_rolling_min(i, group,
["low", "vol"])),
change_rate(group[["open", "close", "vol"]],
_rolling_mean(i, group,
["open", "close",
"vol"]))) for i in
[-5, -10, -20, -60, -120, -250, -500]]
df_roll_flat_list = []
for df_rolling_group in df_rolling_list:
df_roll_flat_list.extend(df_rolling_group)
tmp_list = [group] + df_move_list + df_roll_flat_list
tmp = pd.concat(tmp_list, axis=1, sort=False)
df_idx_list.append(_prefix(name, tmp))
df_idx_d = pd.concat(df_idx_list, axis=1, sort=False)
return df_idx_d
def prepare_data(cursor, pred_period=10, start=None):
stock_day, index_day = constants.STOCK_DAY[clct.TABLE], constants.INDEX_DAY[
clct.TABLE]
print("start:",start)
df_stck_d = create_df(cursor, stock_day, start)
print("min_date",min(df_stck_d.date))
df_idx_d = create_df(cursor, index_day, start)
df_stck_d_all, cols_future = proc_stck_d(df_stck_d,
pred_period=pred_period)
print(df_stck_d_all.shape)
df_idx_d = proc_idx_d(df_idx_d)
print(df_idx_d.shape, len(df_idx_d.index.unique()))
df_all = df_stck_d_all.join(df_idx_d)
print(df_all.shape)
# print(df_all[(df_all.index == "2018-01-02") & (
# df_all["code"] == "002217.SZ")])
return df_all, cols_future
def feature_select(X, y):
import sklearn.ensemble as ensemble
clf = ensemble.ExtraTreesClassifier(random_state=0)
clf.fit(X, y)
import sklearn.feature_selection as fselect
model = fselect.SelectFromModel(clf, prefit=True)
X_new = model.transform(X)
print("selected feature number:", X_new.shape)
return X_new, model
def main():
db_type = "sqlite3"
#
# conn = dbop.connect_db(db_type)
# cursor = conn.cursor()
#
# pred_period=20
# df_all,cols_future = prepare_data(cursor,pred_period=pred_period,start="2011-01-01")
#
# # test
# # df_test = df_all[df_all["code"]=="600887.SH"]
# # basic_cols = ["open", "high", "low", "close", "amt", "adj_factor"]
# # derived_cols = ['change_rate_p1mv_open', 'change_rate_p1mv_high',
# # 'change_rate_p1mv_low', 'change_rate_p1mv_close',
# # 'change_rate_p1mv_amt', 'change_rate_p3mv_open',
# # 'change_rate_p3mv_high', 'change_rate_p3mv_low',
# # 'change_rate_p3mv_close', 'change_rate_p3mv_amt',
# # 'change_rate_p5mv_open', 'change_rate_p5mv_high',
# # 'change_rate_p5mv_low', 'change_rate_p5mv_close',
# # 'change_rate_p5mv_amt', 'change_rate_p5max_open',
# # 'change_rate_p5max_high', 'change_rate_p5max_low',
# # 'change_rate_p5max_close', 'change_rate_p5max_amt',
# # 'change_rate_p5min_open', 'change_rate_p5min_high',
# # 'change_rate_p5min_low', 'change_rate_p5min_close',
# # 'change_rate_p5min_amt', 'change_rate_p5mean_open',
# # 'change_rate_p5mean_high', 'change_rate_p5mean_low',
# # 'change_rate_p5mean_close', 'change_rate_p5mean_amt',
# # 'change_rate_p20max_open', 'change_rate_p20max_high',
# # 'change_rate_p20max_low', 'change_rate_p20max_close',
# # 'change_rate_p20max_amt', 'change_rate_p20min_open',
# # 'change_rate_p20min_high', 'change_rate_p20min_low',
# # 'change_rate_p20min_close', 'change_rate_p20min_amt',
# # 'change_rate_p20mean_open', 'change_rate_p20mean_high',
# # 'change_rate_p20mean_low', 'change_rate_p20mean_close',
# # 'change_rate_p20mean_amt', 'f1mv_open', 'f1mv_high',
# # 'f1mv_low', 'f1mv_close', 'f20max_f1mv_high',
# # 'sz50_open', 'sz50_high', 'sz50_low', 'sz50_close',
# # 'sz50_vol', 'sz50_change_rate_p1mv_open',
# # 'sz50_change_rate_p1mv_high',
# # 'sz50_change_rate_p1mv_low',
# # 'sz50_change_rate_p1mv_close',
# # 'sz50_change_rate_p1mv_vol']
# #
# # test_cols = basic_cols + derived_cols
# # print(test_cols)
# # df_test[test_cols].sort_index(ascending=False).iloc[:100].to_excel(
# # "test_data.xlsx",header=True,index=True)
#
#
#
#
# # # test
# # df_test_list = []
# # for code in df_all["code"].unique()[:3]:
# # df = df_all[df_all["code"]==code].sort_index(
# # ascending=False).iloc[:50]
# # print(df)
# # df_test_list.append(df)
# # pd.concat(df_test_list).to_excel("test_data.xlsx",header=True,index=True)
# #
# #
# import xgboost.sklearn as xgb
# import lightgbm.sklearn as lgbm
# import sklearn.metrics as metrics
# import matplotlib.pyplot as plt
# import time
# import sklearn.preprocessing as preproc
#
# period = (df_all.index >= "2014-01-01")
# df_all = df_all[period]
#
# df_all = df_all[df_all["amt"]!=0]
#
# y = gen_y(df_all, threshold=0.15, pred_period=pred_period)
# print("null:",sum(y.isnull()))
#
# features = df_all.columns.difference(cols_future+["code"])
#
#
# X = df_all[features]
#
#
# # X,y = drop_null(X,y)
# X = X[y.notnull()]
# X_full = df_all[y.notnull()]
# print("full and X",X.shape,X_full.shape)
# y = y.dropna()
# print(X.shape,y.shape)
# print("total positive", sum(y))
#
# condition = (X.index >= "2018-01-01")
# X_train, y_train = X[~condition], y[~condition]
# X_test, y_test = X[condition], y[condition]
#
# print(X_test.shape,y_test.shape)
# print("test positive:", sum(y_test))
#
# X_train_full = X_full.loc[condition]
# X_test_full = X_full.loc[condition]
#
# print(X_test_full.shape,X_test.shape)
# print(X_test_full[(X_test_full.index == "2018-01-02") & (X_test_full["code"]=="002217.SZ")].shape)
#
#
# # print(X_test_full["code"].iloc[np.array(y_test == 1)])
# # print(X_test_full[X_test_full["code"]=="002217.SZ"])
#
# # # scaler = preproc.StandardScaler()
# # # X_train = scaler.fit_transform(X_train)
# # # X_test = scaler.transform(X_test)
# #
# # # X_train,selector = feature_select(X_train,y_train)
# # # X_test = selector.transform(X_test)
# #
# #
# scale_pos_weight = sum(y==0)/sum(y==1)
#
# clfs = [
# lgbm.LGBMClassifier(n_estimators=300, scale_pos_weight=0.1,
# num_leaves=100, max_depth=8, random_state=0),
# xgb.XGBClassifier(n_estimators=300, scale_pos_weight=0.1,
# max_depth=5,
# random_state=0,),
# ]
#
# y_prd_list = []
# colors = ["r", "b"]
# for clf, c in zip(clfs, colors):
# t1 = time.time()
# clf.fit(X_train, y_train)
# t2 = time.time()
# y_prd_list.append([clf, t2 - t1, clf.predict_proba(X_test), c])
#
# for clf, t, y_prd_prob, c in y_prd_list:
# y_prd = np.where(y_prd_prob[:, 0] < 0.25, 1, 0)
# print(clf.classes_)
# print(y_prd.shape, sum(y_prd))
#
# print(X_test_full["code"].iloc[y_prd==1])
# # print(X_test_full["code"])
#
# print("accuracy", metrics.accuracy_score(y_test, y_prd))
# print("precison", metrics.precision_score(y_test, y_prd))
# print("recall", metrics.recall_score(y_test, y_prd))
# precision, recall, _ = metrics.precision_recall_curve(y_test, y_prd_prob[:, 1])
#
# plt.figure()
# plt.title(clf.__class__)
# plt.xlim(0, 1)
# plt.ylim(0, 1)
# plt.xlabel("recall")
# plt.ylabel("precision")
# plt.plot(recall, precision, color=c)
# print(clf, t)
#
# plt.show()
if __name__ == '__main__':
main()
```
#### File: jiewufd/Quant/db_operations.py
```python
import sqlite3
import pymysql
def connect_db(db:str):
if db=="mysql":
return pymysql.connect(host='127.0.0.1', user='root', passwd='<PASSWORD>',
db='quant', charset='utf8')
elif db == "sqlite3":
return sqlite3.connect("database\\stock.db")
else:
raise ValueError("{} not supported".format(db))
def close_db(conn):
conn.commit()
conn.close()
def _parse_config(path):
with open(path) as f:
split_symbol = "----"
config_str = "".join(f.readlines())
config_str = config_str.replace("\n","")
config_str = config_str.replace("\t"," ")
config_str = config_str.replace(split_symbol*2,split_symbol)
config_str = config_str.strip(split_symbol)
configs = dict([config.split("::") for config in config_str.split(split_symbol)])
return configs
def parse_config(path):
with open(path) as f:
split_symbol = "-"*4
config_str = "".join(f.readlines()).strip(split_symbol)
configs = []
for config_table in config_str.split(split_symbol):
configs.append(_parse_config_table(config_table))
return dict(configs)
def _parse_config_table(config_table:str):
split_symbol = ":"*2
table_name, config_tab_details = config_table.split(split_symbol)
table_name = table_name.replace("\n","").replace(" ","")
return table_name, _parse_config_tab_details(config_tab_details)
def _parse_config_tab_details(config_tab_details:str):
split_symbol = "-"*2
config_cols, config_others = config_tab_details.split(split_symbol)
pass
def cols_from_cur(cursor):
return tuple(desc[0] for desc in cursor.description)
if __name__ == '__main__':
cursor = connect_db("sqlite3").cursor()
cursor.execute("select * from stock_day where date>='2018-08-15'")
for row in cursor.fetchall():
print(row)
```
#### File: jiewufd/Quant/ml_model.py
```python
import os
import pickle
import re
import time
import lightgbm.sklearn as lgbm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.preprocessing as preproc
import xgboost.sklearn as xgb
from matplotlib import pyplot as plt
import db_operations as dbop
from data_prepare import prepare_data, feature_select
from constants import FLOAT_DELTA
def gen_data(pred_period=20, lower_bound="2011-01-01", start="2014-01-01"):
db_type = "sqlite3"
conn = dbop.connect_db(db_type)
cursor = conn.cursor()
df_all, cols_future = prepare_data(cursor, pred_period=pred_period,
start=lower_bound)
data_period = (df_all.index >= start)
df_all = df_all[data_period]
df_all = df_all[df_all["amt"] != 0]
return df_all, cols_future
def y_distribution(y):
y = y.copy().dropna()
# print distribution of y
print("before", sum(y < 0))
print("y<-0.5:", sum(y < -0.5))
for i in range(-5, 5):
tmp1 = ((i * 0.1) <= y)
tmp2 = (y < ((i + 1) * 0.1))
if len(tmp1) == 0 or len(tmp2) == 0:
tmp = [False]
else:
tmp = tmp1 & tmp2
print("{0:.2f}<=y<{1:.2f}:".format(i * 0.1, (i + 1) * 0.1), sum(tmp))
print("y>0.5", sum(y > 0.5))
print("after", sum(y < 0))
plt.figure()
plt.hist(y, bins=np.arange(-10, 11) * 0.1)
def gen_y(df_all: pd.DataFrame, pred_period=10, threshold=0.1, is_high=True,
is_clf=False):
target_col = get_target_col(pred_period, is_high)
y = df_all[target_col] / df_all["f1mv_open"] - 1
y_distribution(y)
# print(y[y.isna() & (df_all["f1mv_high"] == df_all["f1mv_low"])])
y[y.notnull() & (df_all["f1mv_high"] == df_all["f1mv_low"])] = 0
print("过滤涨停项:", sum(df_all["f1mv_high"] == df_all["f1mv_low"]))
return label(y, threshold=threshold, is_high=is_high,is_clf=is_clf)
def get_target_col(pred_period = 20,is_high = True):
if is_high:
target_col = "f{}max_f2mv_high".format(pred_period-1)
else:
target_col = "f{}min_f1mv_low".format(pred_period)
return target_col
def label(y, threshold=0.1, is_high=True, is_clf=False):
if is_clf:
if not is_high:
y = -y
y[y > threshold] = 1
y[y <= threshold] = 0
return y
def drop_null(X, y):
Xy = np.concatenate((np.array(X), np.array(y).reshape(-1, 1)), axis=1)
Xy = pd.DataFrame(Xy, index=X.index).dropna()
X = Xy.iloc[:, :-1].copy()
y = Xy.iloc[:, -1].copy()
return X, y
def gen_dataset(pred_period=20, lower_bound="2011-01-01", start="2014-01-01",
test_start="2018-01-01", is_high=True, is_clf=False, is_drop_null=False,
is_normalized=False, is_feature_selected=False):
"""
Generate training and testing data to be passed to train().
:param pred_period:
:param is_drop_null:
:param is_normalized:
:param is_feature_selected:
:return:
"""
df_all, cols_future = gen_data(pred_period, lower_bound, start)
y = gen_y(df_all, threshold=0.15, pred_period=pred_period, is_high=is_high,
is_clf=is_clf)
print("null:", sum(y.isnull()))
features = df_all.columns.difference(cols_future + ["code"])
X_full = df_all[y.notnull()]
X = X_full[features]
y = y.dropna()
if is_drop_null:
X, y = drop_null(X, y)
print("X_full,X,y:", X_full.shape, X.shape, y.shape)
print("total positive", sum(y))
test_period = (X.index >= test_start)
X_train, y_train = X[~test_period], y[~test_period]
X_test, y_test = X[test_period], y[test_period]
print(X_test.shape, y_test.shape)
print("test positive:", sum(y_test))
X_train_full = X_full[~test_period]
X_test_full = X_full[test_period]
print(X_test_full.shape, X_test.shape)
scaler = None
if is_normalized:
scaler = preproc.StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
selector = None
if is_feature_selected:
X_train, selector = feature_select(X_train, y_train)
X_test = selector.transform(X_test)
return {"train":(X_train,y_train),
"test":(X_test, y_test),
"full": (X_train_full, X_test_full),
"preproc":(scaler, selector)}
def gen_X(df_all: pd.DataFrame, cols_future, scaler=None, selector=None):
features = df_all.columns.difference(cols_future + ["code"])
X = df_all[features]
if scaler:
X = scaler.transform(X)
if selector:
X = selector.transform(X)
return X
def train(data, models, is_clf=False):
X_train, y_train = data["train"]
y_pred_list = []
for model in models:
t1 = time.time()
model.fit(X_train, y_train)
t2 = time.time()
y_pred_list.append([model, t2 - t1])
print("training time:", t2-t1)
return y_pred_list
def pred_vs_real(inc:pd.DataFrame, y_pred):
x_min = -1
# Average for all.
y0 = inc["pct"].mean()
print(y0)
x0 = np.arange(x_min,11) * 0.1
y0 = np.ones(x0.shape) * y0
# prediction performance
df = pd.DataFrame(columns=["p0","range","cnt","min","mean","median","max","std"])
df = df.set_index(["p0"])
for i in range(-5,10):
p0 = i * 0.1
p1 = (i + 1) * 0.1
cond = (p0 < y_pred) & (y_pred < p1)
df.loc["{:.1f}".format(p0)] = ("{:.1f}-{:.1f}".format(p0,p1),
sum(cond),
inc["pct"][cond].min(),
inc["pct"][cond].mean(),
inc["pct"][cond].median(),
inc["pct"][cond].max(),
inc["pct"][cond].std())
if p0 > 0.3*FLOAT_DELTA and sum(cond)>0:
plt.figure()
plt.title(df.loc["{:.1f}".format(p0), "range"])
plt.hist(inc["pct"][cond], bins=5)
print(df)
plt.figure()
plt.title("real-pred")
cond_plt = y_pred<0.5*FLOAT_DELTA
plt.scatter(y_pred[cond_plt],inc["pct"][cond_plt])
# for p0_pred, c, p_real,s in zip(p_pred,cnt, y,std):
# print("{0:.1f}-{1:.1f}:".format(p0_pred,p0_pred+0.1),c, p_real, s)
print(sum([row["cnt"] * row["mean"] for p0, row in df.iterrows()
if float(p0) < 0.3*FLOAT_DELTA and row["cnt"]>0]))
plt.figure()
plt.bar(np.array(list(map(float,df.index))) + 0.05, df["mean"], width=0.08)
plt.plot(x0, y0, color='r')
# plt.xlim(-0.2, 1)
def save_model(model, pred_period=20, is_high=True):
suffix = "high" if is_high else "low"
f_name = re.search("\.([^.]*)'", str(type(model))).group(1)
f_name += "_{}".format(pred_period) + suffix
print(f_name)
with open(os.path.join(os.getcwd(), f_name), "wb") as f:
pickle.dump(model, f)
def load_model(model_type:str, pred_period=20, is_high=True):
suffix = "high" if is_high else "low"
f_name = model_type+"_{}".format(pred_period) + suffix
print(f_name)
with open(os.path.join(os.getcwd(), f_name), "rb") as f:
model = pickle.load(f)
return model
def train_save(pred_period = 20,is_high = True, is_clf=False):
data = gen_dataset(is_high=is_high,is_clf=is_clf,pred_period=pred_period)
if is_clf:
_, y_train=data["train"]
scale_pos_weight = sum(y_train == 0) / sum(y_train == 1)
if not is_clf:
models = [lgbm.LGBMRegressor(n_estimators=300, num_leaves=100, max_depth=8,random_state=0),
xgb.XGBRegressor(n_estimators=300, max_depth=5, random_state=0)]
else:
models = [lgbm.LGBMClassifier(n_estimators=300, scale_pos_weight=0.1,
num_leaves=100, max_depth=8, random_state=0),
xgb.XGBClassifier(n_estimators=300, scale_pos_weight=0.1,
max_depth=5, random_state=0, )]
y_pred_list = train(data, models, is_clf=is_clf)
# save model
for model in models:
save_model(model,pred_period,is_high)
return y_pred_list
def load_test(pred_period = 20,is_high = True, is_clf=False):
model_type = "XGBRegressor"
model = load_model(model_type,pred_period,is_high)
dataset = gen_dataset(
lower_bound="2015-01-01",start="2018-01-01",pred_period=pred_period, is_high=is_high,is_clf=is_clf)
X_test, y_test = dataset["test"]
_, X_test_full = dataset["full"]
target_col = get_target_col(pred_period,is_high)
inc = X_test_full[["code", "f1mv_open", target_col]].copy()
inc["pct"] = inc[target_col] / inc["f1mv_open"] - 1
y_pred = model.predict(X_test)
pred_vs_real(inc,y_pred)
plt.show()
if __name__ == '__main__':
# train_save(pred_period=5, is_high=True, is_clf=False)
# train_save(pred_period=5, is_high=False, is_clf=False)
# load_test(pred_period=5, is_high=False, is_clf=False)
load_test(pred_period=5, is_high=True, is_clf=False)
``` |
{
"source": "JIexa/complexity-recurrence",
"score": 4
} |
#### File: complexity-recurrence/graphs/strassen.py
```python
import numpy as np
import random
import os
counter = 0
def random_matrix(size):
lists = []
list = []
for j in range(size):
for i in range(size):
list.append(random.randint(1, 100))
lists.append(list)
list = []
return lists
def new_matrix(r, c):
"""Create a new matrix filled with zeros."""
matrix = [[0 for row in range(r)] for col in range(c)]
global counter
counter = counter + r*c
return matrix
def split(matrix):
"""Split matrix into quarters."""
a = b = c = d = matrix
global counter
while len(a) > len(matrix)/2:
a = a[:len(a)//2]
b = b[:len(b)//2]
c = c[len(c)//2:]
d = d[len(d)//2:]
counter = counter + 1
while len(a[0]) > len(matrix[0])//2:
for i in range(len(a[0])//2):
counter = counter + 1
a[i] = a[i][:len(a[i])//2]
b[i] = b[i][len(b[i])//2:]
c[i] = c[i][:len(c[i])//2]
d[i] = d[i][len(d[i])//2:]
return a, b, c, d
# matrix = np.array(matrix)
# # print(matrix.shape)
# row, col = matrix.shape
# row2, col2 = row//2, col//2
# return matrix[:row2, :col2], matrix[:row2, col2:], matrix[row2:, :col2], matrix[row2:, col2:]
def add_matrix(a, b):
global counter
if type(a) == int:
d = a + b
else:
d = []
for i in range(len(a)):
c = []
for j in range(len(a[0])):
c.append(a[i][j] + b[i][j])
counter = counter + 1
d.append(c)
return d
def subtract_matrix(a, b):
global counter
if type(a) == int:
d = a - b
else:
d = []
for i in range(len(a)):
c = []
for j in range(len(a[0])):
c.append(a[i][j] - b[i][j])
counter = counter + 1
d.append(c)
return d
def strassen(x, y, n):
# base case: 1x1 matrix
global counter
if n == 1:
z = [[0]]
z[0][0] = x[0][0] * y[0][0]
counter = counter + 1
return z
else:
# split matrices into quarters
a, b, c, d = split(x)
e, f, g, h = split(y)
# p1 = a*(f-h)
p1 = strassen(a, subtract_matrix(f, h), n/2)
# p2 = (a+b)*h
p2 = strassen(add_matrix(a, b), h, n/2)
# p3 = (c+d)*e
p3 = strassen(add_matrix(c, d), e, n/2)
# p4 = d*(g-e)
p4 = strassen(d, subtract_matrix(g, e), n/2)
# p5 = (a+d)*(e+h)
p5 = strassen(add_matrix(a, d), add_matrix(e, h), n/2)
# p6 = (b-d)*(g+h)
p6 = strassen(subtract_matrix(b, d), add_matrix(g, h), n/2)
# p7 = (a-c)*(e+f)
p7 = strassen(subtract_matrix(a, c), add_matrix(e, f), n/2)
z11 = add_matrix(subtract_matrix(add_matrix(p5, p4), p2), p6)
z12 = add_matrix(p1, p2)
z21 = add_matrix(p3, p4)
z22 = add_matrix(subtract_matrix(subtract_matrix(p5, p3), p7), p1)
z = new_matrix(len(z11)*2, len(z11)*2)
for i in range(len(z11)):
for j in range(len(z11)):
z[i][j] = z11[i][j]
z[i][j+len(z11)] = z12[i][j]
z[i+len(z11)][j] = z21[i][j]
z[i+len(z11)][j+len(z11)] = z22[i][j]
counter = counter + 1
return z
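# Note (added for clarity, not in the original file): each call above issues 7 recursive
# multiplications on half-sized matrices plus a constant number of Theta(n^2) matrix
# additions/subtractions, i.e. T(n) = 7 T(n/2) + Theta(n^2), which the master theorem
# resolves to Theta(n^log2(7)) ~ Theta(n^2.81). The global `counter` serves as a trace
# proxy for this element-level work in the driver below.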
if __name__ == '__main__':
for i in range(100):
size = 2 ** random.randint(1, 5)
depth = 0
x = random_matrix(size)
y = random_matrix(size)
path = "./strassen"
try:
os.mkdir(path)
except OSError as error:
pass
strassen(x, y, size)
with open("./strassen/traces", 'a') as f:
print("{};{}".format(size, counter), file=f)
counter = 0
```
#### File: JIexa/complexity-recurrence/recurrence_solver.py
```python
import math
import argparse
def master_theorem(a, b, k, p=0):
'''T(n) = a T(n/b) + (n^k(logn)^p)'''
assert(isinstance(a, int)), a
assert(isinstance(b, int)), b
assert(isinstance(k, int)), k
assert(isinstance(p, int)), p
c = math.log(a,b) if b != 1 else 0
if a > b**k:
complexity = "n^{}".format(c)
elif a == b**k:
if p > -1:
complexity = "n^{} (logn)^{}".format(c, p+1) if p!=0 else "n^{} logn".format(c)
elif p == -1:
complexity = "n^{} log(logn)".format(c)
elif p < -1:
complexity = "n^{}".format(c)
elif a < b**k:
if p >= 0:
complexity = "n^{} (logn)^{}".format(k, p)
else:
complexity = "n^{}".format(c)
return complexity
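# Illustrative worked examples (not part of the original file); the outputs follow directly
# from the branches above:
#   master_theorem(2, 2, 1, 0) -> "n^1.0 logn"   e.g. merge sort, T(n) = 2 T(n/2) + n
#   master_theorem(1, 2, 0, 0) -> "n^0.0 logn"   e.g. binary search, T(n) = T(n/2) + 1
#   master_theorem(7, 2, 2, 0) -> "n^2.807..."   e.g. Strassen, T(n) = 7 T(n/2) + n^2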
def recurrence(a, b, k, p, rec_call):
'''T(n) = T(n-a) + T(n-b) + fn'''
assert(isinstance(a, int)), a
assert(isinstance(b, int)), b
assert(a!=0 or b!=0), "Invalid recurrence relation"
if rec_call > 2:
complexity = "{}^n".format(rec_call)
elif a != 0 and b != 0:
complexity = "2^n"
elif a != 0 and b == 0:
if p==0:
complexity = "n^{}".format(k+1)
else:
complexity = "n^{}(logn)^{}".format(k+1, p)
else:
assert False, "failed to solve this recurrence relation"
return complexity
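# Illustrative worked examples (not part of the original file):
#   recurrence(1, 0, 0, 0, 1) -> "n^1"   e.g. T(n) = T(n-1) + 1 (linear recursion)
#   recurrence(1, 0, 1, 0, 1) -> "n^2"   e.g. T(n) = T(n-1) + n
#   recurrence(1, 2, 0, 0, 2) -> "2^n"   e.g. T(n) = T(n-1) + T(n-2) (naive Fibonacci)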
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog="recurrence_solver.py")
parser.add_argument('-format', help="1: T(n) = a T(n/b) + n^k(logn)^p\n2: T(n) = T(n-a) + T(n-b) + n^k(logn)^p\n3: T(n) = T(n/a) + T(n/b) + fn")
parser.add_argument('-a')
parser.add_argument('-b')
parser.add_argument('-k')
parser.add_argument('-p')
parser.add_argument('-rec_call')
args = parser.parse_args()
fmt = int(args.format)
a = int(args.a)
b = int(args.b)
k = int(args.k)
p = int(args.p)
rec_call = int(args.rec_call)
if fmt == 1:
complexity = master_theorem(a, b, k, p)
elif fmt == 2:
complexity = recurrence(a, b, k, p, rec_call)
elif fmt == 3: # T(n) = T(n/a) + T(n/b) + fn
complexity = master_theorem(1, a, k, p) if a < b else master_theorem(1, b, k, p)
print("Complexity is O({})".format(complexity))
```
#### File: recursive_programs/python/reverse_stack.py
```python
import random
import os
counter = 0
def random_list(size):
list = []
for i in range(size):
list.append(random.randint(-10000, 10000))
return list
def insertAtBottom(stack, item, depth):
if depth == 0:
global counter
counter = counter + 1
if isEmpty(stack):
push(stack, item)
else:
temp = pop(stack)
insertAtBottom(stack, item, depth)
push(stack, temp)
def reverse(stack, depth, file):
with open(file, 'a') as f:
print("{};{}".format(depth, len(stack)), file=f)
if depth == 0:
global counter
counter = counter + 1
if not isEmpty(stack):
temp = pop(stack)
reverse(stack, depth+1, file)
insertAtBottom(stack, temp, depth)
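# Note (added for clarity, not in the original file): this is the standard recursive stack
# reversal, pop every element and insert each one at the bottom on the way back up, so a
# stack [1, 2, 3] ends up as [3, 2, 1]. The `depth` and `file` arguments only record
# recursion traces for the complexity analysis.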
def isEmpty( stack ):
return len(stack) == 0
def push( stack, item ):
stack.append( item )
def pop( stack ):
if(isEmpty( stack )):
print("Stack Underflow ")
exit(1)
return stack.pop()
# Driver collect traces
def main():
global counter
counter = 0
size = random.randint(1, 500)
arr = random_list(size)
depth = 0
path = "./reverse_stack"
try:
os.mkdir(path)
except OSError as error:
pass
file = "./reverse_stack/output-{}".format(size)
reverse(arr, depth, file)
with open("./reverse_stack/traces", 'a') as f:
print("{};{}".format(size, counter), file=f)
counter = 0
if __name__ == '__main__':
for i in range(100):
main()
```
#### File: recursive_programs/python/tree_traversal.py
```python
import random
import os
counter = 0
class Node:
def __init__(self, data):
self.left = None
self.right = None
self.data = data
def insert(self, data):
if self.data:
if data < self.data:
# if size(self.left) < size(self.right):
if self.left is None:
self.left = Node(data)
else:
self.left.insert(data)
elif data > self.data:
# else:
if self.right is None:
self.right = Node(data)
else:
self.right.insert(data)
else:
self.data = data
# assert(abs(size(self.right)-size(self.left)) <= 1)
def populate(self, size):
for i in range(size):
val = random.randint(-size, size)
self.insert(val)
return
def PrintTree(self):
if self.left:
self.left.PrintTree()
print( self.data),
if self.right:
self.right.PrintTree()
def size(node):
if node is None:
return 0
else:
return (size(node.left) + 1 + size(node.right))
# Recursive function to perform postorder traversal on the tree
def postorder(root, depth, file):
with open(file, 'a') as f:
print("{};{}".format(depth, size(root)), file=f)
if depth==0:
global counter
counter = counter + 1
if root is None:
return
postorder(root.left, depth+1, file)
postorder(root.right, depth+1, file)
# print(root.data, end=' ')
# Recursive function to perform inorder traversal on the tree
def inorder(root, depth, file):
with open(file, 'a') as f:
print("{};{}".format(depth, size(root)), file=f)
if depth==0:
global counter
counter = counter + 1
if root is None:
return
inorder(root.left, depth+1, file)
# print(root.data, end=' ')
inorder(root.right, depth+1, file)
# Recursive function to copy the tree
def copy(root, height, depth, file):
with open(file, 'a') as f:
print("{};{}".format(depth, height), file=f)
if depth==0:
global counter
counter = counter + 1
if root is None:
return
# if root.left:
copy(root.left, height+1, depth+1, file)
# if root.right:
copy(root.right, height+1, depth+1, file)
# Driver collect traces
def main():
global counter
counter = 0
depth = 0
root = Node(random.randint(-10,10))
root.populate(random.randint(2,500))
siz = size(root)
path = "./postorder_ubt"
try:
os.mkdir(path)
except OSError as error:
pass
file = "./postorder_ubt/output-{}".format(siz)
postorder(root, depth, file)
with open("./postorder_ubt/traces", 'a') as f:
print("{};{}".format(siz, counter), file=f)
counter = 0
path = "./inorder_ubt"
try:
os.mkdir(path)
except OSError as error:
pass
file = "./inorder_ubt/output-{}".format(siz)
inorder(root, depth, file)
with open("./inorder_ubt/traces", 'a') as f:
print("{};{}".format(siz, counter), file=f)
counter = 0
path = "./bstcopy_ubt"
try:
os.mkdir(path)
except OSError as error:
pass
file = "./bstcopy_ubt/output-{}".format(siz)
height = 1
copy(root, height, depth, file)
with open("./bstcopy_ubt/traces", 'a') as f:
print("{};{}".format(siz, counter), file=f)
counter = 0
if __name__ == '__main__':
for i in range(100):
main()
``` |
{
"source": "JIEXAO/incubator-mxnet",
"score": 2
} |
#### File: incubator-mxnet/python/Multi_GPU.py
```python
import d2lzh as d2l
import mxnet as mx
from mxnet import autograd, gluon, init, nd
from mxnet.gluon import loss as gloss, nn, utils as gutils
import time
from mpi4py import MPI
## Start with multi-machine, single-GPU training first
def resnet18(num_classes):
def resnet_block(num_channels, num_residuals, first_block=False):
blk = nn.Sequential()
for i in range(num_residuals):
if i == 0 and not first_block:
blk.add(d2l.Residual(
num_channels, use_1x1conv=True, strides=2))
else:
blk.add(d2l.Residual(num_channels))
return blk
net = nn.Sequential()
net.add(nn.Conv2D(64, kernel_size=3, strides=1, padding=1),
nn.BatchNorm(), nn.Activation('relu'))
net.add(resnet_block(64, 2, first_block=True),
resnet_block(128, 2),
resnet_block(256, 2),
resnet_block(512, 2))
net.add(nn.GlobalAvgPool2D(), nn.Dense(num_classes))
return net
def train(num_gpus, batch_size, lr):
comm = MPI.COMM_WORLD
comm_rank = comm.Get_rank()
comm_size = comm.Get_size()
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
#ctx = [mx.gpu(i) for i in range(num_gpus)]
if comm_rank == 0:
ctx = [mx.gpu(0)]  # keep ctx as a one-element list: split_and_load below expects a list of contexts
else:
ctx = [mx.gpu(1)]
print('running on:', ctx)
net.initialize(init=init.Normal(sigma=0.01), ctx=ctx, force_reinit=True)
trainer = gluon.Trainer(
net.collect_params(), 'sgd', {'learning_rate': lr}, SSP_FLAG=True, thre = 2)
loss = gloss.SoftmaxCrossEntropyLoss()
for epoch in range(400000):
start = time.time()
for X, y in train_iter:
gpu_Xs = gutils.split_and_load(X, ctx)
gpu_ys = gutils.split_and_load(y, ctx)
with autograd.record():
ls = [loss(net(gpu_X), gpu_y)
for gpu_X, gpu_y in zip(gpu_Xs, gpu_ys)]
for l in ls:
l.backward()
trainer.step(epoch, batch_size)
train_time = time.time() - start
test_acc = d2l.evaluate_accuracy(test_iter, net, ctx[0])
print('epoch %d, time %.1f sec, test acc %.2f, process %d' % (
epoch + 1, train_time, test_acc, comm_rank))
net = resnet18(10)
train(num_gpus=1, batch_size=256, lr=0.1)
#train(num_gpus=2, batch_size=512, lr=0.2)
```
#### File: python/unittest/test_numpy_op.py
```python
from __future__ import absolute_import
import numpy as _np
import mxnet as mx
from mxnet import np, npx
from mxnet.base import MXNetError
from mxnet.gluon import HybridBlock
from mxnet.base import MXNetError
from mxnet.test_utils import same, assert_almost_equal, rand_shape_nd, rand_ndarray
from mxnet.test_utils import check_numeric_gradient, use_np
from common import assertRaises, with_seed
import random
import collections
@with_seed()
@use_np
def test_np_sum():
class TestSum(HybridBlock):
def __init__(self, axis=None, dtype=None, keepdims=False):
super(TestSum, self).__init__()
self._axis = axis
self._dtype = dtype
self._keepdims = keepdims
def hybrid_forward(self, F, a, *args, **kwargs):
return F.np.sum(a, axis=self._axis, dtype=self._dtype, keepdims=self._keepdims)
def is_int(dtype):
return 'int' in dtype
in_data_dim = random.choice([2, 3, 4])
shape = rand_shape_nd(in_data_dim, dim=3)
acc_type = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64',
'int8': 'int32', 'int32': 'int64', 'int64': 'int64'}
for hybridize in [False, True]:
for keepdims in [True, False]:
for axis in ([i for i in range(in_data_dim)] + [(), None]):
for itype in ['float16', 'float32', 'float64', 'int8', 'int32', 'int64']:
for dtype in ['float16', 'float32', 'float64', 'int8', 'int32', 'int64']:
if is_int(dtype) and not is_int(itype):
continue
# test gluon
test_sum = TestSum(axis=axis, dtype=dtype, keepdims=keepdims)
if hybridize:
test_sum.hybridize()
if is_int(itype):
x = _np.random.randint(-128, 128, shape, dtype=itype)
x = mx.nd.array(x)
else:
x = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype=itype)
x = x.as_np_ndarray()
x.attach_grad()
expected_ret = _np.sum(x.asnumpy(), axis=axis, dtype=acc_type[itype], keepdims=keepdims)
expected_ret = expected_ret.astype(dtype)
with mx.autograd.record():
y = test_sum(x)
assert y.shape == expected_ret.shape
assert_almost_equal(y.asnumpy(), expected_ret, rtol=1e-3 if dtype == 'float16' else 1e-3,
atol=1e-5 if dtype == 'float16' else 1e-5)
y.backward()
assert same(x.grad.asnumpy(), _np.ones(shape=x.shape, dtype=x.dtype))
# test numeric
if itype == 'float32' and dtype == 'float32':
x_sym = mx.sym.Variable("x").as_np_ndarray()
mx_sym = mx.sym.np.sum(x_sym, axis=axis, dtype=dtype, keepdims=keepdims).as_nd_ndarray()
check_numeric_gradient(mx_sym, [x.as_nd_ndarray()],
numeric_eps=1e-3, rtol=1e-3, atol=1e-4, dtype=_np.float32)
# test imperative
mx_out = np.sum(x, axis=axis, dtype=dtype, keepdims=keepdims)
np_out = _np.sum(x.asnumpy(), axis=axis, dtype=acc_type[itype], keepdims=keepdims).astype(dtype)
assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5)
if __name__ == '__main__':
import nose
nose.runmodule()
``` |
{
"source": "jiexiang1972/vnpy",
"score": 2
} |
#### File: jiexiang1972/vnpy/runDataRecoderGUI.py
```python
from vnpy.event import EventEngine
from vnpy.trader.engine import MainEngine
from vnpy.trader.ui import MainWindow, create_qapp
from vnpy.gateway.weboption import WeboptionGateway
from vnpy.app.data_recorder import DataRecorderApp
def main():
"""Start VN Trader"""
qapp = create_qapp()
event_engine = EventEngine()
main_engine = MainEngine(event_engine)
main_engine.add_gateway(WeboptionGateway)
main_engine.add_app(DataRecorderApp)
main_window = MainWindow(main_engine, event_engine)
main_window.showMaximized()
qapp.exec()
if __name__ == "__main__":
main()
``` |
{
"source": "JiexingQi/picard",
"score": 3
} |
#### File: seq2seq/preprocess/add_coref_to_dataset_tmp.py
```python
import spacy
from spacy.tokens import Doc
import coreferee
import json
from tqdm import tqdm
import os
class WhitespaceTokenizer:
def __init__(self, vocab):
self.vocab = vocab
def __call__(self, text):
words = text.split(" ")
spaces = [True] * len(words)
# Avoid zero-length tokens
for i, word in enumerate(words):
if word == "":
words[i] = " "
spaces[i] = False
# Remove the final trailing space
if words[-1] == " ":
words = words[0:-1]
spaces = spaces[0:-1]
else:
spaces[-1] = False
return Doc(self.vocab, words=words, spaces=spaces)
def init_nlp():
nlp = spacy.load('en_core_web_trf')
nlp.tokenizer = WhitespaceTokenizer(nlp.vocab)
nlp.add_pipe('coreferee')
return nlp
def find_turn_idx(id, turn_list, len_text_list):
total_length = 0
for idx, length in enumerate(len_text_list):
total_length += length
if id < total_length and id >= total_length-length:
return turn_list[idx], id-total_length+length
return turn_list[0], id
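# Illustrative example (not part of the original file): with turn_list = ["t0", "t1"] and
# len_text_list = [3, 4], the global token index 4 falls inside the second turn, so
# find_turn_idx(4, ["t0", "t1"], [3, 4]) returns ("t1", 1), i.e. turn "t1", local position 1.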
def text_list2coref_json(output_path, mode, nlp):
with open(os.path.join(output_path, f"{mode}_text_list.txt"), 'r') as load_f:
dataset = load_f.readlines()
new_res=[]
for idx, entry in tqdm(enumerate(dataset)):
final_preprocessed_text_list = eval(dataset[idx].strip())
text_list = " ".join([i for item in final_preprocessed_text_list for i in item[1]])
turn_list = [item[0] for item in final_preprocessed_text_list]
len_text_list = [item[2] for item in final_preprocessed_text_list]
doc = nlp(text_list)
coref_dict = {}
for chain in doc._.coref_chains:
key = chain.index
used_turn = set()
coref_dict[key] = {}
coref_dict[key]["group"] = []
for li in [list(_) for _ in chain]:
new_list = []
for idx in li:
item = find_turn_idx(idx, turn_list, len_text_list)
item_dict = {"turn": item[0], "position": item[1], "ori": idx}
used_turn.add(item[0])
new_list.append(item_dict)
coref_dict[key]["group"].append(new_list)
coref_dict[key]["used_turn"] = list(used_turn)
new_entry = {}
new_entry["coref"] = coref_dict
new_entry["text_list"] = text_list
new_res.append(new_entry)
with open(os.path.join(output_path, f'{mode}_coref.json'),"w") as dump_f:
json.dump(new_res,dump_f)
def main():
mode_list = ["train", "dev"]
dataset_name_list = ["spider", "cosql"]
nlp = init_nlp()
for dataset_name in dataset_name_list:
for mode in mode_list:
output_path = os.path.join("../../dataset_files/preprocessed_dataset/", dataset_name)
text_list2coref_json(output_path, mode, nlp)
main()
```
#### File: seq2seq/preprocess/get_relation2id_dict.py
```python
def get_relation2id_dict(choice = "Default", use_coref = False, use_dependency = False):
from .constants import RELATIONS, MAX_RELATIVE_DIST
current_relation = [r for r in RELATIONS]
if not use_coref:
current_relation = [r for r in current_relation if r not in ['co_relations', 'coref_relations']]
if not use_dependency:
current_relation = [r for r in current_relation if r not in ['Forward-Syntax', 'Backward-Syntax', 'None-Syntax']]
if choice in ["Default"]:
idx_list = [i for i in range(1, len(current_relation)+1)]
elif choice == "DefaultWithoutSchemaEncoding":
schema_encoding_rel = []
for rel in current_relation:
split_rel = rel.split("-")
try:
src_type, tgt_type = split_rel[0], split_rel[1]
except:
continue
if src_type in ["table", "column", "*"] and tgt_type in ["table", "column", "*"]:
schema_encoding_rel.append(rel)
current_relation = [r for r in current_relation if r not in schema_encoding_rel]
idx_list = [i for i in range(1, len(current_relation)+1)]
for rel in schema_encoding_rel:
current_relation.append(rel)
idx_list.append(0)
elif choice == "DefaultWithoutSchemaLinking":
schema_linking_rel = []
for rel in current_relation:
split_rel = rel.split("-")
try:
src_type, tgt_type = split_rel[0], split_rel[1]
except:
continue
if (src_type in ["question"] and tgt_type in ["table", "column", "*"]) or (tgt_type in ["question"] and src_type in ["table", "column", "*"]):
schema_linking_rel.append(rel)
current_relation = [r for r in current_relation if r not in schema_linking_rel]
idx_list = [i for i in range(1, len(current_relation)+1)]
for rel in schema_linking_rel:
current_relation.append(rel)
idx_list.append(0)
elif choice == "MinType":
idx_list = []
dummy_idx = 8
for rel in current_relation:
if rel in ['question-column-partialmatch', 'question-table-partialmatch']:
idx_list.append(1)
elif rel in ['question-column-exactmatch', 'question-table-exactmatch']:
idx_list.append(2)
elif rel in ['question-column-valuematch']:
idx_list.append(3)
elif rel in ['question-table-nomatch', 'question-column-nomatch']:
idx_list.append(4)
elif rel in ['table-column-pk']:
idx_list.append(5)
elif rel in ['table-column-has']:
idx_list.append(6)
elif rel in ['column-column-fk']:
idx_list.append(7)
elif rel in ['question-question-generic'] + ['question-question-dist' + str(i) if i != 0 else 'question-question-identity' for i in range(- MAX_RELATIVE_DIST, MAX_RELATIVE_DIST + 1)]:
idx_list.append(dummy_idx)
dummy_idx += 1
else:
idx_list.append(0)
elif choice == "Dependency_MinType":
idx_list = []
dummy_idx = 8
for rel in current_relation:
if rel in ['question-column-partialmatch', 'question-table-partialmatch']:
idx_list.append(1)
elif rel in ['question-column-exactmatch', 'question-table-exactmatch']:
idx_list.append(2)
elif rel in ['question-column-valuematch']:
idx_list.append(3)
elif rel in ['question-table-nomatch', 'question-column-nomatch']:
idx_list.append(4)
elif rel in ['table-column-pk']:
idx_list.append(5)
elif rel in ['table-column-has']:
idx_list.append(6)
elif rel in ['column-column-fk']:
idx_list.append(7)
elif rel in ['Forward-Syntax', 'Backward-Syntax', 'None-Syntax']:
idx_list.append(dummy_idx)
dummy_idx += 1
else:
idx_list.append(0)
else:
raise NotImplementedError
RELATION2ID_DICT = dict(zip(current_relation, idx_list))
idx_list.append(0)
current_relation.append("None")
ID2RELATION_DICT = dict(zip(idx_list, current_relation))
return RELATION2ID_DICT, ID2RELATION_DICT, max(idx_list)
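# Illustrative usage sketch (not part of the original file):
#   rel2id, id2rel, num_relation_kinds = get_relation2id_dict("Default", use_coref=True)
# rel2id maps every relation name kept for the chosen configuration to an integer id
# (id 0 is used for relations that are folded away and for "None"), and the last return
# value is the largest relation id in use.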
```
#### File: seq2seq/preprocess/process_dataset.py
```python
import os, json, pickle, time
from tqdm import tqdm
import fcntl
from .common_utils import Preprocessor
from .align_tables import align_tables_by_dataset_name
def process_tables(processor, tables_list, output_path=None):
tables = {}
for idx, each in tqdm(enumerate(tables_list)):
tables[each['db_id']] = processor.preprocess_database(each)
print('In total, process %d databases .' % (len(tables)))
if output_path is not None:
with open (output_path, 'wb') as dump_f:
fcntl.flock(dump_f.fileno(), fcntl.LOCK_EX)
pickle.dump(tables, dump_f)
return tables
def process_dataset(processor, dataset, tables, dataset_name, mode, output_path_base=None, used_coref=False):
processed_dataset = []
# if dataset_name == "cosql":
# wfile = open(f"./dataset_files/ori_dataset/cosql_dataset/sql_state_tracking/{mode}_text_list.txt", "w")
if used_coref and not os.path.exists(os.path.join(output_path_base, f"{mode}_coref.json")):
wfile = open(os.path.join(output_path_base, f"{mode}_text_list.txt"), "w")
for idx, entry in tqdm(enumerate(dataset)):
# if idx > 100:
# continue
if dataset_name in ["spider"]:
entry = processor.pipeline(entry, tables[entry['db_id']], dataset_name, idx)
elif dataset_name in ["cosql", "sparc"]:
entry = processor.pipeline(entry, tables[entry['database_id']], dataset_name, idx)
else:
raise NotImplementedError
if used_coref and not os.path.exists(os.path.join(output_path_base, f"{mode}_coref.json")):
wfile.write(str(entry['final_preprocessed_text_list'])+"\n")
processed_dataset.append(entry)
with open(os.path.join(output_path_base, f"{mode}.bin"), 'wb') as dump_f:
fcntl.flock(dump_f.fileno(), fcntl.LOCK_EX)
pickle.dump(processed_dataset, dump_f)
return processed_dataset
def init_dataset_path(data_base_dir, dataset_name, mode):
db_dir = os.path.join(data_base_dir, "ori_dataset", dataset_name, "database")
table_data_path=os.path.join(data_base_dir, "ori_dataset", dataset_name, "tables.json")
table_out_path=os.path.join(data_base_dir, "preprocessed_dataset", dataset_name, "tables.bin")
if mode == "train":
if dataset_name == "spider":
dataset_path=os.path.join(data_base_dir, "ori_dataset", dataset_name, "train_spider.json")
elif dataset_name == "cosql":
db_dir = os.path.join(data_base_dir, "ori_dataset", "cosql_dataset", "database")
dataset_path=os.path.join(data_base_dir, "ori_dataset", "cosql_dataset/sql_state_tracking/", "cosql_train.json")
table_data_path=os.path.join(data_base_dir, "ori_dataset", "cosql_dataset", "tables.json")
elif dataset_name == "sparc":
dataset_path=os.path.join(data_base_dir, "ori_dataset", dataset_name, "train.json")
else:
raise NotImplementedError
# dataset_output_path_base=os.path.join(data_base_dir, "preprocessed_dataset", dataset_name, "train.bin")
elif mode == "dev":
if dataset_name in ["spider", "sparc"] :
dataset_path=os.path.join(data_base_dir, "ori_dataset", dataset_name, "dev.json")
elif dataset_name == "cosql":
db_dir = os.path.join(data_base_dir, "ori_dataset", "cosql_dataset", "database")
dataset_path=os.path.join(data_base_dir, "ori_dataset", "cosql_dataset/sql_state_tracking/", "cosql_dev.json")
table_data_path=os.path.join(data_base_dir, "ori_dataset", "cosql_dataset", "tables.json")
else:
raise NotImplementedError
# dataset_output_path=os.path.join(data_base_dir, "preprocessed_dataset", dataset_name, "dev.bin")
else:
raise NotImplementedError
dataset_output_path_base=os.path.join(data_base_dir, "preprocessed_dataset", dataset_name)
if not os.path.exists(os.path.join(data_base_dir, "preprocessed_dataset", dataset_name)):
os.makedirs(os.path.join(data_base_dir, "preprocessed_dataset", dataset_name))
return db_dir, table_data_path, table_out_path, dataset_path, dataset_output_path_base
def preprocessing_generate_lgerels(data_base_dir, dataset_name, mode, used_coref = False, use_dependency=False):
db_dir, table_data_path, table_out_path, dataset_path, dataset_output_path_base = init_dataset_path(data_base_dir, dataset_name, mode)
processor = Preprocessor(dataset_name, db_dir=db_dir, db_content=True)
# loading database and dataset
print(f"Dataset name: {dataset_name}")
print(f"Mode: {mode}")
if not os.path.exists(table_out_path):
with open(table_data_path, 'r') as load_f:
fcntl.flock(load_f.fileno(), fcntl.LOCK_EX)
tables_list = json.load(load_f)
print('Firstly, preprocess the original databases ...')
tables_list = align_tables_by_dataset_name(dataset_name, tables_list)
print('Tables alignments done...')
start_time = time.time()
tables = process_tables(processor, tables_list, table_out_path)
print('Databases preprocessing costs %.4fs .' % (time.time() - start_time))
else:
tables = pickle.load(open(table_out_path, 'rb'))
print('Databases has been preprocessed. Use cache.')
with open(dataset_path, 'r') as load_f:
fcntl.flock(load_f.fileno(), fcntl.LOCK_EX)
dataset = json.load(load_f)
start_time = time.time()
if not os.path.exists(os.path.join(dataset_output_path_base, f"{mode}.bin")):
dataset = process_dataset(processor, dataset, tables, dataset_name, mode, dataset_output_path_base, used_coref)
print('Dataset preprocessing costs %.4fs .' % (time.time() - start_time))
else:
print('Dataset has been preprocessed. Use cache.')
```
#### File: seq2seq/preprocess/transform_utils.py
```python
import pickle
import numpy as np
import functools
import itertools
def mul_mul_match(t5_toks_list, question_toks_list):
""""match two list of question toks"""
t5_index = [i for i in range(1, len(t5_toks_list)+1)]
question_index = [i for i in range(1, len(question_toks_list)+1)]
index_pair = list(itertools.product(t5_index, question_index))
for i, j in index_pair:
t5_toks = "".join(t5_toks_list[:i])
question_toks = "".join(question_toks_list[:j])
if t5_toks == question_toks:
return i, j
return -1,-1
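# Illustrative example (not part of the original file):
#   mul_mul_match(["ab", "cd", "e"], ["abc", "d", "e"]) returns (2, 2), because the first two
#   tokens of each list concatenate to the same string "abcd"; (-1, -1) means no prefix aligns.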
def mul_mul_match_changeOrder(t5_toks_list, question_toks_list):
""""match two list of question toks"""
t5_index = [i for i in range(0, len(t5_toks_list))]
t5_index.reverse()
question_index = [i for i in range(0, len(question_toks_list))]
question_index.reverse()
index_pair = list(itertools.product(t5_index, question_index))
for i, j in index_pair:
t5_toks = "".join(t5_toks_list[i:])
question_toks = "".join(question_toks_list[j:])
if t5_toks == question_toks:
return i, j
return -1,-1
def cmp(str1, str2):
l1 = str1.split('#')
l2 = str2.split('#')
if (int(l1[0]!=l2[0])):
return -1 if int(l1[0])<int(l2[0]) else 1
else:
return -1 if int(l1[1])<int(l2[1]) else 1
def get_idx_list(res_dict, dataset_name):
if dataset_name in ["cosql", "sparc"]:
key = res_dict.keys()
key = [k.split("_")[-1] for k in key if "relations" in k and "tree" not in k]
key.sort(key = functools.cmp_to_key(cmp))
total_res = [[key[0]]]
tmp = []
for i in range(1, len(key)):
tmp.insert(0, key[i])
if ("#" in key[i] and i+1 < len(key) and key[i].split('#')[0] == key[i+1].split('#')[0]):
continue
a = tmp.copy()
total_res.append(a)
elif dataset_name in ["spider"]:
total_res = [['-1']]
return total_res
def get_idx_list_changeOrder(res_dict, dataset_name):
if dataset_name in ["cosql", "sparc"]:
key = res_dict.keys()
key = [k.split("_")[-1] for k in key if "relations" in k and "tree" not in k]
key.sort(key = functools.cmp_to_key(cmp))
total_res = [[key[0]]]
tmp = []
for i in range(1, len(key)):
tmp.append(key[i])
if ("#" in key[i] and i+1 < len(key) and key[i].split('#')[0] == key[i+1].split('#')[0]):
continue
a = tmp.copy()
total_res.append(a)
elif dataset_name in ["spider"]:
total_res = [['-1']]
return total_res
def isValid(idx, maxlen):
if idx >=0 and idx<maxlen:
return True
return False
def find_sep_mullen(item_list, sep_item):
start = 0
times = 2
sep_list = []
while start < len(item_list):
try:
index = item_list.index(sep_item, start)
start = index+1
if isValid(start, len(item_list)):
if (item_list[start] == sep_item):
sep_list.append(index)
except:
break
sep_list.append(len(item_list))
return sep_list
def find_all_sep_index_from_list(item_list, sep_item):
start = 0
sep_list = []
while start < len(item_list):
try:
index = item_list.index(sep_item, start)
start = index+1
sep_list.append(index)
except:
break
sep_list.append(len(item_list))
return sep_list
def find_all_sep_pair_from_list(item_list, sep_item_1, sep_item_2):
start = 0
sep_list = []
while start < len(item_list):
try:
index_1 = item_list.index(sep_item_1, start)
start = index_1+1
index_2 = item_list.index(sep_item_2, start)
start = index_2+1
sep_list.append((index_1, index_2))
except:
break
sep_list.append(len(item_list))
return sep_list
def raise_key(ori_dict, add_num):
res_dict = {}
for ori_key in ori_dict.keys():
new_key = ori_key + add_num
res_dict[new_key] = ori_dict[ori_key]
return res_dict
def merge_two_dict(ori_dict, change_dict, add_num):
res_dict = {}
res_dict.update(ori_dict)
res_dict.update(raise_key(change_dict, add_num))
return res_dict
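# Illustrative example (not part of the original file):
#   merge_two_dict({0: "a"}, {0: "b"}, add_num=10) -> {0: "a", 10: "b"}
# i.e. the second dict's keys are shifted by add_num before the two dicts are merged.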
def decode_from_dict(t5_tokenizer, d, t5_toks_ids):
v = [t5_toks_ids[i] for item in d.values() for i in item]
print(t5_tokenizer.decode(v).replace("</s>", ""))
def decode_from_pair_dict(t5_tokenizer, d, t5_toks_ids):
v = [t5_toks_ids[id] for items in d.values() for pair in items for id in pair]
if len(v) > 0:
print(t5_tokenizer.decode(v).replace("</s>", ""))
print(t5_tokenizer.decode(t5_toks_ids).replace("</s>", ""))
def tokid2sent(t5_tokenizer, t5_toks_id):
print(t5_tokenizer.decode(t5_toks_id).replace("</s>", ""))
```
#### File: seq2seq/utils/relation_data_collator.py
```python
import random
import warnings
import numpy as np
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
from transformers.tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase
from transformers.file_utils import PaddingStrategy
@dataclass
class DataCollatorForSeq2Seq:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
model (:class:`~transformers.PreTrainedModel`):
The model that is being trained. If set and the model has the `prepare_decoder_input_ids_from_labels` method, use it to
prepare the `decoder_input_ids`.
This is useful when using `label_smoothing` to avoid calculating loss twice.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (:obj:`int`, `optional`, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
"""
tokenizer: PreTrainedTokenizerBase
model: Optional[Any] = None
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
return_tensors: str = "pt"
def __call__(self, features, return_tensors=None):
import numpy as np
if return_tensors is None:
return_tensors = self.return_tensors
labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None
# We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the
# same length to return tensors.
if labels is not None:
max_label_length = max(len(l) for l in labels)
if self.pad_to_multiple_of is not None:
max_label_length = (
(max_label_length + self.pad_to_multiple_of - 1)
// self.pad_to_multiple_of
* self.pad_to_multiple_of
)
padding_side = self.tokenizer.padding_side
for feature in features:
remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"]))
if isinstance(feature["labels"], list):
feature["labels"] = (
feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"]
)
elif padding_side == "right":
feature["labels"] = np.concatenate([feature["labels"], remainder]).astype(np.int64)
else:
feature["labels"] = np.concatenate([remainder, feature["labels"]]).astype(np.int64)
relations = [feature["relations"] for feature in features] if "relations" in features[0].keys() else None
input_ids = [feature["input_ids"] for feature in features] if "input_ids" in features[0].keys() else None
# We have to pad the relation matrixs before calling `tokenizer.pad` as this method won't pad them and needs them of the
# same length to return tensors.
if relations is not None:
sub_len = [len(r)-len(i) for r,i in zip(relations, input_ids)]
assert not(any(sub_len)), "the relations is not equal with input_ids"
max_input_ids = max(len(i) for i in input_ids)
max_relation_length = max(len(r) for r in relations)
assert max_input_ids==max_relation_length, "max_input_ids is not equal to max_relation_length"
max_length = max(max_input_ids, max_relation_length)
for feature in features:
relation_pad_length = max_length - len(feature['relations'])
feature["relations"] = np.pad(np.array(feature["relations"]),((0,relation_pad_length),(0,relation_pad_length)),'constant',constant_values = (0,0)) #constant_values表示填充值,且(before,after)的填充值等于(0,0)
# print("type(features:)", type(features))
# print("type(features[0]:)", type(features[0]))
# print("len(features[0]:)", len(features[0]['input_ids']))
# print("(features[0]:)", features[0]['input_ids'])
features = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=return_tensors,
)
# print("type(features:)", type(features))
# print("type(features[0]:)", type(features[0]))
# print("After pad len(features[0]:)", len(features[0]['input_ids']))
# print("After pad (features[0]:)", features[0]["input_ids"])
# import os;os._exit()
# prepare decoder_input_ids
if self.model is not None and hasattr(self.model, "prepare_decoder_input_ids_from_labels"):
decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features["labels"])
features["decoder_input_ids"] = decoder_input_ids
return features
```
#### File: seq2seq/utils/spider.py
```python
import json
import torch
import numpy as np
from typing import Optional
from datasets.arrow_dataset import Dataset
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from seq2seq.utils.dataset import DataTrainingArguments, normalize, serialize_schema
from seq2seq.utils.trainer import Seq2SeqTrainer, EvalPrediction
def spider_get_input(
question: str,
serialized_schema: str,
prefix: str,
) -> str:
return prefix + question.strip() + " " + serialized_schema.strip()
def spider_get_target(
query: str,
db_id: str,
normalize_query: bool,
target_with_db_id: bool,
) -> str:
_normalize = normalize if normalize_query else (lambda x: x)
return f"{db_id} | {_normalize(query)}" if target_with_db_id else _normalize(query)
def spider_add_serialized_schema(ex: dict, data_training_args: DataTrainingArguments) -> dict:
serialized_schema = serialize_schema(
question=ex["question"],
db_path=ex["db_path"],
db_id=ex["db_id"],
db_column_names=ex["db_column_names"],
db_table_names=ex["db_table_names"],
schema_serialization_type=data_training_args.schema_serialization_type,
schema_serialization_randomized=data_training_args.schema_serialization_randomized,
schema_serialization_with_db_id=data_training_args.schema_serialization_with_db_id,
schema_serialization_with_db_content=data_training_args.schema_serialization_with_db_content,
normalize_query=data_training_args.normalize_query,
)
return {"serialized_schema": serialized_schema}
def spider_pre_process_function(
batch: dict,
max_source_length: Optional[int],
max_target_length: Optional[int],
data_training_args: DataTrainingArguments,
tokenizer: PreTrainedTokenizerBase,
) -> dict:
prefix = data_training_args.source_prefix if data_training_args.source_prefix is not None else ""
inputs = [
spider_get_input(question=question, serialized_schema=serialized_schema, prefix=prefix)
for question, serialized_schema in zip(batch["question"], batch["serialized_schema"])
]
model_inputs: dict = tokenizer(
inputs,
max_length=max_source_length,
padding=False,
truncation=True,
return_overflowing_tokens=False,
)
targets = [
spider_get_target(
query=query,
db_id=db_id,
normalize_query=data_training_args.normalize_query,
target_with_db_id=data_training_args.target_with_db_id,
)
for db_id, query in zip(batch["db_id"], batch["query"])
]
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(
targets,
max_length=max_target_length,
padding=False,
truncation=True,
return_overflowing_tokens=False,
)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
class SpiderTrainer(Seq2SeqTrainer):
def _post_process_function(
self, examples: Dataset, features: Dataset, predictions: np.ndarray, stage: str
) -> EvalPrediction:
inputs = self.tokenizer.batch_decode([f["input_ids"] for f in features], skip_special_tokens=True)
label_ids = [f["labels"] for f in features]
if self.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
_label_ids = np.where(label_ids != -100, label_ids, self.tokenizer.pad_token_id)
decoded_label_ids = self.tokenizer.batch_decode(_label_ids, skip_special_tokens=True)
metas = [
{
"query": x["query"],
"question": x["question"],
"context": context,
"label": label,
"db_id": x["db_id"],
"db_path": x["db_path"],
"db_table_names": x["db_table_names"],
"db_column_names": x["db_column_names"],
"db_foreign_keys": x["db_foreign_keys"],
}
for x, context, label in zip(examples, inputs, decoded_label_ids)
]
predictions = self.tokenizer.batch_decode(predictions, skip_special_tokens=True)
assert len(metas) == len(predictions)
with open(f"{self.args.output_dir}/predictions_{stage}.json", "w") as f:
json.dump(
[dict(**{"prediction": prediction}, **meta) for prediction, meta in zip(predictions, metas)],
f,
indent=4,
)
return EvalPrediction(predictions=predictions, label_ids=label_ids, metas=metas)
def _compute_metrics(self, eval_prediction: EvalPrediction) -> dict:
predictions, label_ids, metas = eval_prediction
if self.target_with_db_id:
# Remove database id from all predictions
predictions = [pred.split("|", 1)[-1].strip() for pred in predictions]
# TODO: using the decoded reference labels causes a crash in the spider evaluator
# if self.ignore_pad_token_for_loss:
# # Replace -100 in the labels as we can't decode them.
# label_ids = np.where(label_ids != -100, label_ids, tokenizer.pad_token_id)
# decoded_references = self.tokenizer.batch_decode(label_ids, skip_special_tokens=True)
# references = [{**{"query": r}, **m} for r, m in zip(decoded_references, metas)]
references = metas
return self.metric.compute(predictions=predictions, references=references)
# # Change it to our custom loss
# def compute_loss(self, model, inputs, return_outputs=False):
# """
# How the loss is computed by Trainer. By default, all models return the loss in the first element.
# Subclass and override for custom behavior.
# """
# if self.label_smoother is not None and "labels" in inputs:
# labels = inputs.pop("labels")
# else:
# labels = None
# outputs = model(**inputs)
# # Save past state if it exists
# # TODO: this needs to be fixed and made cleaner later.
# if self.args.past_index >= 0:
# self._past = outputs[self.args.past_index]
# if labels is not None:
# loss = self.label_smoother(outputs, labels)
# else:
# # We don't use .loss here since the model may return tuples instead of ModelOutput.
# loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
# def get_relation_norm(model):
# '''
# get relation params norm
# '''
# norm_loss = 0
# for name, param in model.parameters():
# if 'relation' in name:
# norm_loss += 0.5 * torch.sum(param**2)
# return norm_loss
# loss += get_relation_norm(model)
# return (loss, outputs) if return_outputs else loss
```
#### File: seq2seq/utils/trainer.py
```python
import collections
from typing import Dict, List, Optional, NamedTuple, Any, Tuple, Union
import transformers.trainer_seq2seq
from transformers.trainer_utils import PredictionOutput, speed_metrics
from transformers.optimization import get_scheduler, Adafactor, AdamW
from transformers.trainer_pt_utils import get_parameter_names
from transformers.trainer_utils import ShardedDDPOption
from transformers.file_utils import is_sagemaker_mp_enabled
from datasets.arrow_dataset import Dataset
from datasets.metric import Metric
import numpy as np
import time
import torch
import torch.nn as nn
from transformers.deepspeed import is_deepspeed_zero3_enabled
from packaging import version
from seq2seq.utils.custom_lr_scheduler import get_scheduler_custom
if version.parse(torch.__version__) >= version.parse("1.6"):
from torch.cuda.amp import autocast
if is_sagemaker_mp_enabled():
import smdistributed.modelparallel.torch as smp
class EvalPrediction(NamedTuple):
predictions: List[str]
label_ids: np.ndarray
metas: List[dict]
class Seq2SeqTrainer(transformers.trainer_seq2seq.Seq2SeqTrainer):
def __init__(
self,
metric: Metric,
*args,
eval_examples: Optional[Dataset] = None,
ignore_pad_token_for_loss: bool = True,
target_with_db_id: bool = False,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.metric = metric
self.eval_examples = eval_examples
self.compute_metrics = self._compute_metrics
self.ignore_pad_token_for_loss = ignore_pad_token_for_loss
self.target_with_db_id = target_with_db_id
def _compute_metrics(self, eval_prediction: EvalPrediction) -> dict:
raise NotImplementedError()
def _post_process_function(
self, examples: Dataset, features: Dataset, predictions: np.ndarray, stage: str
) -> EvalPrediction:
raise NotImplementedError()
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
eval_examples: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
max_length: Optional[int] = None,
max_time: Optional[int] = None,
num_beams: Optional[int] = None,
) -> Dict[str, float]:
self._max_length = max_length
self._max_time = max_time
self._num_beams = num_beams
# memory metrics - must set up as early as possible
self._memory_tracker.start()
eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
eval_dataloader = self.get_eval_dataloader(eval_dataset)
eval_examples = self.eval_examples if eval_examples is None else eval_examples
start_time = time.time()
# Temporarily disable metric computation, we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
try:
output: PredictionOutput = self.evaluation_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
finally:
self.compute_metrics = compute_metrics
# We might have removed columns from the dataset so we put them back.
if isinstance(eval_dataset, Dataset):
eval_dataset.set_format(
type=eval_dataset.format["type"],
columns=list(eval_dataset.features.keys()),
)
if eval_examples is not None and eval_dataset is not None and self.compute_metrics is not None:
eval_preds = self._post_process_function(
eval_examples,
eval_dataset,
output.predictions,
"eval_{}".format(self.state.epoch),
)
output.metrics.update(self.compute_metrics(eval_preds))
n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
# Prefix all keys with metric_key_prefix + '_'
for key in list(output.metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
output.metrics[f"{metric_key_prefix}_{key}"] = output.metrics.pop(key)
self.log(output.metrics)
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output.metrics
def predict(
self,
test_dataset: Dataset,
test_examples: Dataset,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
max_length: Optional[int] = None,
max_time: Optional[int] = None,
num_beams: Optional[int] = None,
) -> PredictionOutput:
self._max_length = max_length
self._max_time = max_time
self._num_beams = num_beams
# memory metrics - must set up as early as possible
self._memory_tracker.start()
if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
# Temporarily disable metric computation, we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
try:
output: PredictionOutput = self.evaluation_loop(
test_dataloader,
description="Prediction",
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
finally:
self.compute_metrics = compute_metrics
if self.compute_metrics is not None:
# We might have removed columns from the dataset so we put them back.
if isinstance(test_dataset, Dataset):
test_dataset.set_format(
type=test_dataset.format["type"],
columns=list(test_dataset.features.keys()),
)
eval_preds = self._post_process_function(
test_examples, test_dataset, output.predictions, metric_key_prefix)
output.metrics.update(self.compute_metrics(eval_preds))
output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))
# Prefix all keys with metric_key_prefix + '_'
for key in list(output.metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
output.metrics[f"{metric_key_prefix}_{key}"] = output.metrics.pop(key)
self.log(output.metrics)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on :obj:`model` using obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
Return:
Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
labels (each being optional).
"""
if not self.args.predict_with_generate or prediction_loss_only:
return super().prediction_step(
model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys
)
has_labels = "labels" in inputs
inputs = self._prepare_inputs(inputs)
# XXX: adapt synced_gpus for fairscale as well
gen_kwargs = {
"max_length": self._max_length if self._max_length is not None else self.model.config.max_length,
"num_beams": self._num_beams if self._num_beams is not None else self.model.config.num_beams,
"synced_gpus": True if is_deepspeed_zero3_enabled() else False,
}
if "relations" in inputs.keys():
generated_tokens = self.model.generate(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
relations=inputs["relations"],
**gen_kwargs,
)
else:
generated_tokens = self.model.generate(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
**gen_kwargs,
)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])
with torch.no_grad():
if self.use_amp:
with autocast():
outputs = model(**inputs)
else:
outputs = model(**inputs)
if has_labels:
if self.label_smoother is not None:
loss = self.label_smoother(outputs, inputs["labels"]).mean().detach()
else:
loss = (outputs["loss"] if isinstance(outputs, dict) else outputs[0]).mean().detach()
else:
loss = None
if self.args.prediction_loss_only:
return (loss, None, None)
labels = inputs["labels"]
if labels.shape[-1] < gen_kwargs["max_length"]:
labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])
return (loss, generated_tokens, labels)
def create_optimizer(self):
"""
Setup the optimizer.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through `optimizers`, or subclass and override this method in a subclass.
"""
if self.optimizer is None:
decay_parameters = get_parameter_names(self.model, [nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]
# decay_parameters = [name for name in decay_parameters if "shared" not in name]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if n in decay_parameters],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if n not in decay_parameters],
"weight_decay": 0.0,
},
]
optimizer_cls = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (self.args.adam_beta1, self.args.adam_beta2),
"eps": self.args.adam_epsilon,
}
optimizer_kwargs["lr"] = self.args.learning_rate
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if is_sagemaker_mp_enabled():
self.optimizer = smp.DistributedOptimizer(self.optimizer)
return self.optimizer
def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):
"""
Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or
passed as an argument.
Args:
num_training_steps (int): The number of training steps to do.
"""
custom_schedule_lr_list = set(["step_lr", "multi_step_lr", "exponential_lr", "cosine_annealing_lr"])
# self.args.lr_scheduler_type = "step_lr"
# lr_scheduler_type = "step_lr"
lr_scheduler_type = ""
if self.lr_scheduler is None:
if lr_scheduler_type in custom_schedule_lr_list:
self.lr_scheduler = get_scheduler_custom(
# self.args.lr_scheduler_type,
lr_scheduler_type,
optimizer=self.optimizer if optimizer is None else optimizer,
)
else:
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
optimizer=self.optimizer if optimizer is None else optimizer,
num_warmup_steps=self.args.get_warmup_steps(num_training_steps),
num_training_steps=num_training_steps,
)
return self.lr_scheduler
``` |
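The `prediction_step` above pads generated token sequences out to `max_length` before the loss is computed and the tensors are returned, so that batches of different lengths can later be concatenated. Below is a minimal sketch of what a helper like `_pad_tensors_to_max_len` typically does; the pad token id of 0 is an assumption made purely for illustration.
```python
import torch

def pad_tensors_to_max_len(tensor: torch.Tensor, max_length: int, pad_token_id: int = 0) -> torch.Tensor:
    # Right-pad a (batch, seq_len) tensor of token ids up to max_length.
    padded = tensor.new_full((tensor.shape[0], max_length), pad_token_id)
    padded[:, : tensor.shape[-1]] = tensor
    return padded

tokens = torch.tensor([[5, 6, 7]])
print(pad_tensors_to_max_len(tokens, 6))  # tensor([[5, 6, 7, 0, 0, 0]])
```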
{
"source": "jiexingtianxia/AutopodspecLint",
"score": 2
} |
#### File: jiexingtianxia/AutopodspecLint/autopodspecLint.py
```python
import os, sys
import fileinput
import time
new_tag = raw_input('Enter the new project version number: ')
pod_sources = 'https://github.com/jiexingtianxia/G_Hey_podSpecs.git'
git_sources = 'https://github.com/jiexingtianxia/G_OSLib.git'
project_name = 'G_Hey_podSpecs'
podspec_name = 'G_OSLib.podspec'
project_path = os.getcwd()
new_tag = new_tag.strip()
lib_cmd = ''
pod_push_cmd = ''
spec_file_path = project_path + "/" + podspec_name
def updateVersion():
f = open(spec_file_path,'r+')
infos = f.readlines()
f.seek(0, 0)
file_data = ""
new_line = ""
for line in infos:
if line.find('s.version ') != -1:
line = ' s.version = \'%s\' \n' % new_tag
elif line.find('s.source ') != -1:
line = ' s.source = { :git => \'%s\', :tag => \'%s\' } \n' % (git_sources,new_tag)
else :
print('==================')
            print('******************%s' % line)
file_data += line
with open(spec_file_path, 'w', ) as f1:
f1.write(file_data)
f.close()
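# updateVersion example (hypothetical podspec content): a line such as
#   s.version = '1.0.0'
# is rewritten to use new_tag, and the s.source line is rewritten to point at
# git_sources with :tag => new_tag.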
def podCmd():
global lib_cmd
global pod_push_cmd
source_suffix = 'https://github.com/CocoaPods/Specs.git --allow-warnings'
lib_cmd = 'pod lib lint --sources='
lib_cmd += git_sources
lib_cmd += ','
lib_cmd += source_suffix
pod_push_cmd = 'pod repo push ' + project_name + ' ' + podspec_name
pod_push_cmd += ' --sources='
pod_push_cmd += pod_sources
pod_push_cmd += ','
pod_push_cmd += source_suffix
def libLint():
print('*****************waiting for pod lib lint checking 🍺 🍺 🍺')
os.system(lib_cmd)
def gitPush():
print('*****************waiting for git push 🍺 🍺 🍺')
os.system('git add .')
commit_desc = 'version_' + new_tag
commit_cmd = 'git commit -m ' + commit_desc
os.system(commit_cmd)
r = os.popen('git symbolic-ref --short -q HEAD')
current_branch = r.read()
r.close()
push_cmd = 'git push origin ' + current_branch
tag_cmd = 'git tag ' + new_tag
os.system(push_cmd)
os.system(tag_cmd)
os.system('git push --tags')
def podPush():
print('*****************waiting for pod push 🍺 🍺 🍺')
os.system(pod_push_cmd)
updateVersion()
podCmd()
libLint()
gitPush()
podPush()
``` |
{
"source": "Jiexin-Zheng/BERT_with_keras",
"score": 3
} |
#### File: Jiexin-Zheng/BERT_with_keras/optimization.py
```python
import re
import tensorflow as tf
import keras.backend as K
from keras.optimizers import Optimizer
from keras.legacy import interfaces
class AdamWeightDecayOpt(Optimizer):
"""Adam optimizer.
Default parameters follow those provided in the original paper.
# Arguments
lr: float >= 0. Learning rate.
        num_train_steps: total number of training batches over N epochs, i.e. np.ceil(train_samples/batch_size)*epochs.
        num_warmup_steps: number of warmup steps.
        weight_decay_rate: rate of weight decay applied to the weights.
        beta_1: float, 0 < beta < 1. Generally close to 1.
        beta_2: float, 0 < beta < 1. Generally close to 1.
        epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
        bias_corrected: boolean. Whether or not to apply bias correction in the Adam updates.
        exclude_from_weight_decay: list of str. Weights whose names match these patterns are excluded from weight decay.
# References
- [Adam - A Method for Stochastic Optimization]
(https://arxiv.org/abs/1412.6980v8)
- [On the Convergence of Adam and Beyond]
(https://openreview.net/forum?id=ryQu7f-RZ)
"""
def __init__(self, lr, num_train_steps, num_warmup_steps, weight_decay_rate=0.0, beta_1=0.9, beta_2=0.999,
epsilon=1e-6, bias_corrected=False, exclude_from_weight_decay=None, **kwargs):
super(AdamWeightDecayOpt, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.epsilon = epsilon
self.weight_decay_rate = weight_decay_rate
self.exclude_from_weight_decay = exclude_from_weight_decay
self.num_train_steps = num_train_steps
self.num_warmup_steps = num_warmup_steps
self.bias_corrected = bias_corrected
@interfaces.legacy_get_updates_support
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = tf.train.polynomial_decay(
self.lr,
self.iterations,
self.num_train_steps,
end_learning_rate=0.0,
power=1.0,
cycle=False
)
# Implements linear warmup. I.e., if global_step < num_warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
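        # Worked example (illustrative numbers): with lr=1e-4 and
        # num_warmup_steps=1000, step 100 trains at 100/1000 * 1e-4 = 1e-5;
        # once iterations exceed 1000, the polynomial decay above takes over.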
t = K.cast(self.iterations, K.floatx()) + 1
warmup_percent_done = K.cast(t / self.num_warmup_steps, dtype=K.floatx())
warmup_lr = self.lr * warmup_percent_done
is_warmup = K.cast(t < self.num_warmup_steps, dtype=K.floatx())
lr = ((1.0 - is_warmup) * lr) + is_warmup * warmup_lr
ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
self.weights = [self.iterations] + ms + vs
for p, g, m, v in zip(params, grads, ms, vs):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
if self.bias_corrected:
m_t /= 1 - K.pow(self.beta_1, t)
v_t /= 1 - K.pow(self.beta_2, t)
update = m_t / (K.sqrt(v_t) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
            # Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
param_name = self._get_variable_name(p.name)
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * p
p_t = p - lr * update
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'epsilon': self.epsilon}
base_config = super(AdamWeightDecayOpt, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
``` |
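For context, here is a minimal usage sketch of this optimizer with a toy model, targeting the TF1-era Keras the class is written against; the import path, layer sizes and step counts are assumptions made for illustration.
```python
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from optimization import AdamWeightDecayOpt  # assumed import path for the class above

train_samples, batch_size, epochs = 1024, 32, 3
num_train_steps = int(np.ceil(train_samples / batch_size)) * epochs

optimizer = AdamWeightDecayOpt(
    lr=2e-5,
    num_train_steps=num_train_steps,
    num_warmup_steps=int(0.1 * num_train_steps),
    weight_decay_rate=0.01,
    exclude_from_weight_decay=["bias", "beta", "gamma"],
)

model = Sequential([Dense(2, activation="softmax", input_shape=(16,))])
model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy")
```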
{
"source": "jiexuan/evaluation_tools",
"score": 3
} |
#### File: evo-plot/contrib/rename_est_name.py
```python
from evo.tools import file_interface
DESC = """rename the 'est_name' field in a result file"""
def main(res_file, new_name):
result = file_interface.load_res_file(res_file)
result.info["est_name"] = new_name
file_interface.save_res_file(res_file, result)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description=DESC)
parser.add_argument("res_file", help="evo result file")
parser.add_argument("new_name", help="new 'est_name'")
args = parser.parse_args()
main(args.res_file, args.new_name)
```
#### File: evo/tools/settings_template.py
```python
import pkgutil
def get_default_plot_backend():
backends = {"PyQt5": "Qt5Agg", "PyQt4": "Qt4Agg"}
for pkg in backends:
if pkgutil.find_loader(pkg) is not None:
return backends[pkg]
return "TkAgg"
# default settings with documentation
DEFAULT_SETTINGS_DICT_DOC = {
"plot_xyz_realistic": (
True,
"Equal axes ratio in 'xyz' plot mode for realistic trajectory plots."
),
"plot_backend": (
get_default_plot_backend(),
"matplotlib backend - default: 'Qt{4, 5}Agg' (if PyQt is installed) or 'TkAgg'."
),
"plot_hideref": (
False,
"Hide the reference trajectory in trajectory plots."
),
"plot_linewidth": (
1.5,
"Line width value supported by matplotlib."
),
"plot_usetex": (
False,
"Use the LaTeX renderer configured in plot_texsystem for plots.",
),
"plot_texsystem": (
"pdflatex",
"'xelatex', 'lualatex' or 'pdflatex', see: https://matplotlib.org/users/pgf.html",
),
"plot_fontfamily": (
"sans-serif",
"Font family string supported by matplotlib."
),
"plot_fontscale": (
1.0,
"Font scale value, see: https://seaborn.pydata.org/generated/seaborn.set.html"
),
"plot_split": (
False,
"Show / save each figure separately instead of a collection."
),
"plot_figsize": (
[6, 6],
"The default size of one (sub)plot figure (width, height)."
),
"plot_trajectory_cmap": (
"jet",
"matplotlib color map used for mapping values on a trajectory.",
),
"plot_multi_cmap": (
"none",
"Color map for coloring plots from multiple data sources.\n"
+ "'none' will use the default color palette, see plot_seaborn_palette."
),
"plot_invert_xaxis": (
False,
"Invert the x-axis of plots."
),
"plot_invert_yaxis": (
False,
"Invert the y-axis of plots."
),
"plot_seaborn_style": (
"darkgrid",
"Defines the plot background/grid.\n"
+ "Options: 'whitegrid', 'darkgrid', 'white' or 'dark'."
),
"plot_seaborn_palette": (
"deep",
"Default color palette of seaborn. Can also be a list of colors.\n"
+ "See: https://seaborn.pydata.org/generated/seaborn.color_palette.html"
),
"plot_export_format": (
"pdf",
"File format supported by matplotlib for exporting plots."
),
"table_export_format": (
"csv",
"Format for exporting tables, e.g. 'csv', 'excel', 'latex', 'json'...",
),
"table_export_data": (
"stats",
"Which data to export: 'info', 'stats' or 'error_array'.",
),
"table_export_transpose": (
True,
"Transpose tables for export."
),
"save_traj_in_zip": (
False,
"Store backup trajectories in result zip files (increases size)."
),
"logging_format": (
"%(message)s",
"Format string for the logging module (console only)."
),
"logfile_enabled": (
False,
"Whether to write a logfile to the home folder."
)
}
# without documentation
DEFAULT_SETTINGS_DICT = {k: v[0] for k, v in DEFAULT_SETTINGS_DICT_DOC.items()}
``` |
{
"source": "JieYang031/deepcpg",
"score": 3
} |
#### File: deepcpg/scripts/dcpg_data.py
```python
from __future__ import print_function
from __future__ import division
from collections import OrderedDict  # dict subclass that remembers insertion order
import os
import sys
import warnings
import argparse
import logging
import h5py as h5
import numpy as np
import pandas as pd
import six
from six.moves import range
#mainly used the self-defined functions in ./deepcpg/data/*.py.
from deepcpg import data as dat # import folder ./deepcpg/data/, use functions in this folder
from deepcpg.data import annotations as an
from deepcpg.data import stats
from deepcpg.data import dna
from deepcpg.data import fasta
from deepcpg.data import feature_extractor as fext
from deepcpg.utils import make_dir
#the input is a list, length=#samples. sample has a pd data frame with: chromo, pos.
#the output of this function is merged pd data frame with chromo and pos. Any position ever exist in one sample
#will be kept and all positions will be sorted
def prepro_pos_table(pos_tables):
"""Extracts unique positions and sorts them."""
if not isinstance(pos_tables, list): #check if pos_tables is a list. This may happen if only one file was read for it.
#isinstance(object, classinfo), used to check if the object belongs to the class.
pos_tables = [pos_tables]
pos_table = None
for next_pos_table in pos_tables:
if pos_table is None: #for 1st round of loop: pos_table = None because just assigned, next_pos_table = pos_tables[0]
pos_table = next_pos_table #pos_table = next_pos_table = pos_tables[0]
else: #for 2nd and all following round, pos_table != None
pos_table = pd.concat([pos_table, next_pos_table]) # concatenate all samples' pos together.
pos_table = pos_table.groupby('chromo').apply(
lambda df: pd.DataFrame({'pos': np.unique(df['pos'])})) ##keep only unique position value (int32).
#also, 'pos' has been grouped by chromo
pos_table.reset_index(inplace=True) #reset index, will show three columns, "chromo", "level_1", "pos"
pos_table = pos_table[['chromo', 'pos']] #select columns
#>>> pos_table.iloc[:10,]
# chromo level_1 pos
#0 1 0 3000827
#1 1 1 3001007
#2 1 2 3001018
pos_table.sort_values(['chromo', 'pos'], inplace=True)
return pos_table
def split_ext(filename):
"""Remove file extension from `filename`."""
return os.path.basename(filename).split(os.extsep)[0] #return file name
def read_cpg_profiles(filenames, log=None, *args, **kwargs):
"""Read methylation profiles.
Input files can be gzip compressed.
Returns
-------
dict
`dict (key, value)`, where `key` is the output name and `value` the CpG
table.
"""
    cpg_profiles = OrderedDict()  # a dictionary that remembers insertion order; when iterating,
    # items are returned in the order their keys were first added.
for filename in filenames:
if log:
log(filename)
cpg_file = dat.GzipFile(filename, 'r') #Wrapper to read and write gzip-compressed files.
output_name = split_ext(filename) #Remove file extension from `filename`, defined above
cpg_profile = dat.read_cpg_profile(cpg_file, sort=True, *args, **kwargs) #Read CpG profile from TSV or bedGraph file.
#return :class:`pandas.DataFrame` with columns `chromo`, `pos`, `value`.
cpg_profiles[output_name] = cpg_profile #cpg_profiles store multiple sample information
cpg_file.close()
return cpg_profiles #return ordered dictory, each item is a pandas data frame
def extract_seq_windows(seq, pos, wlen, seq_index=1, assert_cpg=False):
"""Extracts DNA sequence windows at positions.
Parameters
----------
seq: str
DNA sequence.
pos: list
Positions at which windows are extracted.
wlen: int
Window length.
seq_index: int
Offset at which positions start.
assert_cpg: bool
If `True`, check if positions in `pos` point to CpG sites.
Returns
-------
np.array
Array with integer-encoded sequence windows.
"""
delta = wlen // 2
nb_win = len(pos) #nb_win=32768, which is the default chunk size
seq = seq.upper() #change to upper case
seq_wins = np.zeros((nb_win, wlen), dtype='int8') #seq_wins.shape = (32768, 1001)
for i in range(nb_win):
p = pos[i] - seq_index
if p < 0 or p >= len(seq):
raise ValueError('Position %d not on chromosome!' % (p + seq_index))
if seq[p:p + 2] != 'CG':
warnings.warn('No CpG site at position %d!' % (p + seq_index))
win = seq[max(0, p - delta): min(len(seq), p + delta + 1)]
        if len(win) < wlen:  # the full window cannot be extracted from the FASTA sequence,
            # which happens when the target position lies close to a chromosome end
win = max(0, delta - p) * 'N' + win #add NNN to seq
win += max(0, p + delta + 1 - len(seq)) * 'N' #add something and assign the new value to it.
#this equals to win = win + max(0, p + delta + 1 - len(seq)) * 'N'
assert len(win) == wlen #assert: used to catch bugs
seq_wins[i] = dna.char_to_int(win) #Translate chars of single sequence `seq` to ints
#ATGCN were transferred to 0-4
# Randomly choose missing nucleotides
idx = seq_wins == dna.CHAR_TO_INT['N'] #idx is numpy array with both True/False value
seq_wins[idx] = np.random.randint(0, 4, idx.sum())
#np.random.randint(0, 4, idx.sum()).shape = (992,) which is the same shape as idx
assert seq_wins.max() < 4 #make sure this is true, or it will stop and report error
if assert_cpg:
assert np.all(seq_wins[:, delta] == 3) #Test whether all array elements along a given axis evaluate to True.
assert np.all(seq_wins[:, delta + 1] == 2)
return seq_wins
def map_values(values, pos, target_pos, dtype=None, nan=dat.CPG_NAN):
"""Maps `values` array at positions `pos` to `target_pos`.
Inserts `nan` for uncovered positions.
"""
assert len(values) == len(pos) #judge T/F, T: keep running; F: stop the program
assert np.all(pos == np.sort(pos)) #check if pos has been sorted
assert np.all(target_pos == np.sort(target_pos)) #check if target_pos has been sorted
values = values.ravel() #returns contiguous flattened array(1D array with all the input-array
#elements and with the same type as it).
#however, values = cpg_table.value.values, it is already a 1D array
pos = pos.ravel()
target_pos = target_pos.ravel() #maybe just double verify??
idx = np.in1d(pos, target_pos) #Test whether each element of a 1-D array is also present in a second array.
#pos is much shorter than the target_pos, but make sure the first is fully covered by the second.
pos = pos[idx] #idx is all TRUE.
values = values[idx]
if not dtype:
dtype = values.dtype #dtype set as int8
target_values = np.empty(len(target_pos), dtype=dtype) #create empty array with specified shape and type
target_values.fill(nan) #fill it with missing, default is -1
idx = np.in1d(target_pos, pos).nonzero()[0] #Return the indices of the elements that are non-zero.
assert len(idx) == len(values)
assert np.all(target_pos[idx] == pos)
target_values[idx] = values
return target_values
def map_cpg_tables(cpg_tables, chromo, chromo_pos):
"""Maps values from cpg_tables to `chromo_pos`.
Positions in `cpg_tables` for `chromo` must be a subset of `chromo_pos`.
Inserts `dat.CPG_NAN` for uncovered positions.
"""
chromo_pos.sort() #sorts the elements of a given list in a specific order, numpy array with 1D
mapped_tables = OrderedDict() #create dictionary
for name, cpg_table in six.iteritems(cpg_tables): #cpg_tables, OrderedDict,
##cpg_tables: sample items, each item stored each sample's chro pos, and value
cpg_table = cpg_table.loc[cpg_table.chromo == chromo] #selected cpg_table, #samples rows * 3 column
cpg_table = cpg_table.sort_values('pos') #sort by position column
mapped_table = map_values(cpg_table.value.values, #1D numpy array, (266747,)
cpg_table.pos.values, #1D numpy array, (266747,)
chromo_pos) #1D numpy array, (402166,)
#return numpy 1D array. (402166,), exit 1, 0, -1 (nan default value)
assert len(mapped_table) == len(chromo_pos)
mapped_tables[name] = mapped_table
return mapped_tables
def format_out_of(out, of):
return '%d / %d (%.1f%%)' % (out, of, out / of * 100)
def get_stats_meta(names):
funs = OrderedDict()
for name in names:
fun = stats.get(name) #Return object from module by its name
if name in ['mode', 'cat_var', 'cat2_var', 'diff']:
dtype = np.int8
else:
dtype = np.float32
funs[name] = (fun, dtype)
return funs
def select_dict(data, idx):
data = data.copy()
for key, value in six.iteritems(data):
if isinstance(value, dict):
data[key] = select_dict(value, idx)
else:
data[key] = value[idx]
return data
def annotate(anno_file, chromo, pos):
anno_file = dat.GzipFile(anno_file, 'r')
anno = pd.read_table(anno_file, header=None, usecols=[0, 1, 2],
dtype={0: 'str', 1: 'int32', 2: 'int32'})
anno_file.close()
anno.columns = ['chromo', 'start', 'end']
anno.chromo = anno.chromo.str.upper().str.replace('CHR', '')
anno = anno.loc[anno.chromo == chromo]
anno.sort_values('start', inplace=True)
start, end = an.join_overlapping(anno.start.values, anno.end.values)
anno = np.array(an.is_in(pos, start, end), dtype='int8')
return anno
class App(object):
def run(self, args):
name = os.path.basename(args[0])
parser = self.create_parser(name)
opts = parser.parse_args(args[1:])
return self.main(name, opts)
def create_parser(self, name):
p = argparse.ArgumentParser(
prog=name,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Creates DeepCpG data for training and testing.')
# I/O
p.add_argument(
'--pos_file',
help='File with positions of CpG sites that are to be predicted.'
' If missing, only CpG sites that are observed in at least one of'
' the given cells will be used.')
p.add_argument(
'--cpg_profiles',
help='Input single-cell methylation profiles in dcpg or bedGraph'
' format that are to be imputed',
nargs='+')
p.add_argument(
'--cpg_wlen',
help='If provided, extract `cpg_wlen`//2 neighboring CpG sites',
type=int)
p.add_argument(
'--cpg_cov',
help='Minimum CpG coverage. Only use CpG sites for which the true'
' methylation state is known in at least that many cells.',
type=int,
default=1)
p.add_argument(
'--dna_files',
help='Directory or FASTA files named "*.chromosome.`chromo`.fa*"'
' with the DNA sequences for chromosome `chromo`.',
nargs='+')
p.add_argument(
'--dna_wlen',
help='DNA window length',
type=int,
default=1001)
p.add_argument(
'--anno_files',
help='Files with genomic annotations that are used as input'
' features. Currently ignored by `dcpg_train.py`.',
nargs='+')
p.add_argument(
'-o', '--out_dir',
help='Output directory',
default='.')
g = p.add_argument_group('output statistics')
g.add_argument(
'--cpg_stats',
help='Per CpG statistics derived from single-cell profiles.'
' Required, e.g., for predicting mean methylation levels or'
' cell-to-cell variance.',
nargs='+',
choices=['mean', 'mode', 'var', 'cat_var', 'cat2_var', 'entropy',
'diff', 'cov'])
g.add_argument(
'--cpg_stats_cov',
help='Minimum coverage for computing per CpG statistics',
type=int,
default=3)
g.add_argument(
'--win_stats',
help='Window-based output statistics derived from single-cell'
' profiles. Required, e.g., for predicting mean methylation levels'
' or cell-to-cell variance.',
nargs='+',
choices=['mean', 'mode', 'var', 'cat_var', 'cat2_var', 'entropy',
'diff', 'cov'])
g.add_argument(
'--win_stats_wlen',
help='Window lengths for computing statistics',
type=int,
nargs='+',
default=[1001, 2001, 3001, 4001, 5001])
g = p.add_argument_group('advanced arguments')
g.add_argument(
'--chromos',
nargs='+',
help='Chromosomes that are used')
g.add_argument(
'--nb_sample',
type=int,
help='Maximum number of samples')
g.add_argument(
'--nb_sample_chromo',
type=int,
help='Number of random samples from each chromosome')
g.add_argument(
'--chunk_size',
type=int,
default=32768,
help='Maximum number of samples per output file. Should be'
' divisible by batch size.')
g.add_argument(
'--seed',
help='Seed of random number generator',
type=int,
default=0)
g.add_argument(
'--verbose',
help='More detailed log messages',
action='store_true')
g.add_argument(
'--log_file',
help='Write log messages to file')
return p
def main(self, name, opts):
if opts.seed is not None:
np.random.seed(opts.seed)
logging.basicConfig(filename=opts.log_file,
format='%(levelname)s (%(asctime)s): %(message)s')
log = logging.getLogger(name)
if opts.verbose:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
log.debug(opts)
# Check input arguments
if not opts.cpg_profiles:
if not (opts.pos_file or opts.dna_files):
raise ValueError('Position table and DNA database expected!')
if opts.dna_wlen and opts.dna_wlen % 2 == 0:
            raise ValueError('--dna_wlen must be odd!')
if opts.cpg_wlen and opts.cpg_wlen % 2 != 0:
            raise ValueError('--cpg_wlen must be even!')
# Parse functions for computing output statistics
cpg_stats_meta = None
win_stats_meta = None
if opts.cpg_stats:
cpg_stats_meta = get_stats_meta(opts.cpg_stats)
if opts.win_stats:
win_stats_meta = get_stats_meta(opts.win_stats)
make_dir(opts.out_dir)
outputs = OrderedDict()
# Read single-cell profiles if provided
if opts.cpg_profiles:
log.info('Reading CpG profiles ...')
outputs['cpg'] = read_cpg_profiles(
opts.cpg_profiles,
chromos=opts.chromos,
nb_sample=opts.nb_sample,
nb_sample_chromo=opts.nb_sample_chromo,
log=log.info)
# Create table with unique positions
if opts.pos_file: #the pos_file provide the CpG positions which need to be predicted
# Read positions from file
log.info('Reading position table ...')
pos_table = pd.read_table(opts.pos_file, usecols=[0, 1],
dtype={0: str, 1: np.int32},
header=None, comment='#')
pos_table.columns = ['chromo', 'pos']
pos_table['chromo'] = dat.format_chromo(pos_table['chromo'])
pos_table = prepro_pos_table(pos_table)
else:
            # Extract positions from profiles if no position file is provided; predict positions observed in at least one cell.
pos_tables = []
for cpg_table in list(outputs['cpg'].values()):
pos_tables.append(cpg_table[['chromo', 'pos']])
pos_table = prepro_pos_table(pos_tables)
if opts.chromos:
pos_table = pos_table.loc[pos_table.chromo.isin(opts.chromos)]
if opts.nb_sample_chromo:
pos_table = dat.sample_from_chromo(pos_table, opts.nb_sample_chromo)
if opts.nb_sample:
pos_table = pos_table.iloc[:opts.nb_sample]
log.info('%d samples' % len(pos_table))
make_dir(opts.out_dir)
# Iterate over chromosomes
# ------------------------
for chromo in pos_table.chromo.unique():
log.info('-' * 80)
log.info('Chromosome %s ...' % (chromo))
idx = pos_table.chromo == chromo ##idx is T/F for whether the entries are equal to the chromo
chromo_pos = pos_table.loc[idx].pos.values #a numpy array with 1D data
chromo_outputs = OrderedDict()
if 'cpg' in outputs:
# Concatenate CpG tables into single nb_site x nb_output matrix
chromo_outputs['cpg'] = map_cpg_tables(outputs['cpg'],
chromo, chromo_pos)
#chromo_outputs, one array called 'cpg', 'cpg' has #sample array,
#each item is mapped table of target_pos with value filled
#OrderedDict([('BS27_1_SER', array([1, 1, 1, ..., 1, 1, 0], dtype=int8)),
#('BS27_3_SER', array([-1, 1, 1, ..., 1, -1, -1], dtype=int8))])
chromo_outputs['cpg_mat'] = np.vstack(
list(chromo_outputs['cpg'].values())).T
#add one more array to it. np.vstack, stack array sequence vertically
#chromo_outputs['cpg_mat'].shape=(402166, 2)
#402166 is the CHR1 target pos number, 2 is the input two samples, BS27_1_SER, BS27_3_SER
assert len(chromo_outputs['cpg_mat']) == len(chromo_pos)
if 'cpg_mat' in chromo_outputs and opts.cpg_cov:
cov = np.sum(chromo_outputs['cpg_mat'] != dat.CPG_NAN, axis=1)
assert np.all(cov >= 1)
idx = cov >= opts.cpg_cov
tmp = '%s sites matched minimum coverage filter'
tmp %= format_out_of(idx.sum(), len(idx))
log.info(tmp)
if idx.sum() == 0:
continue
chromo_pos = chromo_pos[idx]
chromo_outputs = select_dict(chromo_outputs, idx)
# Read DNA of chromosome
chromo_dna = None
if opts.dna_files: #this will only read the corresponding chromosome sequence
chromo_dna = fasta.read_chromo(opts.dna_files, chromo) #chromo_dna is string, len=195471971 for chr1
annos = None
if opts.anno_files:
log.info('Annotating CpG sites ...')
annos = dict()
for anno_file in opts.anno_files:
name = split_ext(anno_file)
annos[name] = annotate(anno_file, chromo, chromo_pos)
# Iterate over chunks
# -------------------
nb_chunk = int(np.ceil(len(chromo_pos) / opts.chunk_size))
for chunk in range(nb_chunk):
log.info('Chunk \t%d / %d' % (chunk + 1, nb_chunk))
chunk_start = chunk * opts.chunk_size
chunk_end = min(len(chromo_pos), chunk_start + opts.chunk_size)
chunk_idx = slice(chunk_start, chunk_end)
chunk_pos = chromo_pos[chunk_idx]
chunk_outputs = select_dict(chromo_outputs, chunk_idx) #OrderedDict()
#chunk_outputs is 1D array
filename = 'c%s_%06d-%06d.h5' % (chromo, chunk_start, chunk_end)
filename = os.path.join(opts.out_dir, filename)
chunk_file = h5.File(filename, 'w')
# Write positions
chunk_file.create_dataset('chromo', shape=(len(chunk_pos),),
dtype='S2') #create_dataset() in default for h5py
chunk_file['chromo'][:] = chromo.encode() #set the chunk_file['chromo'] = 1 for all.
#chunk_file['chromo'].shape = (32768,)
chunk_file.create_dataset('pos', data=chunk_pos, dtype=np.int32)
#chunk_file['pos'].shape = (32768,) # the size is default chunk_size
if len(chunk_outputs): #len(chunk_outputs)=2
out_group = chunk_file.create_group('outputs')
#for now, type(out_group) = <class 'h5py._hl.group.Group'>
#list(out_group) = []
# Write cpg profiles
if 'cpg' in chunk_outputs:
for name, value in six.iteritems(chunk_outputs['cpg']):
#name = ["BS27_1_SER", 'BS27_3_SER'] # the sample name
#value= 2 numpy array, both with shape=(32768,)
assert len(value) == len(chunk_pos)
# Round continuous values
out_group.create_dataset('cpg/%s' % name,
data=value.round(),
dtype=np.int8,
compression='gzip')
#type(out_group)= <class 'h5py._hl.group.Group'>
#list(out_group) = ['cpg']
#list(out_group['cpg']) = ['BS27_1_SER', 'BS27_3_SER']
# Compute and write statistics
if cpg_stats_meta is not None:
log.info('Computing per CpG statistics ...')
cpg_mat = np.ma.masked_values(chunk_outputs['cpg_mat'],
dat.CPG_NAN)
#cpg_mat.shape=(32768, 2)
mask = np.sum(~cpg_mat.mask, axis=1)
mask = mask < opts.cpg_stats_cov
for name, fun in six.iteritems(cpg_stats_meta):
stat = fun[0](cpg_mat).data.astype(fun[1])
stat[mask] = dat.CPG_NAN
assert len(stat) == len(chunk_pos)
out_group.create_dataset('cpg_stats/%s' % name,
data=stat,
dtype=fun[1],
compression='gzip')
#until here:
#>>> chunk_file.visit(printname)
#chromo
#outputs
#outputs/cpg
#outputs/cpg/BS27_1_SER
                #outputs/cpg/BS27_3_SER
#pos
# Write input features
in_group = chunk_file.create_group('inputs')
# DNA windows
if chromo_dna:
log.info('Extracting DNA sequence windows ...')
dna_wins = extract_seq_windows(chromo_dna, pos=chunk_pos,
wlen=opts.dna_wlen)
#give the fasta sequence of one chromosome ('chromo_dna'), and targeted position ('chunk_pos')
#, and wlen=1001, return a numpy array with shape as (32768, 1001). The array has been transfered as
#number rather than base pair
assert len(dna_wins) == len(chunk_pos)
in_group.create_dataset('dna', data=dna_wins, dtype=np.int8,
compression='gzip')
#>>> in_group.visit(printname) = dna
# CpG neighbors
if opts.cpg_wlen:
log.info('Extracting CpG neighbors ...')
cpg_ext = fext.KnnCpgFeatureExtractor(opts.cpg_wlen // 2)
context_group = in_group.create_group('cpg')
# outputs['cpg'], since neighboring CpG sites might lie
# outside chunk borders and un-mapped values are needed
for name, cpg_table in six.iteritems(outputs['cpg']):
#name="BS27_1_SER" and "BS27_3_SER"
#cpg_table = numpy array, with three columns information for each input sample.
cpg_table = cpg_table.loc[cpg_table.chromo == chromo]
state, dist = cpg_ext.extract(chunk_pos,
cpg_table.pos.values,
cpg_table.value.values) #extract the cpg distance and state with wlen
nan = np.isnan(state)
state[nan] = dat.CPG_NAN #set nan value as -1, which means unknown
dist[nan] = dat.CPG_NAN
# States can be binary (np.int8) or continuous
# (np.float32).
state = state.astype(cpg_table.value.dtype, copy=False) #set data type
dist = dist.astype(np.float32, copy=False)
assert len(state) == len(chunk_pos)
assert len(dist) == len(chunk_pos)
assert np.all((dist > 0) | (dist == dat.CPG_NAN))
group = context_group.create_group(name)
group.create_dataset('state', data=state,
compression='gzip')
group.create_dataset('dist', data=dist,
compression='gzip')
#list(group) = ['state','dist']
if win_stats_meta is not None and opts.cpg_wlen:
log.info('Computing window-based statistics ...')
states = []
dists = []
cpg_states = []
cpg_group = out_group['cpg']
context_group = in_group['cpg']
for output_name in six.iterkeys(cpg_group):
state = context_group[output_name]['state'].value
states.append(np.expand_dims(state, 2))
dist = context_group[output_name]['dist'].value
dists.append(np.expand_dims(dist, 2))
cpg_states.append(cpg_group[output_name].value)
# samples x outputs x cpg_wlen
states = np.swapaxes(np.concatenate(states, axis=2), 1, 2)
dists = np.swapaxes(np.concatenate(dists, axis=2), 1, 2)
cpg_states = np.expand_dims(np.vstack(cpg_states).T, 2)
cpg_dists = np.zeros_like(cpg_states)
states = np.concatenate([states, cpg_states], axis=2)
dists = np.concatenate([dists, cpg_dists], axis=2)
for wlen in opts.win_stats_wlen:
idx = (states == dat.CPG_NAN) | (dists > wlen // 2)
states_wlen = np.ma.masked_array(states, idx)
group = out_group.create_group('win_stats/%d' % wlen)
for name, fun in six.iteritems(win_stats_meta):
stat = fun[0](states_wlen)
if hasattr(stat, 'mask'):
idx = stat.mask
stat = stat.data
if np.sum(idx):
stat[idx] = dat.CPG_NAN
group.create_dataset(name, data=stat, dtype=fun[1],
compression='gzip')
if annos:
log.info('Adding annotations ...')
group = in_group.create_group('annos')
for name, anno in six.iteritems(annos):
group.create_dataset(name, data=anno[chunk_idx],
dtype='int8',
compression='gzip')
chunk_file.close()
log.info('Done!')
return 0
if __name__ == '__main__':
app = App()
app.run(sys.argv)
``` |
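To make the resulting layout concrete, the sketch below reads back one of the chunk files this script writes; the file name is hypothetical and follows the `c<chromo>_<start>-<end>.h5` pattern used above, and the `inputs/dna` group only exists when `--dna_files` was given.
```python
import h5py

# Hypothetical chunk produced by dcpg_data.py for chromosome 1.
with h5py.File("c1_000000-032768.h5", "r") as f:
    print(list(f))                 # e.g. ['chromo', 'inputs', 'outputs', 'pos']
    print(f["pos"].shape)          # (n_sites,)
    print(f["inputs/dna"].shape)   # (n_sites, dna_wlen), integer-encoded bases
    print(list(f["outputs/cpg"]))  # one dataset per input methylation profile
```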
{
"source": "JieYang031/TCAG-WGS-CNV-workflow",
"score": 3
} |
#### File: JieYang031/TCAG-WGS-CNV-workflow/format_erds_results.py
```python
import os, sys, re
tags = {}
tag_pos = {}
tags_of_interest = ["FORMAT","INFO"]
for key in tags_of_interest:
tags[key] = {}
def print_tags():
for t in tags.keys():
print t, tags[t]
if len(sys.argv) != 3:
print "<program> <erds vcf output> <formatted results>"
sys.exit(0)
vcf_file_name = sys.argv[1]
out_file_name = sys.argv[2]
t_file_name = out_file_name + ".temp"
if not os.path.isfile(vcf_file_name):
print "erds output not found ..."
sys.exit(1)
if os.path.isfile(out_file_name):
print "Delete file", out_file_name, "and rerun"
sys.exit(1)
header = []
t_file = open(t_file_name ,'w')
o_file = open(out_file_name,'w')
info_header = {}
vcf_file = open(vcf_file_name)
num_samples = 0
vcf_file = open(vcf_file_name)
for line in vcf_file:
line = line.replace("\n","")
if line[0:2] == "##":
words = line.split("<")
if len(words) < 2:
continue
tag = words[0].replace("#","").replace("=","")
id = words[1].split(",")[0].replace("ID=","")
if tag in tags_of_interest:
tags[tag][id]=0
elif line[0]=="#":
line = line.replace("POS","START")
words = line.split("\t")
header = words
new_header = words[0:7]
new_header.extend(tags[words[7]].keys())
num_samples = len(words)-9 + 1
if num_samples == 0:
print "Error: no samples in vcf file!!"
sys.exit(0)
for i in range (1,num_samples):
id = 8+i
for key in tags[words[8]].keys():
new_header.append(words[id] + "|" + key)
print >> t_file, "\t".join(new_header)
else:
words = line.split("\t")
entry = []
entry = words[0:7]
#format "INFO" column
info = words[7].split(";")
info_dets = {}
for t in tags[header[7]].keys():
info_dets[t] = "-"
for i in info:
i_1 = i.split("=")
if i_1[0]=="DB" or i_1[0] == "DS" or i_1[0] == "INV5" or i_1[0] == "INV3":
continue
elif i_1[0]=="PRECISE":
info_dets["IMPRECISE"]="PRECISE"
continue
elif i_1[0]=="IMPRECISE" :
info_dets["IMPRECISE"]=i_1[0]
continue
info_dets[i_1[0]]=i_1[1]
#put together the entry
for t in tags[header[7]].keys():
entry.append(info_dets[t])
#format "FORMAT" column
format_info = words[8].split(":")
for i in range (1,num_samples):
id = 8+i
sample = words[id].split(":")
s_format = {}
for t in tags[header[8]].keys():
s_format[t]="-"
if len(sample) != 1:
for f in range(0,len(format_info)):
s_format[format_info[f]]=sample[f]
#put together the entry
for t in tags[header[8]].keys():
entry.append(s_format[t])
print >> t_file, "\t".join(entry)
vcf_file.close()
t_file.close()
#format temp file
t_file = open(t_file_name)
##sample CHROM START END SVTYPE SIZE sample|CN FILTER sample|REFCN SVLEN ALT IMPRECISE
fixed_column_index = {"CHROM":-1,"START":-1,"END":-1,"SVTYPE":-1,"SAMPLE|CN":-1,"FILTER":-1,"SAMPLE|REFCN":-1,"SVLEN":-1,"ALT":-1,"IMPRECISE":-1,"SIZE":-1}
flex_column_index = {}
sample = re.sub(".*/","",out_file_name).replace(".txt","").replace(".erds.vcf","")
for line in t_file:
line = line.replace("\n","")
if line[0] == "#":
line = line.replace("#","")
words = line.split("\t")
for i in range (0,len(words)):
words[i]=re.sub(r'.*\|','SAMPLE|',words[i])
if words[i] in fixed_column_index.keys():
fixed_column_index[words[i]] = i
for key in fixed_column_index.keys():
if fixed_column_index[key] == -1 and key != "SIZE":
print "Required column " , key, " is missing.."
sys.exit(0)
new_header = "#sample\tCHROM\tSTART\tEND\tSVTYPE\tSIZE\tFILTER\tSAMPLE|REFCN\tSAMPLE|CN\tIMPRECISE\tSVLEN\tALT"
print >> o_file, new_header
else:
words = line.split("\t")
entry = []
chrom = words[fixed_column_index["CHROM"]]
start = words[fixed_column_index["START"]]
end = words[fixed_column_index["END"]]
svtype = words[fixed_column_index["SVTYPE"]]
filter = words[fixed_column_index["FILTER"]]
refcn = words[fixed_column_index["SAMPLE|REFCN"]]
samplecn = words[fixed_column_index["SAMPLE|CN"]]
imprecise = words[fixed_column_index["IMPRECISE"]]
svlen = words[fixed_column_index["SVLEN"]]
alt = words[fixed_column_index["ALT"]]
if end == "-":
end = start
entry.extend([sample,words[fixed_column_index["CHROM"]],start,end,svtype,`int(end)-int(start)+1`,filter,refcn,samplecn,imprecise,svlen,alt])
print >> o_file, "\t".join(entry)
t_file.close()
o_file.close()
```
#### File: JieYang031/TCAG-WGS-CNV-workflow/IQR_samtools_depth.py
```python
import argparse
import CNVworkflowlib
####################################
### Parse command-line arguments ###
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("samtools_depth_filename", type=str, nargs="?", default="-")
args = parser.parse_args()
#####################################
def find_cumulative_val(v, total, percent):
cumulative_sum = 0
for i in sorted(list(v.keys())):
cumulative_sum = cumulative_sum + v[i]
if cumulative_sum > total * (percent / 100):
quartile_num = i
break
return(quartile_num)
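# Worked example (illustrative counts): for depth_count={1: 2, 2: 4, 3: 2, 10: 2}
# and total=10, the 25th percentile depth is 2 and the 75th percentile depth is 3,
# so the printed IQR would be 1.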
samtools_depth_file = CNVworkflowlib.file_or_stdin(args.samtools_depth_filename)
depth_count = {}
total = 0
for line in samtools_depth_file:
depth = int(line.rstrip().split("\t")[2])
if depth not in depth_count:
depth_count[depth] = 0
depth_count[depth] += 1
total += 1
upper_quartile_num = find_cumulative_val(depth_count, total, 75)
lower_quartile_num = find_cumulative_val(depth_count, total, 25)
IQR = upper_quartile_num - lower_quartile_num
print(IQR)
```
#### File: JieYang031/TCAG-WGS-CNV-workflow/MANTA.py
```python
import sys
from VCF import VCF
from VCF import VCF_filter
ZYG={'0/1':'HET', '1/1':'HOMO', './.':'UNKNOWN', '0/0':'REF'}
class MANTA (VCF):
def __init__ (self, name):
super (MANTA, self).__init__(name)
def filter_func(self, chrom, beg, end, type, E):
if int(end) - int (beg) < 50: return True
return False
def run (self, what=None):
super (MANTA, self).run('MANTA', False, self.filter_func, what=what)
# =========================================================================
class MANTA_filter:
chrom_col=0
start_col=1
end_col=2
size_col=3
type_col=4
filter_col=5
prog_col=6
other_col=7
@staticmethod
def is_good (h):
if h['GT'] == '0/0': return False
return True
# =========================================================================
class MANTA_filter_mix:
def __init__ (self, fname, use_default, min_pe=5, min_sr=5, min_qual=0.0):
self.tab_file=fname
self.qual=min_qual
self.min_pe=float(min_pe)
self.min_sr=float(min_sr)
self.allelic_ratio=0.2
if self.qual=='DEFAULT':
self.qual=-1
else:
self.qual=float(self.qual)
print >> sys.stderr, 'min_pe =', min_pe
print >> sys.stderr, 'min_sr =', min_sr
print >> sys.stderr, 'min_qual =', min_qual
def is_good (self, E):
h={}
F=E[MANTA_filter.other_col].split ('|')
info=F[0].split (';')
names=F[1].split(':')
values=F[2].split(':')
for a in info:
b=a.split ('=')
if len(b) == 1: val = b[0]
else:
val=b[1]
h[b[0]]=val
for i, nm in enumerate (names):
h[nm] = values[i]
if not MANTA_filter.is_good (h):
return False
if not h.has_key ('PR'): h['PR']="0,0"
if not h.has_key ('SR'): h['SR']="0,0"
h['PR']=map(lambda x: int(x), h['PR'].split(','))
h['SR']=map(lambda x: int(x), h['SR'].split(','))
ref_pe=int(h['PR'][0])
alt_pe=int(h['PR'][1])
ref_sr=int(h['SR'][0])
alt_sr=int(h['SR'][1])
total=1.0*(ref_pe+alt_pe+ref_sr+alt_sr)
#if (alt_pe + alt_sr) / total < self.allelic_ratio:
# return False
flag=(alt_pe >= self.min_pe or
(alt_pe+alt_sr) >= (self.min_pe+self.min_sr)/2.0 or
alt_sr >= self.min_sr)
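        # Example (illustrative counts): with PR=3,4 and SR=2,6 and the default
        # min_pe=min_sr=5, alt_pe=4 and alt_sr=6, so the combined-evidence term
        # (4 + 6) >= (5 + 5) / 2 keeps the call.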
#print >> sys.stderr, ref_pe, ref_sr, alt_pe, alt_sr, (alt_pe + alt_sr) / total, flag
if not flag: return False
if self.qual > -1:
if float (h['QUAL']) < self.qual:
return False
else:
# Note we use the overall filter
if not E[MANTA_filter.filter_col] == 'PASS':
return False
return True
# -------------------------------------------------------------- |
def dofilter (self, outfile='/dev/stdout', Filter=False):
in_f=open (self.tab_file)
out_f=open (outfile, 'w')
for line in in_f:
E=line.rstrip('\n').split()
if not self.is_good(E): continue
print line,
out_f.close()
in_f.close()
# =======================================================================
class MANTA_filter_qual:
def __init__ (self, fname, use_default, min_pe=5, min_sr=5, min_qual=0.0):
self.tab_file=fname
self.qual=float(min_qual)
def is_good (self, E):
h={}
F=E[MANTA_filter.other_col].split ('|')
info=F[0].split (';')
names=F[1].split(':')
values=F[2].split(':')
for a in info:
b=a.split ('=')
if len(b) == 1: val = b[0]
else:
val=b[1]
h[b[0]]=val
for i, nm in enumerate (names):
h[nm] = values[i]
if not MANTA_filter.is_good (h):
return False
if float (h['QUAL']) < self.qual:
return False
return True
# -------------------------------------------------------------- |
def dofilter (self, outfile='/dev/stdout', Filter=False):
in_f=open (self.tab_file)
out_f=open (outfile, 'w')
for line in in_f:
E=line.rstrip('\n').split()
if not self.is_good(E): continue
print line,
out_f.close()
in_f.close()
# =======================================================================
class MANTA_filter_default:
def __init__ (self, fname, use_default, min_dv=5, min_pe=5, min_sr=5, min_qual=0):
self.tab_file=fname
self.dv=min_dv
self.sr=min_sr
self.pe=min_pe
self.use_default=use_default
def is_good (self, E):
if self.use_default:
if E[MANTA_filter.filter_col] == 'PASS':
return True
else:
return False
return True
# -------------------------------------------------------------- |
def dofilter (self, outfile='/dev/stdout', Filter=False):
in_f=open (self.tab_file)
out_f=open (outfile, 'w')
for line in in_f:
E=line.rstrip('\n').split()
if not self.is_good(E): continue
print line,
out_f.close()
in_f.close()
# =======================================================================
class MANTA_param:
chrom_col=0
start_col=1
end_col=2
size_col=3
type_col=4
filter_col=5
prog_col=6
other_col=7
diff_col=8
confirm_col=9
def __init__ (self, fname):
self.tab_file=fname
# ------------------------------------------------
def get_param (self, E):
h={}
F=E[MANTA_param.other_col].split ('|')
gt_name=F[-2]
gt_info=F[-1]
names=gt_name.split(':')
GT=gt_info.split(':')
for nm, val in zip(names, GT):
h[nm]=val
pr_ref='0'
pr_var='0'
sr_ref='0'
sr_var='0'
if h.has_key ('PR'):
X=h['PR'].split(',')
pr_ref=X[0]
pr_var=X[1]
if h.has_key ('SR'):
X=h['SR'].split(',')
sr_ref=X[0]
sr_var=X[1]
for ele in F[0].split(';'):
print >> sys.stderr, ele
x=ele.split('=')
if len (x) > 1:
h[x[0]] = x[1]
else:
h[x[0]] = x[0]
zygosity=E[7].split('|')
zygosity=zygosity[-1].split(':')
confirm_count = 0
if not E[MANTA_param.confirm_col] == 'NotApplicable':
X=E[MANTA_param.confirm_col].split('|')
confirm_count=len(X)
print >> sys.stderr, pr_ref, pr_var, sr_ref, sr_var
return '\t'.join ( [':'.join (E[0:3]), h['QUAL'] ] + \
[pr_ref, pr_var, sr_ref, sr_var]+ \
[str(confirm_count)] + \
[ZYG[zygosity[0]]])
# ------------------------------------------------
def run (self, outfile='/dev/stdout'):
in_f=open (self.tab_file)
out_f=open (outfile, 'w')
print >> out_f, '\t'.join ( [ 'SIZE',
'QUAL',
'PR_REF',
'PR_VAR',
'SR_REF',
'SR_VAR',
'COUNT',
'ZYG'] )
for line in in_f:
E=line.rstrip('\n').split()
x=self.get_param (E)
print >> out_f, x
out_f.close()
in_f.close()
# =======================================================================
``` |