{
"source": "JinlongXuHIT/DjangoB2CDemo",
"score": 2
}
#### File: celery_tasks/sms/tasks.py
```python
import logging
from celery_tasks.main import app
from .yuntongxun.sms import CCP
logger = logging.getLogger('django')
# SMS verification code template ID
SMS_CODE_TEMP_ID = 1
@app.task(name='send_sms_code')
def send_sms_code(mobile, sms_code, expires):
    try:
        ccp = CCP()
        result = ccp.send_template_sms(mobile, [sms_code, expires], SMS_CODE_TEMP_ID)
    except Exception as e:
        logger.error('Exception while sending SMS verification code [mobile: %s, message: %s]' % (mobile, e))
    else:
        if result == 0:
            logger.info('SMS verification code sent [mobile: %s]' % mobile)
        else:
            logger.warning('Failed to send SMS verification code [mobile: %s]' % mobile)
```
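For context, a minimal sketch of how this task would be queued from the web app; the phone number, code, and expiry value are placeholders.
```python
from celery_tasks.sms.tasks import send_sms_code

# Placeholder arguments; `expires` uses whatever unit the SMS template expects.
send_sms_code.delay('13800000000', '123456', 5)
```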
#### File: apps/oauth/views.py
```python
from django.shortcuts import render
from rest_framework.response import Response
from rest_framework.views import APIView
from QQLoginTool.QQtool import OAuthQQ
from django.conf import settings
from rest_framework_jwt.settings import api_settings
from .utils import generate_save_user_token
from .models import OAuthQQUser
# Create your views here.
# url(r'^qq/authorization/$', views.QQAuthURLView.as_view()),
class QQAuthURLView(APIView):
def get(self, request):
        # 1. Get the `next` parameter
        # 2. Create the OAuth object
        # 3. Generate the login_url
        # 4. Return the login_url
next = request.query_params.get('next', '/')
oauth = OAuthQQ(client_id=settings.QQ_CLIENT_ID, client_secret=settings.QQ_CLIENT_SECRET,
redirect_uri=settings.QQ_REDIRECT_URI, state=next)
login_url = oauth.get_qq_url()
return Response({
"login_url": login_url
})
# url(r'^qq/user/$', views.QQAuthUserView.as_view()),
class QQAuthUserView(APIView):
def get(self, request):
"""
客户端端会发送code参数
1. 获取code
2. 获取access_token
3. 获取openid
4. 查询 OAuthQQUser 是否有openid=openid的这一条记录
5. 如果有:
返回 username,user_id,token
6. 如果没有:
返回 access_token ---> 被加密openid
:param request:
:return:
"""
code = request.query_params.get('code', None)
if not code:
            return Response({'message': 'code is required'}, status=400)
        oauth = OAuthQQ(client_id=settings.QQ_CLIENT_ID, client_secret=settings.QQ_CLIENT_SECRET,
                        redirect_uri=settings.QQ_REDIRECT_URI, state=request.query_params.get('state', '/'))
access_token = oauth.get_access_token(code)
openid = oauth.get_open_id(access_token)
users = OAuthQQUser.objects.filter(openid=openid)
        # Check whether the openid is already bound to a local user
        if users:
            # Already bound: return a JWT token for the user
oauth_user = users[0]
username = oauth_user.user.username
user_id = oauth_user.user.id
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
payload = jwt_payload_handler(oauth_user.user)
token = jwt_encode_handler(payload)
return Response({
'username': username,
'user_id': user_id,
'token': token
})
else:
return Response({
                'access_token': generate_save_user_token(openid)  # encrypted openid
})
    def post(self, request):
        """
        Data submitted by the client:
        1. mobile, password, sms_code, access_token
        :param request:
        :return:
        """
        pass
```
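The commented-out route patterns above imply a URL configuration along these lines; this is a reconstruction from those comments, not the project's actual urls.py.
```python
# apps/oauth/urls.py (reconstructed from the comments above)
from django.conf.urls import url
from . import views

urlpatterns = [
    url(r'^qq/authorization/$', views.QQAuthURLView.as_view()),
    url(r'^qq/user/$', views.QQAuthUserView.as_view()),
]
```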
{
"source": "jinlxz/CommandArgsParser",
"score": 4
}
#### File: jinlxz/CommandArgsParser/cmd_arg_parser.py
```python
import sys
class cmd_arg_parser(object):
def __init__(self,cmd_args):
self.index=1
"""dictionary to store all command line arguments after parsing the command line successfully.
the dictionary stores switch-styled options, key-value pair options, list of positional arguments.
you can refer to all the command line options by referring to this variable as follows:
cmd_arg_parser.real_cmd_options[option1]
cmd_arg_parser.real_cmd_options[option2]
cmd_arg_parser.real_cmd_options["position_arg_list"]
the option1,option2 keys are defined by devepers in self.cmd_args_list, refer to self.cmd_args_list for more information.
the value for position_arg_list key is a list of all positional arguments.
switch-styled options have the value True or False
"""
self.real_cmd_options={}
#the original list of command line arguments.
self.cmd_args_list=cmd_args
"""dictionary to represent all valid command line options, developer can add user-defined options here to add the options to the application.
adding a switch-styled option refer to the following format.
<option_name_cmd>:(self.process_bool_option,"<option_name_internal>")
adding a option of key-value pair refer to the following format.
<option_name_cmd>:(self.process_keyvalue_option,"<option_name_internal>")
option_name_cmd is the name of the option used in command line, type string.
option_name_internal is the name of the option as a key to stored in the self.real_cmd_options dictionary, type string.
"""
self.cmd_args_map={
"--help":(self.display_help,"help"),
"-h":(self.display_help,"help"),
"--version":(self.display_version,"version"),
"-ver":(self.display_version,"version"),
"--openfile":(self.process_keyvalue_option,"openfile"),
"--enable-smp":(self.process_bool_option,"enable-smp"),
"--":(self.process_option_separator,"separator")
}
def get_cmd_function(self,arg):
if self.cmd_args_map.get(arg) is not None:
return self.cmd_args_map.get(arg)
else:
if arg.startswith("-"):
print "invalid option {0}".format(arg)
return (self.display_help,"help")
else:
return (self.get_position_arg_list,"position_arg_list")
def process_bool_option(self,name):
        self.real_cmd_options[name] = True
self.index+=1
def process_keyvalue_option(self,name):
self.index+=1
self.real_cmd_options[name]=self.cmd_args_list[self.index]
self.index+=1
def display_help(self,name):
print "help information"
sys.exit(0)
def display_version(self,name):
print "version 1.0"
sys.exit(0)
def get_position_arg_list(self,name):
self.real_cmd_options[name]=self.cmd_args_list[self.index:]
self.index=len(self.cmd_args_list)
def process_option_separator(self,name):
self.index+=1
self.get_position_arg_list(name)
def parse_cmd_args(self):
while self.index<len(self.cmd_args_list):
p_arg = self.cmd_args_list[self.index]
arg_process_func,arg_name=self.get_cmd_function(p_arg)
arg_process_func(arg_name)
if __name__=="__main__":
cmd_parser=cmd_arg_parser(sys.argv)
cmd_parser.parse_cmd_args()
print cmd_parser.real_cmd_options
``` |
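A worked example of the parser above; parsing starts at index 1, so the first element stands in for argv[0].
```python
parser = cmd_arg_parser(["prog", "--openfile", "a.txt", "--enable-smp", "pos1", "pos2"])
parser.parse_cmd_args()
print(parser.real_cmd_options)
# {'openfile': 'a.txt', 'enable-smp': True, 'position_arg_list': ['pos1', 'pos2']}
```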
{
"source": "jinlygenius/sparrow_cloud",
"score": 2
}
#### File: management/commands/rabbitmq_consumer.py
```python
from django.core.management.base import BaseCommand
from ._sparrow_rabbitmq_consumer import rabbitmq_consumer
import logging
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'sparrow_rabbitmq_consumer'
def add_arguments(self, parser):
parser.add_argument('--queue', dest="queue", default='', type=str)
    def handle(self, *args, **kwargs):
        queue = kwargs.get('queue', None)
        if queue:
            rabbitmq_consumer(queue=queue)
        else:
            logger.error('Please pass the --queue argument when invoking this command')
            print('Please pass the --queue argument when invoking this command')
```
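For reference, the command can also be invoked programmatically via Django's call_command; the queue name below is a placeholder.
```python
from django.core.management import call_command

# Equivalent to: python manage.py rabbitmq_consumer --queue <queue_name>
call_command("rabbitmq_consumer", queue="orders")  # "orders" is a placeholder queue name
```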
#### File: sparrow_cloud/restclient/requests_client.py
```python
import requests
import logging
from django.conf import settings
from sparrow_cloud.utils.build_url import build_url
from sparrow_cloud.utils.get_acl_token import get_acl_token
from requests.exceptions import ConnectTimeout, ConnectionError, ReadTimeout
from sparrow_cloud.registry.service_discovery import consul_address
logger = logging.getLogger(__name__)
def get_settings_service_name():
"""获取settings中的配置"""
value = getattr(settings, 'SERVICE_CONF', '')
if value == '':
return ''
service_name = value.get('NAME', '')
return service_name
def request(method, service_conf, api_path, timeout, retry_times, *args, **kwargs):
error_message = None
service_name = get_settings_service_name()
request_service = service_conf['VALUE']
acl_token = get_acl_token(service_name)
if acl_token is not None:
params = kwargs.get('params', {})
params['acl_token'] = acl_token
kwargs['params'] = params
address_list = consul_address(service_conf)
exclude_addr = []
_address = None
    for retry in range(int(retry_times)):
        if len(address_list) > 1 and isinstance(address_list, list):
            address_list = [addr for addr in address_list if addr not in exclude_addr]
        try:
            url, address = build_url(address_list, api_path)
            _address = address
            res = requests.request(method=method, url=url, timeout=timeout, *args, **kwargs)
            return res
        except (ConnectionError, ConnectTimeout, ReadTimeout) as ex:
            exclude_addr.append(_address)
            error_message = ex.__str__()
            logger.error("requests_client error, service_name:{}, request_service:{}, api_path:{}, message:{}, retry:{}"
                         .format(service_name, request_service, api_path, error_message, retry + 1))
raise Exception("requests_client error, service_name: {}, request_service:{}, api_path:{}, message: {}"
.format(service_name, request_service, api_path, error_message))
def get(service_conf, api_path, timeout=10, retry_times=3, *args, **kwargs):
return request('get', service_conf, api_path, timeout, retry_times, *args, **kwargs)
def post(service_conf, api_path, timeout=10, retry_times=3, *args, **kwargs):
return request('post', service_conf, api_path, timeout, retry_times, *args, **kwargs)
def put(service_conf, api_path, timeout=10, retry_times=3, *args, **kwargs):
return request('put', service_conf, api_path, timeout, retry_times, *args, **kwargs)
def delete(service_conf, api_path, timeout=10, retry_times=3, *args, **kwargs):
return request('delete', service_conf, api_path, timeout, retry_times, *args, **kwargs)
```
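For illustration, a hedged sketch of how these helpers are typically called; the service_conf shape mirrors the one used in tests/test_register_command.py, and the path and payload are placeholders.
```python
# Hypothetical service_conf; the ENV_NAME/VALUE keys follow the shape used in the tests.
PERMISSION_SERVICE = {"ENV_NAME": "PERMISSION_SERVICE_HOST", "VALUE": "xxxxx-svc"}

# Resolves an address via consul and retries up to `retry_times` on connection errors.
res = post(PERMISSION_SERVICE, "/api/permission_i/register/", timeout=5, retry_times=2, json={"key": "value"})
print(res.status_code)
```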
#### File: sparrow_cloud/utils/validation_acl.py
```python
import jwt
import logging
from django.conf import settings
logger = logging.getLogger(__name__)
def get_acl_public_key():
    """Get the ACL public key from settings."""
    acl_middleware_value = getattr(settings, 'ACL_MIDDLEWARE', None)
    if acl_middleware_value:
        acl_public_key = acl_middleware_value.get('ACL_PUBLIC_KEY', None)
        if acl_public_key is None or acl_public_key == '':
            logger.error('sparrow_cloud error: ACL_PUBLIC_KEY not configured in ACL_MIDDLEWARE')
            raise Exception("sparrow_cloud error: ACL_PUBLIC_KEY not configured in ACL_MIDDLEWARE")
        return acl_public_key
    logger.error('sparrow_cloud error: ACL_MIDDLEWARE not configured')
    raise Exception("sparrow_cloud error: ACL_MIDDLEWARE not configured")
def validation_acl(acl_token):
"""validation acl token"""
public_key = get_acl_public_key()
    try:
        payload = jwt.decode(acl_token, public_key, algorithms=['RS256'])
        logger.info('sparrow_cloud: acl_validated:{}'.format(payload))
        return True, payload
    except Exception as ex:
        logger.info('sparrow_cloud: validation error, acl_token:{}, message:{}'.format(acl_token, ex))
        return False, {}
```
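A minimal usage sketch of the validator above; the token value is a placeholder pulled from wherever the request carries it.
```python
# Hypothetical token; in the middleware it comes from the request's query params.
is_valid, payload = validation_acl(acl_token="<acl-token-from-request>")
if is_valid:
    print(payload.get("service_name"))
```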
#### File: sparrow_cloud/tests/test_acl_middleware.py
```python
import os
import jwt
import time
import unittest
from django.conf import settings
from django.test import RequestFactory
from sparrow_cloud.middleware.acl_middleware import ACLMiddleware
from django.http import JsonResponse
os.environ["DJANGO_SETTINGS_MODULE"] = "tests.mock_settings"
def token():
private_key = settings.PRIVATE_KEY
payload = {
"service_name": 'test',
"ALL": 1,
"permission": [],
"exp": int(time.time() + 60*60),
"iat": int(time.time()),
"iss": "ACL"
}
acl_token = jwt.encode(payload, private_key, algorithm='RS256')
return acl_token
class TestACLMiddleware(unittest.TestCase):
rf = RequestFactory()
TOKEN = token()
def setUp(self):
os.environ["DJANGO_SETTINGS_MODULE"] = "tests.mock_settings"
    def test_acl(self):
        """Test: no remote_user and an invalid acl token"""
        request = self.rf.get('/acl', {'acl_token': '<KEY>'})
        self.assertEqual(ACLMiddleware().process_request(request).status_code,
                         JsonResponse({"message": "ACL verification failed"}, status=403).status_code)
    def test_acl1(self):
        """Test: no remote_user and a valid acl token"""
        request = self.rf.get('/acl', {'acl_token': self.TOKEN})
        self.assertEqual(ACLMiddleware().process_request(request), None)
    def test_acl2(self):
        """Test: with remote_user and a valid acl token"""
        rf1 = RequestFactory(REMOTE_USER='sssssssss')
        request = rf1.get('/acl', {'acl_token': self.TOKEN})
        self.assertEqual(ACLMiddleware().process_request(request), None)
    def test_acl3(self):
        """Test: with remote_user and an invalid acl token"""
        rf1 = RequestFactory(REMOTE_USER='sssssssss')
        request = rf1.get('/acl', {'acl_token': '123dsafsafaq321dsknfdsj358q744'})
        self.assertEqual(ACLMiddleware().process_request(request).status_code,
                         JsonResponse({"message": "ACL verification failed"}, status=403).status_code)
    def test_acl4(self):
        """Test: no acl token"""
        request = self.rf.get('/acl')
        self.assertEqual(ACLMiddleware().process_request(request), None)
def tearDown(self):
del os.environ["DJANGO_SETTINGS_MODULE"]
if __name__ == '__main__':
unittest.main()
```
#### File: sparrow_cloud/tests/test_get_acl_token.py
```python
import os
import time
import unittest
from django.conf import settings
from sparrow_cloud.utils.get_hash_key import get_hash_key
from sparrow_cloud.utils.get_acl_token import get_acl_token
class TestGetACLToken(unittest.TestCase):
def setUp(self):
os.environ["DJANGO_SETTINGS_MODULE"] = "tests.mock_settings"
def test_get_acl_token_from_settings(self):
acl_token_key = get_hash_key()
        setattr(settings, acl_token_key, {'acl_token': 'ACL_TOKEN<PASSWORD>', 'time': time.time()})
        self.assertEqual(get_acl_token('acl_test'), 'ACL_TOKEN<PASSWORD>')
def test_get_acl_token_from_cache(self):
from django.core.cache import cache
acl_token_key = get_hash_key()
        cache.set(acl_token_key, {'acl_token': 'ACL_TOKEN<PASSWORD>', 'time': time.time()})
        self.assertEqual(get_acl_token('acl_test'), 'ACL_TOKEN<PASSWORD>')
def test_settings_not_acl_conf(self):
settings.ACL_MIDDLEWARE = None
self.assertEqual(get_acl_token('acl_test'), None)
```
#### File: sparrow_cloud/tests/test_register_command.py
```python
import unittest
from unittest import mock
import django
from io import StringIO
from django.core.management import call_command
import os
from django.conf.urls import url
from django.http import HttpResponse
def detail(request, question_id):
return HttpResponse("You're looking at question %s." % question_id)
urlpatterns = [
url(r'^/ssss/xxx/$', detail),
url(r'^/ssuuu/xxddx/$', detail),
]
class RestClientTestCase(unittest.TestCase):
def setUp(self):
os.environ["DJANGO_SETTINGS_MODULE"] = "tests.mock_settings"
@mock.patch('sparrow_cloud.restclient.rest_client.post', return_value={})
def test_register_command_list(self, mock_post):
from django.conf import settings
self.setup_settings(settings)
django.setup()
out = StringIO()
os.environ["PERMISSION_SERVICE_HOST"] = "127.0.0.1:8001"
call_command('register_api_permission', '-d', '2', '-l', stdout=out)
self.assertEqual(out.read(), '')
@mock.patch('sparrow_cloud.restclient.rest_client.post', return_value={})
def test_register_command(self, mock_post):
from django.conf import settings
self.setup_settings(settings)
django.setup()
out = StringIO()
os.environ["PERMISSION_SERVICE_HOST"] = "127.0.0.1:8001"
call_command('register_api_permission', '-d', '2', stdout=out)
self.assertEqual(out.read(), '')
def tearDown(self):
del os.environ["PERMISSION_SERVICE_HOST"]
def setup_settings(self, settings):
settings.XX = "1"
settings.SECRET_KEY = "ss"
settings.ROOT_URLCONF = __name__
settings.SPARROW_PERMISSION_REGISTER_CONF = {
"PERMISSION_SERVICE": {
"ENV_NAME": "PERMISSION_SERVICE_HOST",
"VALUE": "xxxxx-svc"
},
"API_PATH": "/api/permission_i/register/"
}
settings.SERVICE_CONF = {
"NAME": "permiss"
}
        settings.CONSUL_CLIENT_ADDR = {
            "HOST": os.environ.get("CONSUL_IP", "127.0.0.1"),  # on k8s this env var may be a literal value or a value reference
            "PORT": os.environ.get("CONSUL_PORT", 8500)
        }
```
{
"source": "jinmang2/boostcamp_ai_tech_2",
"score": 2
}
#### File: nlp/ch04_rnn_variant/rnn_variant.py
```python
import math
import numbers
from typing import Optional, Tuple, Sequence, Union, Any
import torch
import torch.nn as nn
_TensorOrTensors = Union[torch.Tensor, Sequence[torch.Tensor]]
"""
@TODO
- check code
(h_torch, c_torch) = _VF.lstm_cell(
x, hx,
weight_ih, weight_hh,
bias_ih, bias_hh
)
h_torch = _VF.gru_cell(
x, hx,
weight_ih, weight_hh,
bias_ih, bias_hh
)
"""
class LSTMCell(nn.Module):
"""
    LSTM Cell class
    Much slower than torch.nn.modules.rnn.LSTMCell
    Matches the original LSTMCell computation within atol=1e-8, rtol=1e-4
    Read this for learning purposes!
"""
__constants__ = ['input_size', 'hidden_size', 'bias']
def __init__(self, input_size: int, hidden_size: int, bias: bool = True,
device=None, dtype=None):
super().__init__()
factory_kwargs = {'device': device, 'dtype': dtype}
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
        # Declare the weights and biases!
        # Implemented separately instead of as a single projected version!
        # Identical to `nn.Linear` except for the parameter initialization.
        # torch's Linear initializes the weight with kaiming uniform and the
        # bias with a plain uniform bounded by 1 / math.sqrt(fan_in).
        # For the RNN initialization, see the `reset_parameters` method below.
num_chunks = 4
self.weight_ih = nn.Parameter(torch.empty((num_chunks * hidden_size, input_size), **factory_kwargs))
self.weight_hh = nn.Parameter(torch.empty((num_chunks * hidden_size, hidden_size), **factory_kwargs))
if bias:
self.bias_ih = nn.Parameter(torch.empty(num_chunks * hidden_size, **factory_kwargs))
self.bias_hh = nn.Parameter(torch.empty(num_chunks * hidden_size, **factory_kwargs))
else:
            # register so the attributes are still accessible when bias is not used
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
        # initialize the parameters
self.reset_parameters()
def forward(
self,
input: torch.Tensor,
hidden: Optional[Tuple[torch.Tensor]] = None
) -> torch.Tensor:
"""
.. math::
\begin{array}{ll} \\
i_t = \sigma(W_{ii} x_t + b_{ii} + W_{hi} h_{t-1} + b_{hi}) \\
f_t = \sigma(W_{if} x_t + b_{if} + W_{hf} h_{t-1} + b_{hf}) \\
g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hg} h_{t-1} + b_{hg}) \\
o_t = \sigma(W_{io} x_t + b_{io} + W_{ho} h_{t-1} + b_{ho}) \\
c_t = f_t \odot c_{t-1} + i_t \odot g_t \\
h_t = o_t \odot \tanh(c_t) \\
\end{array}
where :math:`h_t` is the hidden state at time `t`, :math:`c_t` is the cell
state at time `t`, :math:`x_t` is the input at time `t`, :math:`h_{t-1}`
is the hidden state of the layer at time `t-1` or the initial hidden
state at time `0`, and :math:`i_t`, :math:`f_t`, :math:`g_t`,
:math:`o_t` are the input, forget, cell, and output gates, respectively.
:math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product.
"""
        # If no initial hidden state is given, initialize it with zeros.
        # Every direction and layer needs its own initial hidden state.
        if hidden is None:
            zeros = torch.zeros(input.size(0), self.hidden_size,
                                dtype=input.dtype, device=input.device)
            hidden = (zeros, zeros)
        x, hx = input, hidden
        i = torch.sigmoid(self.gate_wo_act(x, hx[0], 0))  # input gate
        f = torch.sigmoid(self.gate_wo_act(x, hx[0], 1))  # forget gate
        g = torch.tanh(self.gate_wo_act(x, hx[0], 2))     # cell (gate) gate
        o = torch.sigmoid(self.gate_wo_act(x, hx[0], 3))  # output gate
        next_cell = f * hx[1] + i * g
        next_hidden = o * torch.tanh(next_cell)
        return (next_hidden, next_cell)
    def gate_wo_act(self, x: torch.Tensor, hx: torch.Tensor, i: int) -> torch.Tensor:
        w_ih = self.weight_ih[i*self.hidden_size:(i+1)*self.hidden_size, :]
        w_hh = self.weight_hh[i*self.hidden_size:(i+1)*self.hidden_size, :]
        b_ih, b_hh = torch.zeros(2, device=x.device, dtype=x.dtype)
        if self.bias:
            b_ih = self.bias_ih[i*self.hidden_size:(i+1)*self.hidden_size]
            b_hh = self.bias_hh[i*self.hidden_size:(i+1)*self.hidden_size]
        return x @ w_ih.T + hx @ w_hh.T + b_ih + b_hh
@torch.no_grad()
def backward(self, grad_outputs: _TensorOrTensors):
"""
        Backward pass of the LSTMCell
        Implemented naively, without torch.autograd.Function
"""
return None
def extra_repr(self) -> str:
s = '{input_size}, {hidden_size}'
if 'bias' in self.__dict__ and self.bias is not True:
s += ', bias={bias}'
return s.format(**self.__dict__)
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
torch.nn.init.uniform_(weight, -stdv, stdv)
class GRUCell(nn.Module):
"""
    GRU Cell class
    Much slower than torch.nn.modules.rnn.GRUCell
    Matches the original GRUCell computation within atol=1e-8, rtol=1e-4
    Read this for learning purposes!
"""
__constants__ = ['input_size', 'hidden_size', 'bias']
def __init__(self, input_size: int, hidden_size: int, bias: bool = True,
device=None, dtype=None):
super().__init__()
factory_kwargs = {'device': device, 'dtype': dtype}
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
        # Declare the weights and biases!
        # Implemented separately instead of as a single projected version!
        # Identical to `nn.Linear` except for the parameter initialization.
        # torch's Linear initializes the weight with kaiming uniform and the
        # bias with a plain uniform bounded by 1 / math.sqrt(fan_in).
        # For the RNN initialization, see the `reset_parameters` method below.
num_chunks = 3
self.weight_ih = nn.Parameter(torch.empty((num_chunks * hidden_size, input_size), **factory_kwargs))
self.weight_hh = nn.Parameter(torch.empty((num_chunks * hidden_size, hidden_size), **factory_kwargs))
if bias:
self.bias_ih = nn.Parameter(torch.empty(num_chunks * hidden_size, **factory_kwargs))
self.bias_hh = nn.Parameter(torch.empty(num_chunks * hidden_size, **factory_kwargs))
else:
            # register so the attributes are still accessible when bias is not used
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
        # initialize the parameters
self.reset_parameters()
def forward(
self,
input: torch.Tensor,
hidden: Optional[Tuple[torch.Tensor]] = None
) -> torch.Tensor:
"""
.. math::
\begin{array}{ll}
r_t = \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\
z_t = \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\
n_t = \tanh(W_{in} x_t + b_{in} + r_t * (W_{hn} h_{(t-1)}+ b_{hn})) \\
h_t = (1 - z_t) * n_t + z_t * h_{(t-1)}
\end{array}
where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is the input
at time `t`, :math:`h_{(t-1)}` is the hidden state of the layer
at time `t-1` or the initial hidden state at time `0`, and :math:`r_t`,
:math:`z_t`, :math:`n_t` are the reset, update, and new gates, respectively.
:math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product.
"""
        # If no initial hidden state is given, initialize it with zeros.
        # Every direction and layer needs its own initial hidden state.
        if hidden is None:
            zeros = torch.zeros(input.size(0), self.hidden_size,
                                dtype=input.dtype, device=input.device)
            hidden = zeros
        x, hx = input, hidden
        r = torch.sigmoid(self.gate_wo_act(x, hx, 0))        # reset gate
        z = torch.sigmoid(self.gate_wo_act(x, hx, 1))        # update gate
        n = torch.tanh(self.gate_wo_act(x, hx, 2, reset=r))  # candidate activation vector
        next_hidden = (1 - z) * n + z * hx
        return next_hidden
def gate_wo_act(
self,
x: torch.Tensor,
hx: torch.Tensor,
i: int,
reset: Optional[torch.Tensor] = None
) -> torch.Tensor:
        w_ih = self.weight_ih[i*self.hidden_size:(i+1)*self.hidden_size, :]
        w_hh = self.weight_hh[i*self.hidden_size:(i+1)*self.hidden_size, :]
        b_ih, b_hh = torch.zeros(2, device=x.device, dtype=x.dtype)
        if self.bias:
            b_ih = self.bias_ih[i*self.hidden_size:(i+1)*self.hidden_size]
            b_hh = self.bias_hh[i*self.hidden_size:(i+1)*self.hidden_size]
x_part = x @ w_ih.T + b_ih
h_part = hx @ w_hh.T + b_hh
if reset is None:
reset = torch.ones_like(h_part, dtype=h_part.dtype, device=h_part.device)
return x_part + reset * h_part
@torch.no_grad()
def backward(self, grad_outputs: _TensorOrTensors):
"""
        Backward pass of the GRUCell
        Implemented naively, without torch.autograd.Function
"""
return None
def extra_repr(self) -> str:
s = '{input_size}, {hidden_size}'
if 'bias' in self.__dict__ and self.bias is not True:
s += ', bias={bias}'
return s.format(**self.__dict__)
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
torch.nn.init.uniform_(weight, -stdv, stdv)
```
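As a quick sanity check of the hand-written cell (assuming the forward fix above and that LSTMCell from this file is importable), one can copy its parameters into torch.nn.LSTMCell and compare outputs; the sizes are arbitrary.
```python
import torch
import torch.nn as nn

torch.manual_seed(0)
cell = LSTMCell(input_size=8, hidden_size=16)  # from rnn_variant.py above
ref = nn.LSTMCell(8, 16)
with torch.no_grad():
    ref.weight_ih.copy_(cell.weight_ih)  # both use the i, f, g, o chunk order
    ref.weight_hh.copy_(cell.weight_hh)
    ref.bias_ih.copy_(cell.bias_ih)
    ref.bias_hh.copy_(cell.bias_hh)

x = torch.randn(4, 8)
h, c = cell(x)
h_ref, c_ref = ref(x)
assert torch.allclose(h, h_ref, atol=1e-6)
assert torch.allclose(c, c_ref, atol=1e-6)
```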
#### File: pytorch/ch05_dataset/samplers.py
```python
import numpy as np
import torch
try:  # torch._six was removed in recent PyTorch releases
    from torch._six import int_classes as _int_classes
except ImportError:
    _int_classes = int
from torch import Tensor
from typing import Iterator, Optional, Sequence, List, TypeVar, Generic, Sized
T_co = TypeVar('T_co', covariant=True)
class Sampler(Generic[T_co]):
""" 모든 Sampler의 Base class """
def __init__(self, data_source: Optional[Sized]) -> None:
pass
def __iter__(self) -> Iterator[T_co]:
raise NotImplementedError
class SequentialSampler(Sampler[int]):
""" 요소를 같은 순서로 sequentially하게 샘플링 """
data_source: Sized
def __init__(self, data_source):
self.data_source = data_source
def __iter__(self):
return iter(range(len(self.data_source)))
def __len__(self) -> int:
return len(self.data_source)
class RandomSampler(Sampler[int]):
"""
    Samples elements at random!
    w/o replacement --> samples from a shuffled dataset
    with replacement -> :attr:`num_samples` can be specified to control how many draws are made
"""
data_source: Sized
replacement: bool
def __init__(self, data_source: Sized, replacement: bool = False,
num_samples: Optional[int] = None, generator=None) -> None:
self.data_source = data_source
self.replacement = replacement
self._num_samples = num_samples
self.generator = generator
if not isinstance(self.replacement, bool):
raise TypeError("replacement should be a boolean value, but got "
"replacement={}".format(self.replacement))
if self._num_samples is not None and not replacement:
raise ValueError("With replacement=False, num_samples should not be specified, "
"since a random permute will be performed.")
if not isinstance(self.num_samples, int) or self.num_samples <= 0:
raise ValueError("num_samples should be a positive integer "
"value, but got num_samples={}".format(self.num_samples))
@property
def num_samples(self) -> int:
# dataset size might change at runtime
        if self._num_samples is None:
return len(self.data_source)
return self._num_samples
def __iter__(self):
n = len(self.data_source)
if self.generator is None:
generator = torch.Generator()
generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))
else:
generator = self.generator
if self.replacement:
for _ in range(self.num_samples // 32):
yield from torch.randint(high=n, size=(32,), dtype=torch.int64, generator=generator).tolist()
yield from torch.randint(high=n, size=(self.num_samples % 32,), dtype=torch.int64, generator=generator).tolist()
else:
            yield from torch.randperm(n, generator=generator).tolist()
def __len__(self):
return self.num_samples
class SubsetRandomSampler(Sampler[int]):
""" replacement없이 주어진 index list에서 요소를 임의로 추출 """
indices: Sequence[int]
def __init__(self, indices: Sequence[int], generator=None) -> None:
self.indices = indices
self.generator = generator
def __iter__(self):
return (self.indices[i] for i in torch.randperm(len(self.indices), generator=self.generator))
def __len__(self):
return len(self.indices)
class WeightedRandomSampler(Sampler[int]):
"""
    Samples elements from [0,...,len(weights)-1] with the given probabilities!
    % the weights do not need to sum to 1!
"""
weights: Tensor
num_samples: int
replacement: bool
def __init__(self, weights: Sequence[float], num_samples: int,
                 replacement: bool = True, generator=None) -> None:
if not isinstance(num_samples, _int_classes) or isinstance(num_samples, bool) or \
num_samples <= 0:
raise ValueError("num_samples should be a positive integer "
"value, but got num_samples={}".format(num_samples))
if not isinstance(replacement, bool):
raise ValueError("replacement should be a boolean value, but got "
"replacement={}".format(replacement))
self.weights = torch.as_tensor(weights, dtype=torch.double)
self.num_samples = num_samples
self.replacement = replacement
        self.generator = generator
def __iter__(self):
        rand_tensor = torch.multinomial(
            input=self.weights,
            num_samples=self.num_samples,
            replacement=self.replacement,
            generator=self.generator,
        )
return iter(rand_tensor.tolist())
def __len__(self):
return self.num_samples
# https://github.com/doc2dial/sharedtask-dialdoc2021/blob/master/scripts/subtask2/utils.py
def sortish_sampler_indices(
data: List,
batch_size: int,
shuffle: bool = True
) -> np.ndarray:
if not shuffle:
return np.argsort(np.array(data) * -1)
def key_fn(i):
return data[i]
idxs = np.random.permutation(len(data))
sz = batch_size * 50
ck_idx = [idxs[i:i+sz] for i in range(0, len(idxs), sz)]
sort_idx = np.concatenate([sorted(s, key=key_fn, reverse=True) for s in ck_idx])
sz = batch_size
    ck_idx = [sort_idx[i:i+sz] for i in range(0, len(sort_idx), sz)]
max_ck = np.argmax([key_fn(ck[0]) for ck in ck_idx]) # find the chunk with the largest key,
ck_idx[0], ck_idx[max_ck] = ck_idx[max_ck], ck_idx[0] # then make sure it goes first.
    sort_idx = np.concatenate(np.random.permutation(ck_idx[1:])) if len(ck_idx) > 1 else np.array([], dtype=int)
sort_idx = np.concatenate((ck_idx[0], sort_idx))
return sort_idx
# class adapted from fastai
class SortishSampler(Sampler):
""" sequence length순으로 정렬된 text data를 추출! (+ a bit of randomness) """
def __init__(self, data, batch_size, shuffle=True):
        self.data = data  # list of source sequence lengths!
self.batch_size = batch_size
self.shuffle = shuffle
def __len__(self) -> int:
return len(self.data)
def __iter__(self):
return iter(sortish_sampler_indices(self.data, self.batch_size, self.shuffle))
class BatchSampler(Sampler[List[int]]):
""" 다른 Sampler를 mini-batch화 시켜주는 Wrapper class """
def __init__(self, sampler: Sampler[int], batch_size: int, drop_last: bool) -> None:
# Since collections.abc.Iterable does not check for `__getitem__`, which
# is one way for an object to be an iterable, we don't do an `isinstance`
# check here.
if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
batch_size <= 0:
raise ValueError("batch_size should be a positive integer value, "
"but got batch_size={}".format(batch_size))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
def __iter__(self):
batch = []
for idx in self.sampler:
batch.append(idx)
if len(batch) == self.batch_size:
yield batch
batch = []
if len(batch) > 0 and not self.drop_last:
yield batch
def __len__(self):
# Can only be called if self.sampler has __len__ implemented
# We cannot enforce this condition, so we turn off typechecking for the
# implementation below.
# Somewhat related: see NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
if self.drop_last:
return len(self.sampler) // self.batch_size # type: ignore
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
```
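A short usage sketch combining the two samplers above; the lengths are made up, and the exact batch order varies from run to run.
```python
# Toy source lengths (9 items so the sortish chunks stay evenly sized).
src_lens = [5, 17, 3, 9, 12, 4, 8, 20, 6]
sampler = SortishSampler(src_lens, batch_size=3)
batches = list(BatchSampler(sampler, batch_size=3, drop_last=False))
# Three batches of three indices each; within a run, longer sequences tend to come first.
print(batches)
```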
{
"source": "jinmang2/DOOLY",
"score": 2
}
#### File: dooly/converters/kobart_utils.py
```python
import os
import sys
import hashlib
import importlib
def is_available_boto3():
return importlib.util.find_spec("boto3")
if is_available_boto3():
import boto3
from botocore import UNSIGNED
from botocore.client import Config
else:
raise ModuleNotFoundError("Please install boto3 with: `pip install boto3`.")
class AwsS3Downloader(object):
def __init__(
self,
aws_access_key_id=None,
aws_secret_access_key=None,
):
self.resource = boto3.Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
).resource("s3")
self.client = boto3.client(
"s3",
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
config=Config(signature_version=UNSIGNED),
)
def __split_url(self, url: str):
if url.startswith("s3://"):
url = url.replace("s3://", "")
bucket, key = url.split("/", maxsplit=1)
return bucket, key
def download(self, url: str, local_dir: str):
bucket, key = self.__split_url(url)
filename = os.path.basename(key)
file_path = os.path.join(local_dir, filename)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
meta_data = self.client.head_object(Bucket=bucket, Key=key)
total_length = int(meta_data.get("ContentLength", 0))
downloaded = 0
def progress(chunk):
nonlocal downloaded
downloaded += chunk
done = int(50 * downloaded / total_length)
sys.stdout.write(
"\r{}[{}{}]".format(file_path, "█" * done, "." * (50 - done))
)
sys.stdout.flush()
try:
with open(file_path, "wb") as f:
self.client.download_fileobj(bucket, key, f, Callback=progress)
sys.stdout.write("\n")
sys.stdout.flush()
except Exception as e: # E722 do not use bare 'except'
print(f"Exception occured: {e}.\ndownloading file is failed. {url}")
return file_path
def download(url, chksum=None, cachedir=".cache"):
cachedir_full = os.path.join(os.getcwd(), cachedir)
os.makedirs(cachedir_full, exist_ok=True)
filename = os.path.basename(url)
file_path = os.path.join(cachedir_full, filename)
if os.path.isfile(file_path):
if hashlib.md5(open(file_path, "rb").read()).hexdigest()[:10] == chksum:
print(f"using cached model. {file_path}")
return file_path, True
s3 = AwsS3Downloader()
file_path = s3.download(url, cachedir_full)
if chksum:
assert (
chksum == hashlib.md5(open(file_path, "rb").read()).hexdigest()[:10]
), "corrupted file!"
return file_path, False
```
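A hedged usage sketch for the cached downloader above; the S3 URL is a placeholder, and the call would actually reach out to S3 on a cache miss.
```python
# Placeholder URL; with chksum=None the md5 verification is skipped.
model_path, from_cache = download("s3://<bucket>/<prefix>/model.tar.gz", chksum=None, cachedir=".cache")
print(model_path, from_cache)
```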
#### File: dooly/models/__init__.py
```python
import json
from packaging import version
from contextlib import contextmanager
from typing import Optional
import torch
import transformers
from transformers.configuration_utils import PretrainedConfig
from transformers.modeling_utils import PreTrainedModel
from .modeling_bart import BartForConditionalGeneration
from .modeling_fsmt import FSMTForConditionalGeneration
from .modeling_roberta import (
RobertaForDependencyParsing,
RobertaForSpanPrediction,
RobertaForSequenceTagging,
RobertaForSequenceClassification,
)
from ..build_utils import (
download_from_hf_hub,
CONFIG_USER_AGENT,
HUB_NAME,
MODEL_USER_AGENT,
CONFIG_NAME,
WEIGHTS_NAME,
)
DoolyModelHub = {
"dp": {
"ko": {"posbert.base": RobertaForDependencyParsing},
},
"mrc": {
"ko": {"brainbert.base": RobertaForSpanPrediction},
},
"mt": {
"multi": {
"transformer.large.mtpg": FSMTForConditionalGeneration,
"transformer.large.fast.mtpg": FSMTForConditionalGeneration,
},
},
"ner": {
"ko": {"charbert.base": RobertaForSequenceTagging},
"en": {"roberta.base": RobertaForSequenceTagging},
"ja": {"jaberta.base": RobertaForSequenceTagging},
"zh": {"zhberta.base": RobertaForSequenceTagging},
},
"nli": {
"ko": {"brainbert.base": RobertaForSequenceClassification},
"en": {"roberta.base": RobertaForSequenceClassification},
"ja": {"jaberta.base": RobertaForSequenceClassification},
"zh": {"zhberta.base": RobertaForSequenceClassification},
},
"qg": {
"ko": {"kobart.base": BartForConditionalGeneration},
},
"wsd": {
"ko": {"transformer.large": FSMTForConditionalGeneration},
},
}
DoolyModelHub["bt"] = DoolyModelHub["mt"]
DoolyModelHub["zero_topic"] = DoolyModelHub["nli"]
available_tasks = list(DoolyModelHub.keys())
_init_weights = True
@contextmanager
def no_init_weights(_enable=True):
global _init_weights
if _enable:
_init_weights = False
try:
yield
finally:
_init_weights = True
class DoolyModel:
""" Dooly Model """
@classmethod
def build_model(cls, task: str, lang: str, n_model: str, **kwargs):
assert (
task in available_tasks
), f"Task `{task}` is not available. See here {available_tasks}."
available_langs = DoolyModelHub[task]
assert lang in available_langs, (
f"Language `{lang}` is not available in this task {task}. "
f"See here {available_langs}."
)
available_models = available_langs[lang]
assert n_model in available_models, (
f"Model `{n_model}` is not available in this task-lang pair. "
f"See here {available_models}."
)
model_class = available_models[n_model]
return cls._build_model(task, lang, n_model, model_class, **kwargs)
@classmethod
def _build_model_config(
cls,
task: str,
lang: str,
n_model: str,
config_class: PretrainedConfig,
revision: Optional[str] = None,
cache_dir: Optional[str] = None,
force_download: bool = False,
resume_download: bool = False,
**kwargs,
) -> PretrainedConfig:
# Load from URL or cache if already cached
resolved_config_file = download_from_hf_hub(
model_id=HUB_NAME,
filename=CONFIG_NAME,
subfolder=f"{task}/{lang}/{n_model}",
revision=revision,
cache_dir=cache_dir,
force_download=force_download,
resume_download=resume_download,
user_agent=CONFIG_USER_AGENT,
)
# _dict_from_json_file
with open(resolved_config_file, "r", encoding="utf-8") as reader:
text = reader.read()
config_dict = json.loads(text)
return config_class.from_dict(config_dict, **kwargs)
@classmethod
def _build_model(
cls,
task: str,
lang: str,
n_model: str,
model_class: PreTrainedModel,
revision: Optional[str] = None,
cache_dir: Optional[str] = None,
force_download: bool = False,
resume_download: bool = False,
low_cpu_mem_usage: bool = False,
_fast_init: bool = True,
**kwargs,
) -> PreTrainedModel:
if low_cpu_mem_usage:
assert version.parse(torch.__version__) > version.parse("1.9"), (
"torch>=1.9 is required for a normal functioning of this module"
f"using the low_cpu_mem_usage=={low_cpu_mem_usage}, "
f"but found torch=={torch.__version__}"
)
config_class: PretrainedConfig = model_class.config_class
config = cls._build_model_config(
task=task,
lang=lang,
n_model=n_model,
config_class=config_class,
revision=revision,
cache_dir=cache_dir,
force_download=force_download,
resume_download=resume_download,
**kwargs,
)
# Load from URL or cache if already cached
resolved_archive_file = download_from_hf_hub(
model_id=HUB_NAME,
filename=WEIGHTS_NAME,
subfolder=f"{task}/{lang}/{n_model}",
revision=revision,
cache_dir=cache_dir,
force_download=force_download,
resume_download=resume_download,
user_agent=MODEL_USER_AGENT,
)
state_dict = torch.load(resolved_archive_file, map_location="cpu")
if low_cpu_mem_usage:
loaded_state_dict_keys = [k for k in state_dict.keys()]
del state_dict # free CPU memory - will reload again later
with no_init_weights(_enable=_fast_init):
model = model_class(config, **kwargs)
# There was an update to the from_pretrained method of models in v4.18.0.
# See fetch below.
# ref. https://github.com/huggingface/transformers/releases/tag/v4.18.0
# ref. https://github.com/huggingface/transformers/pull/16343
if low_cpu_mem_usage:
kwargs = dict(
model=model,
loaded_state_dict_keys=loaded_state_dict_keys,
resolved_archive_file=resolved_archive_file,
)
if version.parse(transformers.__version__) >= version.parse("4.18.0"):
load_pretrained_model = model_class._load_pretrained_model_low_mem
else:
load_pretrained_model = model_class._load_state_dict_into_model_low_mem
load_pretrained_model(**kwargs)
else:
kwargs = dict(
model=model,
state_dict=state_dict,
pretrained_model_name_or_path=HUB_NAME,
ignore_mismatched_sizes=False,
_fast_init=_fast_init,
)
if version.parse(transformers.__version__) >= version.parse("4.18.0"):
kwargs.update(dict(resolved_archive_file=resolved_archive_file))
load_pretrained_model = model_class._load_pretrained_model
if version.parse(transformers.__version__) >= version.parse("4.19.0"):
                    # Note: the `is_shared` parameter will not be supported.
                    # This code may also break once transformers moves to v5,
                    # and honestly it is already getting messy...
                    # Possible fixes:
                    # 1. Open a PR against transformers so that PreTrainedModel.from_pretrained
                    #    supports `subfolder` the way the tokenizers already do.
                    # 2. Find a way to reach the internal loading script methods directly.
kwargs.update(dict(loaded_keys=[k for k in state_dict.keys()]))
else:
load_pretrained_model = model_class._load_state_dict_into_model
model, _, _, _, _ = load_pretrained_model(**kwargs)
# make sure token embedding weights are still tied if needed
model.tie_weights()
# Set model in evaluation mode to deactivate DropOut modules by default
model.eval()
return model
```
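A brief usage sketch of the builder above; the task/lang/model names come from DoolyModelHub, and the call pulls config and weights from the Hugging Face hub, so it needs network access.
```python
# Illustrative only; the combination below exists in DoolyModelHub.
model = DoolyModel.build_model(task="ner", lang="ko", n_model="charbert.base")
print(type(model).__name__)  # RobertaForSequenceTagging
```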
#### File: dooly/models/modeling_fsmt.py
```python
import math
import random
import torch.nn as nn
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.models.fsmt.modeling_fsmt import (
SinusoidalPositionalEmbedding,
EncoderLayer,
DecoderLayer,
FSMTModel as _FSMTModel,
FSMTForConditionalGeneration as _FSMTForConditionalGeneration,
)
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
# Seq2SeqLMOutput,
# Seq2SeqModelOutput,
)
from transformers.deepspeed import is_deepspeed_zero3_enabled
def invert_mask(attention_mask):
"""Turns 1->0, 0->1, False->True, True-> False"""
assert attention_mask.dim() == 2
return attention_mask.eq(0)
class FSMTConfig(FSMTConfig):
def __init__(
self,
encoder_pre_layernorm=False,
decoder_pre_layernorm=False,
**kwargs,
):
self.encoder_pre_layernorm = encoder_pre_layernorm
self.decoder_pre_layernorm = decoder_pre_layernorm
super().__init__(**kwargs)
class FSMTEncoderLayer(EncoderLayer):
def __init__(self, config: FSMTConfig):
super().__init__(config)
self.pre_layernorm = config.encoder_pre_layernorm
def forward(
self, x, encoder_padding_mask, layer_head_mask, output_attentions=False
):
"""
Args:
x (`torch.Tensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
encoder_padding_mask (`torch.ByteTensor`): binary ByteTensor of shape
*(batch, src_len)* where padding elements are indicated by `1`.
for t_tgt, t_src is excluded (or masked out), =0 means it is
included in attention
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
*(config.encoder_attention_heads,)*.
Returns:
encoded output of shape *(seq_len, batch, embed_dim)*
"""
residual = x
if self.pre_layernorm:
x = self.self_attn_layer_norm(x)
x, attn_weights = self.self_attn(
query=x,
key=x,
key_padding_mask=encoder_padding_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
x = nn.functional.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.pre_layernorm:
x = self.self_attn_layer_norm(x)
residual = x
if self.pre_layernorm:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = nn.functional.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.pre_layernorm:
x = self.final_layer_norm(x)
return x, attn_weights
class FSMTDecoderLayer(DecoderLayer):
def __init__(self, config: FSMTConfig):
super().__init__(config)
self.pre_layernorm = config.decoder_pre_layernorm
def forward(
self,
x,
encoder_hidden_states,
encoder_attn_mask=None,
layer_state=None,
causal_mask=None,
layer_head_mask=None,
cross_attn_layer_head_mask=None,
decoder_padding_mask=None,
output_attentions=False,
):
residual = x
if layer_state is None:
layer_state = {}
if self.pre_layernorm:
x = self.self_attn_layer_norm(x)
# Self Attention
x, self_attn_weights = self.self_attn(
query=x,
key=x,
layer_state=layer_state, # adds keys to layer state
key_padding_mask=decoder_padding_mask,
attn_mask=causal_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
x = nn.functional.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.pre_layernorm:
x = self.self_attn_layer_norm(x)
# Cross attention
residual = x
assert self.encoder_attn.cache_key != self.self_attn.cache_key
if self.pre_layernorm:
x = self.encoder_attn_layer_norm(x)
x, cross_attn_weights = self.encoder_attn(
query=x,
key=encoder_hidden_states,
key_padding_mask=encoder_attn_mask,
layer_state=layer_state, # mutates layer state
layer_head_mask=cross_attn_layer_head_mask,
output_attentions=output_attentions,
)
x = nn.functional.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.pre_layernorm:
x = self.encoder_attn_layer_norm(x)
# Fully Connected
residual = x
if self.pre_layernorm:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = nn.functional.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.pre_layernorm:
x = self.final_layer_norm(x)
return (
x,
self_attn_weights,
layer_state,
cross_attn_weights,
) # layer_state = cache for decoding
class FSMTEncoder(nn.Module):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`EncoderLayer`].
Args:
config: FSMTConfig
"""
def __init__(self, config: FSMTConfig, embed_tokens):
super().__init__()
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
self.padding_idx = embed_tokens.padding_idx
self.embed_tokens = embed_tokens
embed_dim = embed_tokens.embedding_dim
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
self.embed_positions = SinusoidalPositionalEmbedding(
config.max_position_embeddings + self.padding_idx + 1,
embed_dim,
self.padding_idx,
)
self.layers = nn.ModuleList(
[FSMTEncoderLayer(config) for _ in range(config.encoder_layers)]
)
self.pre_layernorm = config.encoder_pre_layernorm
if self.pre_layernorm:
self.layer_norm = nn.LayerNorm(embed_dim)
def forward(
self,
input_ids,
attention_mask=None,
head_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
"""
Args:
input_ids (`torch.LongTensor`): tokens in the source language of shape
*(batch, src_len)*
attention_mask (`torch.LongTensor`): indicating which indices are padding tokens
head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
Returns:
BaseModelOutput or Tuple comprised of:
- **x** (`torch.Tensor`): the last encoder layer's output of shape *(src_len, batch, embed_dim)*
- **encoder_states** (`Tuple(torch.FloatTensor`)): all intermediate hidden states of shape *(src_len,
batch, embed_dim)*. Only populated if *output_hidden_states:* is True.
- **all_attentions** (`Tuple(torch.FloatTensor`)): Attention weights for each layer.
During training might not be of length n_layers because of layer dropout.
"""
# check attention mask and invert
if attention_mask is not None:
attention_mask = invert_mask(attention_mask)
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input_ids)
x = inputs_embeds + embed_pos
x = nn.functional.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layers)
), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
x = x.transpose(0, 1) # T x B x C -> B x T x C
encoder_states += (x,)
x = x.transpose(0, 1) # B x T x C -> T x B x C
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (
dropout_probability < self.layerdrop
): # skip the layer
attn = None
else:
x, attn = encoder_layer(
x,
attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
)
if output_attentions:
all_attentions = all_attentions + (attn,)
if self.pre_layernorm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if output_hidden_states:
encoder_states += (x,)
if not return_dict:
return tuple(
v for v in [x, encoder_states, all_attentions] if v is not None
)
return BaseModelOutput(
last_hidden_state=x, hidden_states=encoder_states, attentions=all_attentions
)
class FSMTDecoder(nn.Module):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DecoderLayer`]
Args:
config: FSMTConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: FSMTConfig, embed_tokens: nn.Embedding):
super().__init__()
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = embed_tokens.padding_idx
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.embed_tokens = embed_tokens
embed_dim = embed_tokens.embedding_dim
self.embed_positions = SinusoidalPositionalEmbedding(
config.max_position_embeddings + self.padding_idx + 1,
embed_dim,
self.padding_idx,
)
self.layers = nn.ModuleList(
[FSMTDecoderLayer(config) for _ in range(config.decoder_layers)]
)
self.pre_layernorm = config.decoder_pre_layernorm
if self.pre_layernorm:
self.layer_norm = nn.LayerNorm(embed_dim)
if is_deepspeed_zero3_enabled():
import deepspeed
with deepspeed.zero.GatheredParameters(
self.embed_tokens.weight, modifier_rank=None
):
embed_tokens_weight_shape = self.embed_tokens.weight.shape
else:
embed_tokens_weight_shape = self.embed_tokens.weight.shape
self.output_projection = nn.Linear(
embed_tokens_weight_shape[1], embed_tokens_weight_shape[0], bias=False
)
self.output_projection.weight = self.embed_tokens.weight
def forward(
self,
input_ids,
encoder_hidden_states,
encoder_padding_mask,
decoder_padding_mask,
decoder_causal_mask,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
use_cache=False,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
"""
Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al.,
EMNLP 2019).
Args:
input_ids (`torch.LongTensor` of shape `(batch, tgt_len)`):
previous decoder outputs for teacher forcing
encoder_hidden_states: output from the encoder, used for
encoder-side attention
encoder_padding_mask: for ignoring pad tokens
past_key_values (dict or None): dictionary used for storing state during generation
head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
Returns:
BaseModelOutputWithPast or tuple:
- the decoder's features of shape *(batch, tgt_len, embed_dim)*
- the cache
- hidden states
- attentions
"""
# check attention mask and invert
if encoder_padding_mask is not None:
encoder_padding_mask = invert_mask(encoder_padding_mask)
# embed positions
positions = self.embed_positions(input_ids) # , use_cache=use_cache)
if use_cache:
input_ids = input_ids[:, -1:]
positions = positions[:, -1:] # happens after we embed them
# assert input_ids.ne(self.padding_idx).any()
x = self.embed_tokens(input_ids) * self.embed_scale
x += positions
x = nn.functional.dropout(x, p=self.dropout, training=self.training)
# Convert to FSMT output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)
x = x.transpose(0, 1)
encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attns = () if output_attentions else None
next_decoder_cache = []
# check if head_mask has a correct number of layers specified if desired
for attn_mask, mask_name in zip(
[head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]
):
if attn_mask is not None:
assert attn_mask.size()[0] == len(self.layers), (
f"The `{mask_name}` should be specified for {len(self.layers)} layers, "
f"but it is for {head_mask.size()[0]}."
)
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
x = x.transpose(0, 1)
all_hidden_states += (x,)
x = x.transpose(0, 1)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
layer_state = past_key_values[idx] if past_key_values is not None else None
x, layer_self_attn, layer_past, layer_cross_attn = decoder_layer(
x,
encoder_hidden_states,
encoder_attn_mask=encoder_padding_mask,
decoder_padding_mask=decoder_padding_mask,
layer_state=layer_state,
causal_mask=decoder_causal_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
cross_attn_layer_head_mask=(
cross_attn_head_mask[idx]
if cross_attn_head_mask is not None
else None
),
output_attentions=output_attentions,
)
if use_cache:
next_decoder_cache.append(layer_past.copy())
if output_attentions:
all_self_attns += (layer_self_attn,)
all_cross_attns += (layer_cross_attn,)
if self.pre_layernorm:
x = self.layer_norm(x)
# add hidden states from the last decoder layer
if output_hidden_states:
x = x.transpose(0, 1)
all_hidden_states += (x,)
x = x.transpose(0, 1)
# Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)
x = x.transpose(0, 1)
encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
x = self.output_projection(x)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [
x,
next_cache,
all_hidden_states,
all_self_attns,
all_cross_attns,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=x,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attns,
)
class FSMTModel(_FSMTModel):
def __init__(self, config: FSMTConfig):
super(_FSMTModel, self).__init__(config)
padding_idx = config.pad_token_id
encoder_embed_tokens = nn.Embedding(
config.src_vocab_size, config.d_model, padding_idx
)
decoder_embed_tokens = nn.Embedding(
config.tgt_vocab_size, config.d_model, padding_idx
)
self.encoder = FSMTEncoder(config, encoder_embed_tokens)
self.decoder = FSMTDecoder(config, decoder_embed_tokens)
# Initialize weights and apply final processing
self.post_init()
class FSMTForConditionalGeneration(_FSMTForConditionalGeneration):
def __init__(self, config: FSMTConfig):
super(_FSMTForConditionalGeneration, self).__init__(config)
self.model = FSMTModel(config)
```
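A small configuration sketch showing the extra pre-layernorm switches this subclass adds; the language pair and vocabulary sizes here are arbitrary placeholders, not the shipped checkpoints.
```python
# Hypothetical config values; only the *_pre_layernorm flags are specific to this subclass.
config = FSMTConfig(
    langs=["ko", "en"],
    src_vocab_size=32000,
    tgt_vocab_size=32000,
    encoder_pre_layernorm=True,
    decoder_pre_layernorm=True,
)
model = FSMTForConditionalGeneration(config)
```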
#### File: dooly/models/modeling_roberta.py
```python
import random
from packaging import version
import torch
import torch.nn as nn
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.models.roberta.modeling_roberta import (  # noqa # pylint: disable=unused-import
    RobertaModel,
    RobertaPooler,
    RobertaEncoder,
    RobertaPreTrainedModel,
    RobertaForQuestionAnswering,
    RobertaForSequenceClassification,
    create_position_ids_from_input_ids,
)
from .utils.modeling_heads import (
SpanPredictionHead,
ClassificationHead,
DependencyParseHead,
SlotGenerator,
)
from .utils.modeling_outputs import (
BaseModelOutputWithPoolingAndCrossAttentions,
TokenClassifierOutput,
DependencyParsingOutput,
DialogueStateTrackingOutput,
)
from .utils.modeling_utils import masked_cross_entropy_for_value
class RobertaForDPConfig(RobertaConfig):
def __init__(
self, num_segments: int = 52, classifier_num_attention_heads: int = 8, **kwargs
):
super().__init__(**kwargs)
self.num_segments = num_segments
self.classifier_num_attention_heads = classifier_num_attention_heads
class RobertaForDSTConfig(RobertaConfig):
def __init__(
self, teacher_forcing: float = 0.5, parallel_decoding: bool = True, **kwargs
):
super().__init__(**kwargs)
self.teacher_forcing = teacher_forcing
self.parallel_decoding = parallel_decoding
class RobertaForSpanPrediction(RobertaForQuestionAnswering):
def __init__(self, config):
# Initialize on RobertaPreTrainedModel
super(RobertaForQuestionAnswering, self).__init__(config)
config.num_labels = 2
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.qa_outputs = SpanPredictionHead(config)
self.init_weights()
class RobertaForSequenceTagging(RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.classifier = ClassificationHead(config)
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = nn.CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss,
labels.view(-1),
torch.tensor(loss_fct.ignore_index).type_as(labels),
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class SegmentRobertaEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id
)
self.num_segments = config.num_segments
if config.num_segments > 0:
self.segment_embeddings = nn.Embedding(
config.num_segments, config.hidden_size, padding_idx=None
)
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size
)
self.token_type_embeddings = nn.Embedding(
config.type_vocab_size, config.hidden_size
)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.position_embedding_type = getattr(
config, "position_embedding_type", "absolute"
)
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))
)
if version.parse(torch.__version__) > version.parse("1.6.0"):
self.register_buffer(
"token_type_ids",
torch.zeros(self.position_ids.size(), dtype=torch.long),
persistent=False,
)
# End copy
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings,
config.hidden_size,
padding_idx=self.padding_idx,
)
def forward(
self,
input_ids=None,
segment_labels=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
past_key_values_length=0,
):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(
input_ids, self.padding_idx, past_key_values_length
)
else:
position_ids = self.create_position_ids_from_inputs_embeds(
inputs_embeds
)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
# Setting token_type_ids to the registered buffer from the constructor, where
# it is all zeros, which usually occurs when it's auto-generated. The
# registered buffer helps users when tracing the model without passing
# token_type_ids; solves issue #5664.
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(
input_shape[0], seq_length
)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(
input_shape, dtype=torch.long, device=self.position_ids.device
)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
if segment_labels is not None and self.num_segments > 0:
segment_embeddings = self.segment_embeddings(segment_labels)
embeddings += segment_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1,
sequence_length + self.padding_idx + 1,
dtype=torch.long,
device=inputs_embeds.device,
)
return position_ids.unsqueeze(0).expand(input_shape)
class SegmentRobertaModel(RobertaModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config, add_pooling_layer=True):
super(RobertaModel, self).__init__(config)
self.config = config
self.embeddings = SegmentRobertaEmbeddings(config)
self.encoder = RobertaEncoder(config)
self.pooler = RobertaPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
if hasattr(self, "post_init"):
self.post_init()
else:
self.init_weights()
def forward(
self,
input_ids=None,
segment_labels=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = (
past_key_values[0][0].shape[2] if past_key_values is not None else 0
)
if attention_mask is None:
attention_mask = torch.ones(
((batch_size, seq_length + past_key_values_length)), device=device
)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(
batch_size, seq_length
)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(
input_shape, dtype=torch.long, device=device
)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
attention_mask, input_shape, device
)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
(
encoder_batch_size,
encoder_sequence_length,
_,
) = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask
)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
segment_labels=segment_labels,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = (
self.pooler(sequence_output) if self.pooler is not None else None
)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
class RobertaForDependencyParsing(RobertaPreTrainedModel):
config_class = RobertaForDPConfig
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = SegmentRobertaModel(config, add_pooling_layer=False)
self.classifier = DependencyParseHead(config)
self.init_weights()
def forward(
self,
input_ids=None,
segment_labels=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
outputs = self.roberta(
input_ids,
segment_labels=segment_labels,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
classifier_attn, logits = self.classifier(sequence_output, attention_mask)
loss = None
if labels is not None:
loss_fct = nn.CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss,
labels.view(-1),
torch.tensor(loss_fct.ignore_index).type_as(labels),
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return DependencyParsingOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
classifier_attention=classifier_attn,
)
class RobertaForDialogueStateTracking(RobertaPreTrainedModel):
config_class = RobertaForDSTConfig
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.teacher_forcing = config.teacher_forcing
self.roberta = RobertaModel(config, add_pooling_layer=True)
self.decoder = SlotGenerator(config)
self.post_init()
def _tie_weights(self):
# Share the embedding layer for both encoder and decoder
self.decoder.embed.weight = self.roberta.embeddings.word_embeddings.weight
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
gating_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
target_ids=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
encoder_output = outputs[0] # last_hidden_state
pooler_output = outputs[1].unsqueeze(0) # pooler_output
max_len, teacher = 10, None
if target_ids is not None:
max_len = target_ids.size(-1)
if self.teacher_forcing > 0.0 and random.random() < self.teacher_forcing:
teacher = target_ids
all_point_outputs, all_gate_outputs = self.decoder(
input_ids=input_ids,
encoder_output=encoder_output,
hidden=pooler_output,
input_masks=attention_mask,
max_len=max_len,
teacher=teacher,
)
loss = None
if target_ids is not None:
# generation loss
loss_gen = masked_cross_entropy_for_value(
all_point_outputs.contiguous(),
target_ids.contiguous().view(-1),
self.decoder.pad_token_id,
)
# gate loss
loss_fct = nn.CrossEntropyLoss()
loss_gate = loss_fct(
all_gate_outputs.contiguous().view(-1, self.decoder.num_gates),
gating_ids.contiguous().view(-1),
)
# total loss = generation loss + gate loss
loss = loss_gen + loss_gate
if not return_dict:
output = (
all_point_outputs,
all_gate_outputs,
) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return DialogueStateTrackingOutput(
loss=loss,
point_outputs=all_point_outputs,
gate_outputs=all_gate_outputs,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def create_position_ids_from_input_ids(
input_ids, padding_idx, past_key_values_length=0
):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
x: torch.Tensor x:
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (
torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length
) * mask
return incremental_indices.long() + padding_idx
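# Worked example (hypothetical values): with padding_idx = 1 and
# input_ids = [[0, 31, 42, 1, 1]], the mask is [1, 1, 1, 0, 0], the cumulative
# sum gives [1, 2, 3, 3, 3], and the result is [[2, 3, 4, 1, 1]] -- padded
# positions keep padding_idx while real tokens start at padding_idx + 1.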
```
#### File: dooly/tasks/question_generation.py
```python
from typing import List, Dict, Union, Optional
from transformers import PreTrainedModel, PreTrainedTokenizerBase
from .base import DoolyTaskConfig, Seq2Seq
from ..tokenizers import Tokenizer as _Tokenizer
from .base.wikipedia2vec import (
WhooshIndex,
Wikipedia2Vec,
SimilarWords,
)
Tokenizer = Union[_Tokenizer, PreTrainedTokenizerBase]
class QuestionGeneration(Seq2Seq):
"""
Question generation using BART model
Korean (`kobart.base.ko.qg`)
- dataset: KorQuAD 1.0 (Lim et al. 2019) + AI hub Reading Comprehension corpus + AI hub Commonsense corpus
- metric: Model base evaluation using PororoMrc (`brainbert.base`)
- EM (82.59), F1 (94.06)
- ref: https://www.aihub.or.kr/aidata/86
- ref: https://www.aihub.or.kr/aidata/84
Args:
answer (str): answer text
context (str): source article
beam (int): beam search size
temperature (float): temperature scale
top_k (int): top-K sampling vocabulary size
top_p (float): top-p sampling ratio
no_repeat_ngram_size (int): no repeat ngram size
len_penalty (float): length penalty ratio
n_wrong (int): number of wrong-answer candidates
return_context (bool): return context together or not
Returns:
str : question (if `n_wrong` < 1)
Tuple[str, List[str]] : question, wrong_answers (if `n_wrong` >= 1)
"""
task: str = "qg"
available_langs: List[str] = ["ko"]
available_models: Dict[str, List[str]] = {
"ko": ["kobart.base"],
}
misc_files: Dict[str, List[str]] = {
"ko": ["kowiki_20200720_100d.pkl", "ko_indexdir.zip"]
}
def __init__(
self,
config: DoolyTaskConfig,
tokenizer: Tokenizer,
model: PreTrainedModel,
):
super().__init__(config=config)
self._tokenizer = tokenizer
self._model = model
self._max_length = model.config.max_position_embeddings or 1024
# set sentence tokenizer
self._tokenizer._set_sent_tokenizer()
self._start_hl_token = "<unused0>"
self._end_hl_token = "<unused1>"
self._sim_words = SimilarWords(
model=Wikipedia2Vec(config.misc_tuple[0], self.device),
idx=WhooshIndex.open_dir(config.misc_tuple[1]),
)
self.finalize()
@property
def start_hl_token(self):
""" Get start highlight token """
return self._start_hl_token
@start_hl_token.setter
def start_hl_token(self, val):
""" Set start highlight token """
self._start_hl_token = val
@property
def end_hl_token(self):
""" Get end highlight token """
return self._end_hl_token
@end_hl_token.setter
def end_hl_token(self, val):
""" Set end highlight token """
self._end_hl_token = val
@property
def max_length(self):
return self._max_length
def _focus_answer(self, context: str, answer: str, truncate: bool = True):
"""
Add the answer start/end highlight tokens,
and truncate the context text to speed up inference.
Args:
context (str): context string
answer (str): answer string
truncate (bool): truncate or not
Returns:
context (str): preprocessed context string
"""
start_idx = context.find(answer)
end_idx = start_idx + len(answer) + len(self.start_hl_token)
# insert highlight tokens
context = context[:start_idx] + self.start_hl_token + context[start_idx:]
context = context[:end_idx] + self.end_hl_token + context[end_idx:]
if len(context) < self.max_length or not truncate:
return context
sentences = self.tokenizer.sent_tokenize(context)
answer_sent_idx = None
for i in range(len(sentences)):
if self.start_hl_token in sentences[i]:
answer_sent_idx = i
break
i, j = answer_sent_idx, answer_sent_idx
truncated_context = [sentences[answer_sent_idx]]
while len(" ".join(truncated_context)) < self.max_length:
prev_context_length = len(" ".join(truncated_context))
i -= 1
j += 1
if i > 0:
truncated_context = [sentences[i]] + truncated_context
if j < len(sentences):
truncated_context = truncated_context + [sentences[j]]
if len(" ".join(truncated_context)) == prev_context_length:
break
truncated_context = " ".join(truncated_context)
if len(truncated_context) > self.max_length:
if start_idx < len(context) // 2:
truncated_context = truncated_context[: self.max_length]
else:
truncated_context = truncated_context[
len(truncated_context) - self.max_length : # noqa
]
return truncated_context
def __call__(
self,
answer: Union[List[str], str],
context: Union[List[str], str],
add_special_tokens: bool = True,
do_sent_split: bool = True,
beams: int = 5,
max_len_a: int = 1,
max_len_b: int = 50,
temperature: float = 1.0,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
no_repeat_ngram_size: int = 4,
length_penalty: float = 1.0,
return_context: bool = False,
batch_size: int = 32,
verbose: bool = True,
n_wrong: int = 0,
**kwargs,
):
assert isinstance(n_wrong, int)
if isinstance(answer, str) and isinstance(context, str):
context = [self._focus_answer(context, answer)]
elif isinstance(answer, list) and isinstance(context, str):
context = [self._focus_answer(context, a) for a in answer]
elif isinstance(answer, str) and isinstance(context, list):
context = [self._focus_answer(c, answer) for c in context]
elif isinstance(answer, list) and isinstance(context, list):
assert len(answer) == len(
context
), "length of answer list and context list must be same."
context = [self._focus_answer(c, a) for c, a in zip(context, answer)]
generated = self.generate(
text=context,
add_special_tokens=add_special_tokens,
beams=beams,
max_len_a=max_len_a,
max_len_b=max_len_b,
temperature=temperature,
top_k=top_k,
top_p=top_p,
no_repeat_ngram_size=no_repeat_ngram_size,
length_penalty=length_penalty,
batch_size=batch_size,
verbose=verbose,
**kwargs,
)
if issubclass(self.tokenizer.__class__, PreTrainedTokenizerBase):
decoded_text = self.tokenizer.batch_decode(
generated,
skip_special_tokens=True,
clean_up_tokenization_spaces=True,
)
else:
decoded_text = self.tokenizer.decode(generated)
output = [self._postprocess(text) for text in decoded_text]
if n_wrong > 0:
if isinstance(context, str) and isinstance(answer, str):
wrong_answers = self._sim_words._extract_wrongs(answer)
output = output, wrong_answers[:n_wrong]
elif isinstance(context, list) and isinstance(answer, str):
wrong_answers = self._sim_words._extract_wrongs(answer)[:n_wrong]
output = [(o, wrong_answers) for o in output]
elif isinstance(context, str) and isinstance(answer, list):
wrong_answers = [
self._sim_words._extract_wrongs(a)[:n_wrong] for a in answer
]
output = [(output, w) for w in wrong_answers]
else:
wrong_answers = [
self._sim_words._extract_wrongs(a)[:n_wrong] for a in answer
]
output = [(o, w) for o, w in zip(output, wrong_answers)]
return output
def _postprocess(self, text: str):
text = text.strip()
if not text.endswith("?"):
text += "?"
return text
```
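A minimal usage sketch for the class above (hypothetical: it assumes `qg` is an already-constructed QuestionGeneration instance; building the config, tokenizer and model is handled elsewhere in the library):

```python
# `qg` is assumed to be a ready QuestionGeneration instance (assumption, not shown in this file).
answer = "the answer span"
context = "A source article that mentions the answer span somewhere in its text."

# Per the class docstring: returns the generated question(s); with n_wrong > 0
# it also returns wrong-answer candidates produced by SimilarWords.
outputs = qg(answer, context, beams=5, n_wrong=0)
print(outputs)
```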
#### File: dooly/tokenizers/base.py
```python
import re
import torch
import unicodedata
from abc import abstractmethod
from typing import List, Union, Dict, Set, Optional
from .import_utils import (
is_available_kss,
is_available_nltk,
)
SPACE_NORMALIZER = re.compile(r"\s+")
InputTexts = Union[str, List[str]]
TokenizedOutput = Union[List[str], List[List[str]]]
EncodedOutput = Union[List[int], List[List[int]], torch.Tensor]
PaddedOutput = Union[List[List[int]], torch.Tensor]
DecodedOutput = Union[str, List[str]]
class _BaseTokenizer:
def __init__(
self,
lang: str,
vocab: Dict[str, int],
cls_token: str = "<s>",
sep_token: str = "</s>",
pad_token: str = "<pad>",
unk_token: str = "<unk>",
padding_side: str = "right",
max_seq_length: int = 512,
):
assert padding_side in ["right", "left"]
self.lang = lang
self.vocab = vocab
self.pos_vocab = None
self.id2token = {i: tok for tok, i in vocab.items()}
self.cls_token = cls_token
self.sep_token = sep_token
self.pad_token = pad_token
self.unk_token = unk_token
self.padding_side = padding_side
self.max_seq_length = max_seq_length
self._langtok_style = "basic"
self.sub_tokenizer = {}
@property
def cls_token_id(self) -> int:
return self.vocab[self.cls_token]
@property
def sep_token_id(self) -> int:
return self.vocab[self.sep_token]
@property
def pad_token_id(self) -> int:
return self.vocab[self.pad_token]
@property
def unk_token_id(self) -> int:
return self.vocab[self.unk_token]
@property
def nspecial(self) -> int:
return 4 # cls, sep, pad, unk
@property
def langtok_style(self):
return self._langtok_style
@langtok_style.setter
def langtok_style(self, val: str):
self._langtok_style = val
def _langtok(self, lang: str):
# https://github.com/pytorch/fairseq/blob/master/fairseq/data/multilingual/multilingual_utils.py#L34
langtok = ""
if self.langtok_style == "basic":
langtok = f"[{lang.upper()}]"
elif self.langtok_style == "mbart":
mapping = {"en": "_XX", "ja": "_XX", "ko": "_KR", "zh": "_CN"}
langtok = f"[{lang + mapping[lang]}]"
elif self.langtok_style == "multilingual":
langtok = f"__{lang}__"
return langtok
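# e.g. for lang="en": "basic" -> "[EN]", "mbart" -> "[en_XX]", "multilingual" -> "__en__"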
def _set_sub_tokenizer(self, lang: str, tokenizer_object):
self.sub_tokenizer[lang] = tokenizer_object
def __call__(
self,
text: InputTexts,
text_pair: Optional[InputTexts] = None,
src_lang: Optional[InputTexts] = None,
tgt_lang: Optional[InputTexts] = None,
padding: Union[str, bool] = False,
return_tokens: bool = False,
return_tags: bool = True,
return_tensors: Union[str, bool] = False,
return_attention_mask: bool = True,
add_special_tokens: bool = True,
no_separator: bool = False,
) -> Union[TokenizedOutput, Dict[str, EncodedOutput]]:
return self.encode(
text=text,
text_pair=text_pair,
src_lang=src_lang,
tgt_lang=tgt_lang,
padding=padding,
return_tokens=return_tokens,
return_tags=return_tags,
return_tensors=return_tensors,
return_attention_mask=return_attention_mask,
add_special_tokens=add_special_tokens,
no_separator=no_separator,
)
def _normalize(self, text: str) -> str:
""" Unicode normalization and whitespace removal (often needed for context) """
text = unicodedata.normalize("NFKC", text)
text = self._normalize_space(text)
return text
@staticmethod
def _normalize_space(text: str) -> str:
return SPACE_NORMALIZER.sub(" ", text).strip()
@abstractmethod
def _tokenize(self, text: str, *args, **kwargs) -> List[str]:
pass
def tokenize(
self,
text: str,
text_pair: Optional[str] = None,
src_lang: Optional[str] = None,
tgt_lang: Optional[str] = None,
return_tags: bool = True,
add_special_tokens: bool = False,
no_separator: bool = False,
) -> List[str]:
"""
If you want to use `src_lang` and `tgt_lang` parameters, plz overrides!
"""
if self.pos_vocab is None:
return_tags = False
tokenized = self._tokenize(text)
if return_tags:
tokenized, tags = tokenized
if add_special_tokens:
tokenized = [self.cls_token] + tokenized + [self.sep_token]
if return_tags:
tags = [self.cls_token] + tags + [self.sep_token]
if text_pair is not None:
tokenized += [self.sep_token] if not no_separator else []
tokenized_pair = self._tokenize(text_pair)
if return_tags:
tags += [self.sep_token] if not no_separator else []  # keep tags aligned with tokens
tokenized_pair, tags_pair = tokenized_pair
tags += tags_pair
tokenized += tokenized_pair
if add_special_tokens:
tokenized += [self.sep_token]
if return_tags:
tags += [self.sep_token]
if return_tags:
return tokenized, tags
return tokenized
def encode_line(
self,
tokenized: List[str],
add_special_tokens: bool = False,
use_pos_vocab: bool = False,
) -> List[int]:
vocab = self.vocab
if use_pos_vocab and self.pos_vocab is not None:
vocab = self.pos_vocab
encoded = []
for token in tokenized:
encoded.append(vocab.get(token, self.unk_token_id))
if add_special_tokens:
encoded = [self.cls_token_id] + encoded + [self.sep_token_id]
return encoded
def encode(
self,
text: InputTexts,
text_pair: Optional[InputTexts] = None,
src_lang: Optional[InputTexts] = None,
tgt_lang: Optional[InputTexts] = None,
padding: Union[str, bool] = False,
return_tokens: bool = False,
return_tags: bool = True,
return_tensors: Union[str, bool] = False,
return_attention_mask: bool = True,
add_special_tokens: bool = True,
no_separator: bool = False,
) -> Union[TokenizedOutput, Dict[str, EncodedOutput]]:
""" Encode tokens to ids, used for single or batched sentence """
assert isinstance(return_tensors, bool) or return_tensors == "pt"
return_tensors = (return_tensors == "pt") or return_tensors
assert text_pair is None or type(text) == type(text_pair)
if (src_lang is None) ^ (tgt_lang is None):
src_lang = tgt_lang = None
if not hasattr(self, "pos_tagger"):
return_tags = False
if isinstance(text, str):
return self.encode(
text=[text],
text_pair=[text_pair],
src_lang=[src_lang],
tgt_lang=[tgt_lang],
padding=padding,
return_tokens=return_tokens,
return_tags=return_tags,
return_tensors=return_tensors,
return_attention_mask=return_attention_mask,
add_special_tokens=add_special_tokens,
no_separator=no_separator,
)
if text_pair is None:
text_pair = [None] * len(text)
if src_lang is None:
src_lang = [None] * len(text)
if tgt_lang is None:
tgt_lang = [None] * len(text)
assert len(text) == len(text_pair)
assert len(src_lang) == len(tgt_lang)
if len(src_lang) == 1:
src_lang = src_lang * len(text)
tgt_lang = tgt_lang * len(text)
assert len(text) == len(src_lang)
texts, text_pairs = text, text_pair
src_langs, tgt_langs = src_lang, tgt_lang
input_ids = []
segment_labels = []
for text, text_pair, src_lang, tgt_lang in zip(
texts, text_pairs, src_langs, tgt_langs
):
tokenized = self.tokenize(
text=text,
text_pair=text_pair,
src_lang=src_lang,
tgt_lang=tgt_lang,
return_tags=return_tags,
no_separator=no_separator,
add_special_tokens=add_special_tokens,
)
encoded = None
encoded_tags = None
if return_tags:
tokenized, tags = tokenized
if not return_tokens:
encoded = self.encode_line(tokenized=tokenized)
if return_tags:
encoded_tags = self.encode_line(tokenized=tags, use_pos_vocab=True)
input_ids.append(tokenized if return_tokens else encoded)
if return_tags:
segment_labels.append(tags if return_tokens else encoded_tags)
if return_tokens:
input_ids = input_ids if len(texts) > 1 else input_ids[0]
if return_tags:
segment_labels = segment_labels if len(texts) > 1 else segment_labels[0]
return input_ids, segment_labels
return input_ids
attention_mask = None
if return_tensors or padding:
padded = self.pad(
sequences={"input_ids": input_ids},
padding=padding,
return_tensors=return_tensors,
)
input_ids = padded["input_ids"]
attention_mask = padded["attention_mask"]
if return_tags:
segment_labels = self.pad(
sequences={"input_ids": segment_labels},
padding=padding,
return_tensors=return_tensors,
)["input_ids"]
batch_encoding = {"input_ids": input_ids}
if return_attention_mask and attention_mask is not None:
batch_encoding.update({"attention_mask": attention_mask})
if return_tags:
batch_encoding.update({"segment_labels": segment_labels})
return batch_encoding
def decode_line(self, ids: List[int], ignore_symbols: Set[int] = {}) -> str:
sent = []
for _id in ids:
if _id not in ignore_symbols:
sent.append(self.id2token.get(_id, self.unk_token))
return " ".join(sent)
def _recover_original(self, decoded_text: str) -> str:
return decoded_text
def decode(
self,
ids: EncodedOutput,
ignore_symbols: List[int] = [],
recover_original: bool = True,
) -> DecodedOutput:
if isinstance(ids, torch.Tensor):
ids = ids.detach().cpu().tolist()
if isinstance(ids[0], int):
return self.decode(
ids=[ids],
ignore_symbols=ignore_symbols,
recover_original=recover_original,
)
ignore_symbols = set(None or ignore_symbols)
ignore_symbols.update([self.cls_token_id, self.sep_token_id, self.pad_token_id])
list_of_ids = ids
decoded_texts = []
for ids in list_of_ids:
decoded = self.decode_line(ids, ignore_symbols)
if recover_original:
decoded = self._recover_original(decoded)
decoded_texts.append(decoded)
if len(decoded_texts) == 1:
decoded_texts = decoded_texts[0]
return decoded_texts
def pad(
self,
sequences: Dict[str, EncodedOutput],
padding: Union[str, bool] = True,
return_tensors: bool = True,
pad_to_multiple_of: Union[int, bool] = False, # match to hf pad method
) -> Dict[str, PaddedOutput]:
"""Pad batched sequences.
If `return_tensors` is True, the padded outputs are returned as torch.LongTensor objects.
"""
input_ids = sequences.get("input_ids")
assert input_ids is not None
if isinstance(input_ids[0], int):
input_ids = [input_ids]
max_length = -1
if padding == "max_length":
max_length = self.max_seq_length
else:
max_length = max(len(ids) for ids in input_ids)
padded = {"input_ids": [], "attention_mask": []}
for ids in input_ids:
seq_len = len(ids)
if self.padding_side == "right":
ids = ids + [self.pad_token_id] * (max_length - seq_len)
attn_mask = [1] * seq_len + [0] * (max_length - seq_len)
else:
ids = [self.pad_token_id] * (max_length - seq_len) + ids
attn_mask = [0] * (max_length - seq_len) + [1] * seq_len
padded["input_ids"].append(ids)
padded["attention_mask"].append(attn_mask)
if return_tensors:
for k, v in padded.items():
padded[k] = torch.LongTensor(v)
return padded
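# Example (hypothetical ids, padding_side="right", pad_token_id assumed to be 1):
#   pad({"input_ids": [[5, 6, 7], [8, 9]]}, return_tensors=False)
#   -> {"input_ids": [[5, 6, 7], [8, 9, 1]], "attention_mask": [[1, 1, 1], [1, 1, 0]]}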
class SentTokenizeMixin:
""" Sentence Tokenization Mixin """
def _set_sent_tokenizer(self):
if self.lang in ["ko", "multi"]:
if is_available_kss():
from kss import split_sentences
self._ko_sent_tokenizer = split_sentences
else:
raise ModuleNotFoundError("Please install kss with: `pip install kss`.")
if self.lang in ["en", "multi"]:
if is_available_nltk():
import nltk
try:
nltk.data.find("tokenizers/punkt")
except LookupError:
nltk.download("punkt")
from nltk.tokenize import sent_tokenize
self._en_sent_tokenizer = sent_tokenize
else:
raise ModuleNotFoundError(
"Please install nltk with: `pip install nltk`."
)
def sent_tokenize(
self,
texts: InputTexts,
langs: Optional[InputTexts] = None,
) -> List[List[str]]:
if isinstance(texts, str):
texts = [texts]
if langs is None:
langs = self.lang
elif self.lang != "multi": # F632
raise AttributeError("`langs` parameter is only used for `multi` model.")
if isinstance(langs, str):
langs = [langs] * len(texts)
do_per_sample = False
if len(set(langs)) == 1 and langs[0] == "ko":
# korean sentence splitter can be batched
if not hasattr(self, "_ko_sent_tokenizer"):
raise AttributeError
try:
sentences = self._ko_sent_tokenizer(texts)
except Exception:
do_per_sample = True
else:
do_per_sample = True
if do_per_sample:
sentences = []
for text, lang in zip(texts, langs):
if lang in "ko":
if not hasattr(self, "_ko_sent_tokenizer"):
raise AttributeError
sentences.append(self._ko_sent_tokenizer(text))
elif lang == "en":
if not hasattr(self, "_en_sent_tokenizer"):
raise AttributeError
sentences.append(self._en_sent_tokenizer(text))
else: # lang in ["ja", "zh"]
text = text.replace("。", "。[SEP]")
text = text.replace("!", "![SEP]")
text = text.replace("?", "?[SEP]")
if "[SEP]" in text:
sents = text.split("[SEP]")
sents = sents[:-1]
else:
sents = [text]
sentences.append(sents)
num_sentences = [len(sents) for sents in sentences]
return sentences, num_sentences
class Tokenizer(_BaseTokenizer, SentTokenizeMixin):
""" Whitespace Base Tokenizer with sentence tokenizer """
pass
```
#### File: jinmang2/DOOLY/setup.py
```python
import re
import shutil
from pathlib import Path
from setuptools import setup, find_packages
VERSION = {} # type: ignore
with open("dooly/__version__.py", "r") as version_file:
exec(version_file.read(), VERSION)
# Remove stale dooly.egg-info directory to avoid https://github.com/pypa/pip/issues/5466
stale_egg_info = Path(__file__).parent / "dooly.egg-info"
if stale_egg_info.exists():
print(
(
"Warning: {} exists.\n\n"
"If you recently updated dooly, this is expected,\n"
"but it may prevent dooly from installing in editable mode.\n\n"
"This directory is automatically generated by Python's packaging tools.\n"
"I will remove it now.\n\n"
"See https://github.com/pypa/pip/issues/5466 for details.\n"
).format(stale_egg_info)
)
shutil.rmtree(stale_egg_info)
_deps = [
"black~=22.0",
"flake8>=3.8.3",
"dataclasses",
"datasets",
"numpy>=1.17",
"fugashi>=1.0",
"filelock",
"huggingface-hub>=0.1.0,<1.0",
"importlib_metadata",
"jieba",
"requests",
"regex",
"packaging>=20.0",
"pyyaml>=5.1",
"pororo",
"boto3",
"whoosh",
"ipadic>=1.0.0,<2.0",
"tqdm>=4.27",
"torch>=1.0",
"tokenizers>=0.11.1,!=0.11.3",
"transformers>=4.8.2",
"kss>=3.4.2",
"nltk",
]
deps = {
b: a for a, b in (re.findall(r"^(([^!=<>~]+)(?:[!=<>~].*)?$)", x)[0] for x in _deps)
}
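# e.g. "black~=22.0" yields deps["black"] = "black~=22.0"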
def deps_list(*pkgs):
return [deps[pkg] for pkg in pkgs]
extras = {}
extras["ja"] = deps_list("fugashi", "ipadic")
extras["zh"] = deps_list("jieba")
extras["search"] = deps_list("whoosh")
extras["convert"] = deps_list("pororo", "boto3")
extras["quality"] = deps_list("black", "flake8")
extras["all"] = extras["ja"] + extras["zh"] + extras["search"] + extras["quality"]
extras["pororo"] = extras["all"] + extras["convert"]
install_requires = [
deps["dataclasses"]
+ ";python_version<'3.7'", # dataclasses for Python versions that don't have it
deps["filelock"], # filesystem locks, e.g., to prevent parallel downloads
deps["huggingface-hub"],
deps["datasets"],
deps["numpy"],
deps["torch"],
deps["packaging"], # utilities from PyPA to e.g., compare versions
deps["pyyaml"], # used for the model cards metadata
deps["regex"], # for OpenAI GPT
deps["requests"], # for downloading models over HTTPS
deps["tokenizers"],
deps["transformers"],
deps["tqdm"], # progress bars in model download and training scripts
deps["kss"],
deps["nltk"],
]
setup(
name="dooly",
version=VERSION["version"],
url="https://github.com/jinmang2/DOOLY",
author="jinmang2",
author_email="<EMAIL>",
description="A library that handles everything with 🤗 and supports batching to models in PORORO",
python_requires=">=3.6.0",
packages=find_packages(exclude=["tests"]),
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
extras_require=extras,
install_requires=install_requires,
zip_safe=False,
classifiers=[
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: Apache Software License",
],
)
``` |
{
"source": "jinmang2/hierarchical-transformer-1d",
"score": 3
} |
#### File: hierarchical-transformer-1d/src/configuration_htransformer1d.py
```python
from math import log2
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
class HTransformer1DConfig(PretrainedConfig):
r"""
[DESCRIPTION]
Args:
Example::
"""
model_type = "h-transformer-1d"
def __init__(
self,
vocab_size=50000,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
block_size=128, # this is the Nr in the paper - Nb = (max_seq_len / tokens_per_block)
reversible=True, # use reversibility, to save on memory with increased depth
shift_tokens=True, # whether to shift half the feature space by one along the sequence dimension, for faster convergence (experimental feature)
attention_probs_dropout_prob=0.1,
max_position_embeddings=8192, # parameterized together with the dim_head info;
# this value is used as the maximum sequence length
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
attn_eps=1e-8,
pad_token_id=0,
rotary_value=False, # whether or not to also apply rotary embeddings to the value projections
rotary_theta=10000,
learned_freq=False,
# use_cache=True,
position_embedding_type="rotary",
**kwargs
):
super().__init__(pad_token_id=pad_token_id, **kwargs)
assert (max_position_embeddings % block_size) == 0, (
'maximum sequence length must be divisible by the block size'
)
num_blocks = max_position_embeddings // block_size
assert log2(max_position_embeddings // block_size).is_integer(), (
f'number of blocks {num_blocks} must be a power of 2'
)
assert (hidden_size % num_attention_heads) == 0, (
'hidden size must be divisible by the number of attention heads'
)
assert position_embedding_type in ['absolute', 'rotary'], (
'position embedding type must be either \'absolute\' or \'rotary\''
)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.dim_head = hidden_size // num_attention_heads
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.block_size = block_size
self.num_blocks = num_blocks
self.reversible = reversible
self.shift_tokens = shift_tokens
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.attn_eps = attn_eps
self.rotary_value = rotary_value
self.rotary_theta = rotary_theta
self.learned_freq = learned_freq
# self.use_cache = use_cache
self.position_embedding_type = position_embedding_type
``` |
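The configuration enforces that the maximum sequence length is a multiple of the block size and that the resulting number of blocks is a power of two. A minimal sketch using only the class defaults (the import path is an assumption based on the file location):

```python
from src.configuration_htransformer1d import HTransformer1DConfig  # assumed import path

config = HTransformer1DConfig(max_position_embeddings=8192, block_size=128)
assert config.num_blocks == 64        # 8192 / 128, and log2(64) is an integer
assert config.dim_head == 768 // 12   # hidden_size divided by num_attention_heads
```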
{
"source": "jinmang2/PRML",
"score": 2
} |
#### File: nn/array/ones.py
```python
from prml.nn.array.array import Array
from prml.nn.config import config
import numpy as np
def ones(size):
return Array(np.ones(size, dtype=config.dtype))
```
#### File: nn/math/add.py
```python
import numpy as np
from prml.nn.function import Function
class Add(Function):
enable_auto_broadcast = True
@staticmethod
def _forward(x, y):
return x + y
@staticmethod
def _backward(delta, x, y):
return delta, delta
class AddBias(Function):
@staticmethod
def _forward(x, y):
return x + y
@staticmethod
def _backward(delta, x, y):
dx = delta
dy = np.sum(delta, axis=tuple(i for i in range(x.ndim - 1)))
return dx, dy
class AddScalar(Function):
@staticmethod
def _forward(x, y):
return x + y
@staticmethod
def _backward(delta, x, y):
dx = delta
dy = np.atleast_1d(np.sum(delta))
return dx, dy
def add(x, y):
return Add().forward(x, y)
# x = Function._convert2array(x)
# y = Function._convert2array(y)
# if x.shape == y.shape:
# return Add().forward(x, y)
# elif x.size == 1:
# return AddScalar().forward(y, x)
# elif y.size == 1:
# return AddScalar().forward(x, y)
# elif x.shape[-1] == y.shape[-1]:
# if x.ndim == 1:
# return AddBias().forward(y, x)
# elif y.ndim == 1:
# return AddBias().forward(x, y)
# else:
# raise ValueError
```
#### File: nn/math/matmul.py
```python
from prml.nn.function import Function
class Matmul(Function):
@staticmethod
def _forward(x, y):
return x @ y
@staticmethod
def _backward(delta, x, y):
dx = delta @ y.T
dy = x.T @ delta
return dx, dy
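# For Z = X @ Y with upstream gradient delta = dL/dZ:
#   dL/dX = delta @ Y^T and dL/dY = X^T @ delta, as implemented above.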
def matmul(x, y):
return Matmul().forward(x, y)
def rmatmul(x, y):
return Matmul().forward(y, x)
```
#### File: nn/math/multiply.py
```python
import numpy as np
from prml.nn.function import Function
class Multiply(Function):
enable_auto_broadcast = True
@staticmethod
def _forward(x, y):
return x * y
@staticmethod
def _backward(delta, x, y):
dx = delta * y
dy = delta * x
return dx, dy
def multiply(x, y):
return Multiply().forward(x, y)
```
#### File: nn/math/sqrt.py
```python
import numpy as np
from prml.nn.function import Function
class Sqrt(Function):
def _forward(self, x):
self.output = np.sqrt(x)
return self.output
def _backward(self, delta, x):
return 0.5 * delta / self.output
def sqrt(x):
return Sqrt().forward(x)
```
#### File: nn/normalization/batch_normalization.py
```python
import numpy as np
from prml.nn.array.ones import ones
from prml.nn.array.zeros import zeros
from prml.nn.config import config
from prml.nn.function import Function
from prml.nn.network import Network
class BatchNormalizationFunction(Function):
def _forward(self, x):
self.mean = x.mean(axis=0)
self.xc = x - self.mean
self.var = np.mean(self.xc ** 2, axis=0)
self.std = np.sqrt(self.var + 1e-7)
return self.xc / self.std
def _backward(self, delta, x):
# dstd = -np.mean((delta * self.xc) / (self.std ** 2), axis=0)
dxc = delta / self.std - self.xc * np.mean((delta * self.xc) / (self.std ** 3), axis=0)
return dxc - np.mean(dxc, axis=0)
# dstd = -np.mean((delta * self.xc) / (self.std ** 2), axis=0)
# dxc = delta / self.std + self.xc * dstd / self.std
# return dxc - np.mean(dxc, axis=0)
# dxn = delta
# dxc = dxn / self.std
# dstd = -np.sum((dxn * self.xc) / (self.std ** 2), axis=0)
# dvar = 0.5 * dstd / self.std
# dxc += 2.0 * self.xc * dvar / delta.shape[0]
# dmu = np.sum(dxc, axis=0)
# dx = dxc - dmu / delta.shape[0]
# return dx
class BatchNormalization(Network):
def __init__(self, ndim, scale=None, bias=None, momentum=0.9):
super().__init__()
self.momentum = momentum
with self.set_parameter():
self.mean = zeros(ndim)
self.var = ones(ndim)
def __call__(self, x):
shape = x.shape
x = x.reshape(-1, x.shape[-1])
if config.is_updating_bn:
func = BatchNormalizationFunction()
out = func.forward(x)
self.mean.value = self.momentum * self.mean.value + (1 - self.momentum) * func.mean
self.var.value = self.momentum * self.var.value + (1 - self.momentum) * func.var
del func.mean
del func.var
else:
xc = x - self.mean.value
out = xc / np.sqrt(self.var.value + 1e-7)
return out.reshape(*shape)
```
#### File: nn/optimizer/ada_delta.py
```python
import numpy as np
from prml.nn.config import config
from prml.nn.optimizer.optimizer import Optimizer
class AdaDelta(Optimizer):
"""
AdaDelta optimizer
"""
def __init__(self, parameter: dict, rho=0.95, epsilon=1e-8):
super().__init__(parameter, None)
self.rho = rho
self.epsilon = epsilon
self.mean_squared_deriv = {}
self.mean_squared_update = {}
for key, param in self.parameter.items():
self.mean_squared_deriv[key] = np.zeros(param.shape, dtype=config.dtype)
self.mean_squared_update[key] = np.zeros(param.shape, dtype=config.dtype)
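# AdaDelta keeps decaying averages of squared gradients (mean_squared_deriv) and
# squared updates (mean_squared_update); each step is scaled by
# sqrt((msu + eps) / (msd + eps)), so no global learning rate is required.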
def update(self):
for key in self.parameter:
param = self.parameter[key]
if param.grad is None:
continue
msd = self.mean_squared_deriv[key]
msu = self.mean_squared_update[key]
grad = param.grad
msd *= self.rho
msd += (1 - self.rho) * grad ** 2
delta = np.sqrt((msu + self.epsilon) / (msd + self.epsilon)) * grad
msu *= self.rho
msu += (1 - self.rho) * delta ** 2
param.value += delta
```
#### File: nn/optimizer/gradient.py
```python
from prml.nn.optimizer.optimizer import Optimizer
class Gradient(Optimizer):
def __init__(self, parameter, learning_rate=1e-3):
super().__init__(parameter, learning_rate)
def update(self):
for param in self.parameter.values():
param.value += self.learning_rate * param.grad
```
#### File: test/test_linear/test_linear_regression.py
```python
import unittest
import numpy as np
from prml.linear import LinearRegression
class TestLinearRegression(unittest.TestCase):
def test_fit(self):
x_train = np.array([-1, 0, 1]).reshape(-1, 1)
y_train = np.array([-2, 0, 2])
model = LinearRegression()
model.fit(x_train, y_train)
self.assertTrue(
np.allclose(model.w, np.array([2])),
)
def test_predict(self):
x_train = np.array([-1, 0, 1]).reshape(-1, 1)
y_train = np.array([-2, 0, 2])
model = LinearRegression()
model.fit(x_train, y_train)
actual = model.predict(np.array([[3]]))
self.assertTrue(np.allclose(actual, np.array([6])))
if __name__ == '__main__':
unittest.main()
```
#### File: test/test_linear/test_logistic_regression.py
```python
import unittest
import numpy as np
from prml.linear import LogisticRegression
class TestLogisticRegression(unittest.TestCase):
def test_fit_classify_proba(self):
x_train = np.array([-3, -2, -1, 1, 2, 3]).reshape(-1, 1)
y_train = np.array([0, 0, 1, 0, 1, 1])
model = LogisticRegression()
model.fit(x_train, y_train)
self.assertTrue(np.allclose(model.w, np.array([0.73248753])))
actual = model.classify(np.array([[-5], [5]]))
self.assertTrue(np.allclose(actual, np.array([0, 1])))
actual = model.proba(np.array([[0], [4]]))
self.assertTrue(np.allclose(actual, np.array([0.5, 0.94930727])))
if __name__ == '__main__':
unittest.main()
```
#### File: test_nn/test_image/test_convolve2d.py
```python
import unittest
import numpy as np
from scipy.ndimage.filters import correlate
import prml.nn as nn
class TestConvolve2d(unittest.TestCase):
def test_convolve2d_forward(self):
img = np.random.randn(1, 5, 5, 1)
kernel = np.random.randn(3, 3, 1, 1)
output = nn.convolve2d(img, kernel)
self.assertTrue(
np.allclose(
output.value[0, ..., 0],
correlate(img[0, ..., 0], kernel[..., 0, 0])[1:-1, 1:-1]
)
)
self.assertEqual(nn.config.dtype, np.float32)
self.assertEqual(output.value.dtype, nn.config.dtype)
def test_convolve2d_backward(self):
x = nn.random.normal(0, 1, (1, 5, 5, 1))
w = nn.random.normal(0, 1, (3, 3, 1, 1))
for _ in range(1000):
x.cleargrad()
w.cleargrad()
output = nn.convolve2d(x, w, (2, 2), (1, 1))
output.backward(2 * (output.value - 1))
x.value -= x.grad * 0.01
w.value -= w.grad * 0.01
self.assertTrue(np.allclose(output.value, 1))
self.assertEqual(nn.config.dtype, np.float32)
self.assertEqual(x.dtype, nn.config.dtype)
self.assertEqual(w.dtype, nn.config.dtype)
self.assertEqual(output.dtype, nn.config.dtype)
def test_convolve2d_network(self):
x = nn.random.normal(0, 1, (1, 5, 5, 1))
kernel = nn.random.normal(0, 1, (3, 3, 1, 1))
conv = nn.image.Convolve2d(kernel, (1, 1), (0, 0))
for _ in range(1000):
x.cleargrad()
conv.clear()
output = conv(x)
output.backward(2 * (output.value - 1))
x.value -= x.grad * 0.01
for param in conv.parameter.values():
param.value -= param.grad * 0.01
self.assertTrue(np.allclose(output.value, 1))
if __name__ == "__main__":
unittest.main()
```
#### File: test_nn/test_math/test_add.py
```python
import unittest
import numpy as np
import prml.nn as nn
class TestAdd(unittest.TestCase):
def test_add(self):
npa = np.random.randn(4, 5)
npb = np.random.randn(4, 5)
a = nn.asarray(npa)
b = nn.asarray(npb)
c = a + b
self.assertTrue(np.allclose(c.value, npa + npb))
npg = np.random.randn(4, 5)
c.backward(npg)
self.assertTrue(np.allclose(a.grad, npg))
self.assertTrue(np.allclose(b.grad, npg))
def test_add_bias(self):
npa = np.random.randn(4, 3)
npb = np.random.randn(3)
a = nn.asarray(npa)
b = nn.asarray(npb)
c = a + b
self.assertTrue(np.allclose(c.value, npa + npb))
npg = np.random.randn(4, 3)
c.backward(npg)
self.assertTrue(np.allclose(a.grad, npg))
self.assertTrue(np.allclose(b.grad, npg.sum(axis=0)))
def test_add_scalar(self):
npa = np.random.randn(5, 6)
npb = 2
a = nn.asarray(npa)
b = nn.asarray(npb)
c = a + b
self.assertTrue(np.allclose(c.value, npa + npb))
npg = np.random.randn(5, 6)
c.backward(npg)
self.assertTrue(np.allclose(a.grad, npg))
self.assertTrue(np.allclose(b.grad, np.sum(npg)))
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jinmang2/RetroReader",
"score": 2
} |
#### File: jinmang2/RetroReader/app.py
```python
import streamlit as st
import io
import os
import yaml
import pyarrow
import tokenizers
os.environ["TOKENIZERS_PARALLELISM"] = "true"
# SETTING PAGE CONFIG TO WIDE MODE
st.set_page_config(layout="wide")
@st.cache
def from_library():
from retro_reader import RetroReader
from retro_reader import constants as C
return C, RetroReader
C, RetroReader = from_library()
# https://stackoverflow.com/questions/70274841/streamlit-unhashable-typeerror-when-i-use-st-cache
my_hash_func = {
io.TextIOWrapper: lambda _: None,
pyarrow.lib.Buffer: lambda _: 0,
tokenizers.Tokenizer: lambda _: None,
tokenizers.AddedToken: lambda _: None
}
# @st.cache(hash_funcs=my_hash_func, allow_output_mutation=True)
# def load_ko_roberta_large_model():
# config_file = "configs/inference_ko_roberta_large.yaml"
# return RetroReader.load(config_file=config_file)
@st.cache(hash_funcs=my_hash_func, allow_output_mutation=True)
def load_ko_electra_small_model():
config_file = "configs/inference_ko_electra_small.yaml"
return RetroReader.load(config_file=config_file)
# @st.cache(hash_funcs=my_hash_func, allow_output_mutation=True)
# def load_en_electra_large_model():
# config_file = "configs/inference_en_electra_large.yaml"
# return RetroReader.load(config_file=config_file)
RETRO_READER_HOST = {
# "klue/roberta-large": load_ko_roberta_large_model(),
"monologg/koelectra-small-v3-discriminator": load_ko_electra_small_model(),
# "google/electra-large-discriminator": load_en_electra_large_model(),
}
def main():
st.title("Retrospective Reader Demo")
st.markdown("## Model name")
option = st.selectbox(
label="Choose the model used in retro reader",
options=(
"[ko_KR] klue/roberta-large",
"[ko_KR] monologg/koelectra-small-v3-discriminator",
"[en_XX] google/electra-large-discriminator"
),
index=1,
)
lang_code, model_name = option.split(" ")
retro_reader = RETRO_READER_HOST[model_name]
# retro_reader = load_model()
lang_prefix = "KO" if lang_code == "[ko_KR]" else "EN"
height = 300 if lang_code == "[ko_KR]" else 200
retro_reader.null_score_diff_threshold = st.sidebar.slider(
label="null_score_diff_threshold",
min_value=-10.0, max_value=10.0, value=0.0, step=1.0,
help="ma!",
)
retro_reader.rear_threshold = st.sidebar.slider(
label="rear_threshold",
min_value=-10.0, max_value=10.0, value=0.0, step=1.0,
help="ma!",
)
retro_reader.n_best_size = st.sidebar.slider(
label="n_best_size",
min_value=1, max_value=50, value=20, step=1,
help="ma!",
)
retro_reader.beta1 = st.sidebar.slider(
label="beta1",
min_value=-10.0, max_value=10.0, value=1.0, step=1.0,
help="ma!",
)
retro_reader.beta2 = st.sidebar.slider(
label="beta2",
min_value=-10.0, max_value=10.0, value=1.0, step=1.0,
help="ma!",
)
retro_reader.best_cof = st.sidebar.slider(
label="best_cof",
min_value=-10.0, max_value=10.0, value=1.0, step=1.0,
help="ma!",
)
return_submodule_outputs = st.sidebar.checkbox('return_submodule_outputs', value=False)
st.markdown("## Demonstration")
with st.form(key="my_form"):
query = st.text_input(
label="Type your query",
value=getattr(C, f"{lang_prefix}_EXAMPLE_QUERY"),
max_chars=None,
help=getattr(C, f"{lang_prefix}_QUERY_HELP_TEXT"),
)
context = st.text_area(
label="Type your context",
value=getattr(C, f"{lang_prefix}_EXAMPLE_CONTEXTS"),
height=height,
max_chars=None,
help=getattr(C, f"{lang_prefix}_CONTEXT_HELP_TEXT"),
)
submit_button = st.form_submit_button(label="Submit")
if submit_button:
with st.spinner("Please wait.."):
outputs = retro_reader(
query=query,
context=context,
return_submodule_outputs=return_submodule_outputs,
)
answer, score = outputs[0]["id-01"], outputs[1]
if not answer:
answer = "No answer"
st.markdown("## Results")
st.write(answer)
st.markdown("### Rear Verification Score")
st.json(score)
if return_submodule_outputs:
score_ext, nbest_preds, score_diff = outputs[2:]
st.markdown("### Sketch Reader Score (score_ext)")
st.json(score_ext)
st.markdown("### Intensive Reader Score (score_diff)")
st.json(score_diff)
st.markdown("### N Best Predictions (from intensive reader)")
st.json(nbest_preds)
if __name__ == "__main__":
main()
``` |
{
"source": "jinmeiib/dnstap-receiver",
"score": 2
} |
#### File: dnstap-receiver/dnstap_receiver/receiver.py
```python
import argparse
import logging
import asyncio
import socket
import yaml
import sys
import re
import ssl
import pkgutil
import ipaddress
from datetime import datetime, timezone
# python3 -m pip dnspython
import dns.rcode
import dns.rdatatype
import dns.message
# wget https://raw.githubusercontent.com/dnstap/dnstap.pb/master/dnstap.proto
# wget https://github.com/protocolbuffers/protobuf/releases/download/v3.13.0/protoc-3.13.0-linux-x86_64.zip
# python3 -m pip install protobuf
# bin/protoc --python_out=. dnstap.proto
from dnstap_receiver import dnstap_pb2 # more informations on dnstap http://dnstap.info/
from dnstap_receiver import fstrm # framestreams decoder
from dnstap_receiver import output_stdout
from dnstap_receiver import output_syslog
from dnstap_receiver import output_tcp
from dnstap_receiver import output_metrics
DNSTAP_TYPE = { 1: 'AUTH_QUERY', 2: 'AUTH_RESPONSE',
3: 'RESOLVER_QUERY', 4: 'RESOLVER_RESPONSE',
5: 'CLIENT_QUERY', 6: 'CLIENT_RESPONSE',
7: 'FORWARDER_QUERY', 8: 'FORWARDER_RESPONSE',
9: 'STUB_QUERY', 10: 'STUB_RESPONSE',
11: 'TOOL_QUERY', 12: 'TOOL_RESPONSE' }
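# note: odd type values are queries and even values are responses,
# which is what the `dm.type % 2` checks below rely on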
DNSTAP_FAMILY = {1: 'IP4', 2: 'IP6'}
DNSTAP_PROTO = {1: 'UDP', 2: 'TCP'}
# command line arguments definition
parser = argparse.ArgumentParser()
parser.add_argument("-l",
help="IP of the dnsptap server to receive dnstap payloads (default: %(default)r)",
default="0.0.0.0")
parser.add_argument("-p", type=int,
help="Port the dnstap receiver is listening on (default: %(default)r)",
default=6000)
parser.add_argument("-u", help="read dnstap payloads from unix socket")
parser.add_argument('-v', action='store_true', help="verbose mode")
parser.add_argument("-c", help="external config file")
import dns.exception
import dns.opcode
import dns.flags
class _WireReader(dns.message._WireReader):
def read(self):
"""issue fixed - waiting fix with dnspython 2.1"""
if self.parser.remaining() < 12:
raise dns.message.ShortHeader
(id, flags, qcount, ancount, aucount, adcount) = \
self.parser.get_struct('!HHHHHH')
factory = dns.message._message_factory_from_opcode(dns.opcode.from_flags(flags))
self.message = factory(id=id)
self.message.flags = flags
self.initialize_message(self.message)
self.one_rr_per_rrset = \
self.message._get_one_rr_per_rrset(self.one_rr_per_rrset)
self._get_question(dns.message.MessageSection.QUESTION, qcount)
return self.message
def from_wire(wire, question_only=True):
"""decode wire message - waiting fix with dnspython 2.1"""
raise_on_truncation=False
def initialize_message(message):
message.request_mac = b''
message.xfr = False
message.origin = None
message.tsig_ctx = None
reader = _WireReader(wire, initialize_message, question_only=question_only,
one_rr_per_rrset=False, ignore_trailing=False,
keyring=None, multi=False)
try:
m = reader.read()
except dns.exception.FormError:
if reader.message and (reader.message.flags & dns.flags.TC) and \
raise_on_truncation:
raise dns.message.Truncated(message=reader.message)
else:
raise
# Reading a truncated message might not have any errors, so we
# have to do this check here too.
if m.flags & dns.flags.TC and raise_on_truncation:
raise dns.message.Truncated(message=m)
return m
async def cb_ondnstap(dnstap_decoder, payload, cfg, queue, metrics):
"""on dnstap"""
# decode binary payload
dnstap_decoder.ParseFromString(payload)
dm = dnstap_decoder.message
# filtering by dnstap identity ?
tap_ident = dnstap_decoder.identity.decode()
if not len(tap_ident):
tap_ident = "-"
if cfg["filter"]["dnstap-identities"] is not None:
if re.match(cfg["filter"]["dnstap-identities"], dnstap_decoder.identity.decode()) is None:
del dm
return
tap = { "identity": tap_ident,
"query-name": "-",
"query-type": "-",
"source-ip": "-"}
# decode type message
tap["message"] = DNSTAP_TYPE.get(dm.type, "-")
tap["protocol"] = DNSTAP_FAMILY.get(dm.socket_family, "-")
tap["transport"] = DNSTAP_PROTO.get(dm.socket_protocol, "-")
# decode query address
if len(dm.query_address) and dm.socket_family == 1:
tap["source-ip"] = socket.inet_ntoa(dm.query_address)
if len(dm.query_address) and dm.socket_family == 2:
tap["source-ip"] = socket.inet_ntop(socket.AF_INET6, dm.query_address)
tap["source-port"] = dm.query_port
if tap["source-port"] == 0:
tap["source-port"] = "-"
# handle query message
if (dm.type % 2 ) == 1 :
dnstap_parsed = from_wire(dm.query_message,
question_only=True)
tap["length"] = len(dm.query_message)
d1 = dm.query_time_sec + (round(dm.query_time_nsec ) / 1000000000)
tap["timestamp"] = datetime.fromtimestamp(d1, tz=timezone.utc).isoformat()
# handle response message
if (dm.type % 2 ) == 0 :
dnstap_parsed = from_wire(dm.response_message,
question_only=True)
tap["length"] = len(dm.response_message)
d2 = dm.response_time_sec + (round(dm.response_time_nsec ) / 1000000000)
tap["timestamp"] = datetime.fromtimestamp(d2, tz=timezone.utc).isoformat()
# common params
if len(dnstap_parsed.question):
tap["query-name"] = dnstap_parsed.question[0].name
tap["query-type"] = dns.rdatatype.to_text(dnstap_parsed.question[0].rdtype)
tap["code"] = dns.rcode.to_text(dnstap_parsed.rcode())
# filtering by qname ?
if cfg["filter"]["qname-regex"] is not None:
if re.match(cfg["filter"]["qname-regex"], tap["query-name"]) is None:
del dm; del tap;
return
# update metrics
metrics.record_dnstap(dnstap=tap)
# finally add decoded tap message in queue for outputs
# except for metrics
# if cfg["output"]["metrics"]["enable"]:
# return
queue.put_nowait(tap)
async def cb_onconnect(reader, writer, cfg, queue, metrics):
"""callback when a connection is established"""
# get peer name
peername = writer.get_extra_info('peername')
if not len(peername):
peername = "(unix-socket)"
logging.debug(f"Input handler: new connection from {peername}")
# access control list check
if len(writer.get_extra_info('peername')):
acls_network = []
for a in cfg["input"]["tcp-socket"]["access-control-list"]:
acls_network.append(ipaddress.ip_network(a))
acl_allow = False
for acl in acls_network:
if ipaddress.ip_address(peername[0]) in acl:
acl_allow = True
if not acl_allow:
writer.close()
logging.debug("Input handler: checking acl refused")
return
logging.debug("Input handler: checking acl allowed")
# prepare frame streams decoder
fstrm_handler = fstrm.FstrmHandler()
loop = asyncio.get_event_loop()
dnstap_decoder = dnstap_pb2.Dnstap()
try:
# syntax only works with python 3.8
# while data := await reader.read(fstrm_handler.pending_nb_bytes())
running = True
while running:
# read bytes
data = await reader.read(fstrm_handler.pending_nb_bytes())
if not len(data):
running = False
break
# append data to the buffer
fstrm_handler.append(data=data)
# process the buffer, check if we have received a complete frame ?
if fstrm_handler.process():
# Ok, the frame is complete so let's decode it
fs, payload = fstrm_handler.decode()
# handle the DATA frame
if fs == fstrm.FSTRM_DATA_FRAME:
loop.create_task(cb_ondnstap(dnstap_decoder, payload, cfg, queue, metrics))
# handle the control frame READY
if fs == fstrm.FSTRM_CONTROL_READY:
logging.debug(f"Input handler: control ready received from {peername}")
ctrl_accept = fstrm_handler.encode(fs=fstrm.FSTRM_CONTROL_ACCEPT)
# respond with accept only if the content type is dnstap
writer.write(ctrl_accept)
await writer.drain()
logging.debug(f"Input handler: sending control accept to {peername}")
# handle the control frame READY
if fs == fstrm.FSTRM_CONTROL_START:
logging.debug(f"Input handler: control start received from {peername}")
# handle the control frame STOP
if fs == fstrm.FSTRM_CONTROL_STOP:
logging.debug(f"Input handler: control stop received from {peername}")
fstrm_handler.reset()
except asyncio.CancelledError:
logging.debug(f'Input handler: {peername} - closing connection.')
writer.close()
await writer.wait_closed()
except asyncio.IncompleteReadError:
logging.debug(f'Input handler: {peername} - disconnected')
finally:
logging.debug(f'Input handler: {peername} - closed')
class Metrics:
def prepare(self):
"""prepare stats"""
self.stats = {"total-queries": 0}
self.queries = {}
self.rtype = {}
self.rcode = {}
self.clients = {}
self.nxdomains = {}
self.proto = {}
self.family = {}
def reset(self):
"""reset statistics"""
del self.stats
del self.queries
del self.rtype
del self.rcode
del self.clients
del self.nxdomains
del self.proto
del self.family
self.prepare()
def record_dnstap(self, dnstap):
"""add dnstap message"""
self.stats["total-queries"] += 1
if dnstap["transport"] not in self.proto:
self.proto[dnstap["transport"]] = 1
else:
self.proto[dnstap["transport"]] += 1
if dnstap["protocol"] not in self.family:
self.family[dnstap["protocol"]] = 1
else:
self.family[dnstap["protocol"]] += 1
if dnstap["query-name"] not in self.queries:
self.queries[dnstap["query-name"]] = 1
else:
self.queries[dnstap["query-name"]] += 1
if dnstap["source-ip"] not in self.clients:
self.clients[dnstap["source-ip"]] = 1
else:
self.clients[dnstap["source-ip"]] += 1
if dnstap["query-type"] not in self.rtype:
self.rtype[dnstap["query-type"]] = 1
else:
self.rtype[dnstap["query-type"]] += 1
if dnstap["code"] not in self.rcode:
self.rcode[dnstap["code"]] = 1
else:
self.rcode[dnstap["code"]] += 1
def start_receiver():
"""start dnstap receiver"""
# Handle command-line arguments.
args = parser.parse_args()
# set default config
try:
cfg = yaml.safe_load(pkgutil.get_data(__package__, 'dnstap.conf'))
except FileNotFoundError:
logging.error("default config file not found")
sys.exit(1)
except yaml.parser.ParserError:
logging.error("invalid default yaml config file")
sys.exit(1)
# update default config with command line arguments
cfg["verbose"] = args.v
cfg["input"]["unix-socket"]["path"] = args.u
cfg["input"]["tcp-socket"]["local-address"] = args.l
cfg["input"]["tcp-socket"]["local-port"] = args.p
# overwrite config with external file ?
if args.c:
try:
with open(args.c) as file:
cfg.update( yaml.safe_load(file) )
except FileNotFoundError:
logging.error("external config file not found")
sys.exit(1)
except yaml.parser.ParserError:
logging.error("external invalid yaml config file")
sys.exit(1)
# init logging
level = logging.INFO
if cfg["verbose"]:
level = logging.DEBUG
logging.basicConfig(format='%(asctime)s %(message)s', stream=sys.stdout, level=level)
# start receiver and get event loop
logging.debug("Start receiver...")
loop = asyncio.get_event_loop()
# prepare output
queue = asyncio.Queue()
metrics = Metrics()
metrics.prepare()
if cfg["output"]["syslog"]["enable"]:
logging.debug("Output handler: syslog")
loop.create_task(output_syslog.handle(cfg["output"]["syslog"],
queue,
metrics))
if cfg["output"]["tcp-socket"]["enable"]:
logging.debug("Output handler: tcp")
loop.create_task(output_tcp.handle(cfg["output"]["tcp-socket"],
queue,
metrics))
if cfg["output"]["stdout"]["enable"]:
logging.debug("Output handler: stdout")
loop.create_task(output_stdout.handle(cfg["output"]["stdout"],
queue,
metrics))
if cfg["output"]["metrics"]["enable"]:
logging.debug("Output handler: metrics")
loop.create_task(output_metrics.handle(cfg["output"]["metrics"],
queue,
metrics))
# asynchronous unix socket
if cfg["input"]["unix-socket"]["path"] is not None:
logging.debug("Input handler: unix socket")
logging.debug("Input handler: listening on %s" % args.u)
socket_server = asyncio.start_unix_server(lambda r, w: cb_onconnect(r, w, cfg, queue, metrics),
path=cfg["input"]["unix-socket"]["path"],
loop=loop)
# default mode: asynchronous tcp socket
else:
logging.debug("Input handler: tcp socket")
ssl_context = None
if cfg["input"]["tcp-socket"]["tls-support"]:
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.load_cert_chain(certfile=cfg["input"]["tcp-socket"]["tls-server-cert"],
keyfile=cfg["input"]["tcp-socket"]["tls-server-key"])
logging.debug("Input handler - tls support enabled")
logging.debug("Input handler: listening on %s:%s" % (cfg["input"]["tcp-socket"]["local-address"],
cfg["input"]["tcp-socket"]["local-port"])),
socket_server = asyncio.start_server(lambda r, w: cb_onconnect(r, w, cfg, queue, metrics),
cfg["input"]["tcp-socket"]["local-address"],
cfg["input"]["tcp-socket"]["local-port"],
ssl=ssl_context,
loop=loop)
# run until complete
loop.run_until_complete(socket_server)
# run event loop
try:
loop.run_forever()
except KeyboardInterrupt:
pass
```
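For orientation, the configuration that `start_receiver` reads can be summarised as a nested dictionary. The sketch below only mirrors the `cfg[...]` keys referenced in the code above; the values are illustrative assumptions, and the packaged `dnstap.conf` remains the authoritative default.
```python
# Illustrative only: structure inferred from the cfg[...] lookups in receiver.py above.
cfg_sketch = {
    "verbose": True,
    "input": {
        "unix-socket": {"path": None},
        "tcp-socket": {
            "local-address": "0.0.0.0",
            "local-port": 6000,
            "access-control-list": ["127.0.0.1/32"],
            "tls-support": False,
            "tls-server-cert": None,
            "tls-server-key": None,
        },
    },
    "filter": {"dnstap-identities": None, "qname-regex": None},
    "output": {
        "syslog": {"enable": False},
        "tcp-socket": {"enable": False},
        "stdout": {"enable": True},
        "metrics": {"enable": False},
    },
}
```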
#### File: dnstap-receiver/tests/test_receiver_unixsocket.py
```python
import time
import unittest
import subprocess
import dns.resolver
my_resolver = dns.resolver.Resolver(configure=False)
my_resolver.nameservers = ['127.0.0.1']
import shlex
class TestUnixSocket(unittest.TestCase):
def test1_listening(self):
"""test listening unix socket"""
cmd = 'su - _dnsdist -s /bin/bash -c \'python3 -c "from dnstap_receiver.receiver import start_receiver; start_receiver()" -u /var/run/dnsdist/dnstap.sock -v\''
args = shlex.split(cmd)
with subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:
time.sleep(2)
proc.terminate()
o = proc.stdout.read()
print(o)
self.assertRegex(o, b"listening on /var/run/dnsdist/dnstap.sock")
def test2_incoming_dnstap(self):
"""test to receive dnstap message"""
cmd = 'su - _dnsdist -s /bin/bash -c \'python3 -c "from dnstap_receiver.receiver import start_receiver; start_receiver()" -u /var/run/dnsdist/dnstap.sock -v\''
args = shlex.split(cmd)
with subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:
for i in range(10):
r = my_resolver.resolve('www.github.com', 'a')
time.sleep(1)
proc.terminate()
o = proc.stdout.read()
print(o)
self.assertRegex(o, b"dnsdist-unix CLIENT_RESPONSE")
``` |
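The tests above start the receiver as a subprocess and assert on its log output. A trimmed-down helper following the same pattern (the socket path and flags are examples, not project defaults) might look like:
```python
import shlex
import subprocess
import sys

def spawn_receiver(socket_path="/var/run/dnsdist/dnstap.sock"):
    """Start the dnstap receiver in a child process, mirroring the tests above (sketch)."""
    cmd = (f'{sys.executable} -c "from dnstap_receiver.receiver import start_receiver; '
           f'start_receiver()" -u {socket_path} -v')
    return subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
```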
{
"source": "jinmel/Text2Colors",
"score": 2
} |
#### File: jinmel/Text2Colors/util.py
```python
import numpy as np
import torch
import torch.nn as nn
import warnings
from skimage.color import lab2rgb, rgb2lab
# ======================== For text embeddings ======================== #
SOS_token = 0
EOS_token = 1
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Dictionary:
def __init__(self):
self.word2index = {}
self.word2count = {}
self.index2word = {0: "SOS", 1: "EOS"}
self.n_words = 2
self.max_len = 0
def index_elements(self, data):
for element in data:
self.max_len = len(data) if self.max_len < len(data) else self.max_len
self.index_element(element)
def index_element(self, element):
if element not in self.word2index:
self.word2index[element] = self.n_words
self.word2count[element] = 1
self.index2word[self.n_words] = element
self.n_words += 1
else:
self.word2count[element] += 1
def load_pretrained_embedding(dictionary, embed_file, embed_dim):
if embed_file is None: return None
pretrained_embed = {}
with open(embed_file, 'r', encoding='utf-8') as f:
for line in f:
tokens = line.split(' ')
word = tokens[0]
entries = tokens[1:]
if word == '<unk>':
continue
pretrained_embed[word] = entries
vocab_size = len(dictionary) + 2
W_emb = np.random.randn(vocab_size, embed_dim).astype('float32')
n = 0
for word, index in dictionary.items():
if word in pretrained_embed:
W_emb[index, :] = pretrained_embed[word]
n += 1
print ("%d/%d vocabs are initialized with GloVe embeddings." % (n, vocab_size))
return W_emb
class Embed(nn.Module):
def __init__(self, vocab_size, embed_dim, W_emb, train_emb):
super(Embed, self).__init__()
self.embed = nn.Embedding(vocab_size, embed_dim)
if W_emb is not None:
print ("Using pre-trained word embeddings...")
self.embed.weight = nn.Parameter(W_emb)
        if not train_emb:
            print ("Not training word embeddings...")
            # freeze the embedding weights; requires_grad must be set on the Parameter, not the Module
            self.embed.weight.requires_grad = False
def forward(self, doc):
doc = self.embed(doc)
return doc
# ======================== For processing data ======================== #
def process_image(image_data, batch_size, imsize):
input = torch.zeros(batch_size, 1, imsize, imsize)
labels = torch.zeros(batch_size, 2, imsize, imsize)
images_np = image_data.numpy().transpose((0, 2, 3, 1))
for k in range(batch_size):
img_lab = rgb2lab(images_np[k], illuminant='D50')
img_l = img_lab[:, :, 0] / 100
input[k] = torch.from_numpy(np.expand_dims(img_l, 0))
img_a_scale = (img_lab[:, :, 1:2] + 88) / 185
img_b_scale = (img_lab[:, :, 2:3] + 127) / 212
img_ab_scale = np.concatenate((img_a_scale, img_b_scale), axis=2)
labels[k] = torch.from_numpy(img_ab_scale.transpose((2, 0, 1)))
return input, labels
def process_palette_ab(pal_data, batch_size):
img_a_scale = (pal_data[:, :, 1:2] + 88) / 185
img_b_scale = (pal_data[:, :, 2:3] + 127) / 212
img_ab_scale = np.concatenate((img_a_scale, img_b_scale), axis=2)
ab_for_global = torch.from_numpy(img_ab_scale).float()
ab_for_global = ab_for_global.view(batch_size, 10).unsqueeze(2).unsqueeze(2)
return ab_for_global
def process_palette_lab(pal_data, batch_size):
img_l = pal_data[:, :, 0:1] / 100
img_a_scale = (pal_data[:, :, 1:2] + 88) / 185
img_b_scale = (pal_data[:, :, 2:3] + 127) / 212
img_lab_scale = np.concatenate((img_l, img_a_scale, img_b_scale), axis=2)
lab_for_global = torch.from_numpy(img_lab_scale).float()
lab_for_global = lab_for_global.view(batch_size, 15).unsqueeze(2).unsqueeze(2)
return lab_for_global
def process_global_ab(input_ab, batch_size, always_give_global_hint):
X_hist = input_ab
if always_give_global_hint:
B_hist = torch.ones(batch_size, 1, 1, 1)
else:
B_hist = torch.round(torch.rand(batch_size, 1, 1, 1))
for l in range(batch_size):
if B_hist[l].numpy() == 0:
X_hist[l] = torch.rand(10)
global_input = torch.cat([X_hist, B_hist], 1)
return global_input
def process_global_lab(input_lab, batch_size, always_give_global_hint):
X_hist = input_lab
if always_give_global_hint:
B_hist = torch.ones(batch_size, 1, 1, 1)
else:
B_hist = torch.round(torch.rand(batch_size, 1, 1, 1))
for l in range(batch_size):
if B_hist[l].numpy() == 0:
X_hist[l] = torch.rand(15)
global_input = torch.cat([X_hist, B_hist], 1)
return global_input
def process_global_sampling_ab(palette, batch_size, imsize, hist_mean, hist_std):
X_hist = palette.to(device)
B_hist = torch.ones(batch_size, 1, 1, 1).to(device)
global_input = torch.cat([X_hist, B_hist], 1)
return global_input
def process_global_sampling_lab(palette, batch_size, imsize, hist_mean, hist_std):
X_hist = palette.to(device)
B_hist = torch.ones(batch_size, 1, 1, 1).to(device)
global_input = torch.cat([X_hist, B_hist], 1)
return global_input
# ============================= Etc. ============================= #
def KL_loss(mu, logvar):
KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
KLD = torch.mean(KLD_element).mul_(-0.5)
return KLD
def lab2rgb_1d(in_lab, clip=True):
warnings.filterwarnings("ignore")
tmp_rgb = lab2rgb(in_lab[np.newaxis, np.newaxis, :], illuminant='D50').flatten()
if clip:
tmp_rgb = np.clip(tmp_rgb, 0, 1)
return tmp_rgb
def init_weights_normal(m):
if type(m) == nn.Conv1d:
m.weight.data.normal_(0.0, 0.05)
if type(m) == nn.Linear:
m.weight.data.normal_(0.0, 0.05)
``` |
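The a/b palette scaling used by `process_palette_ab`/`process_palette_lab` above is linear, so it can be inverted exactly. A quick round-trip sketch using the same constants as the functions above:
```python
import numpy as np

lab = np.array([55.0, 20.0, -30.0])          # one Lab color: L, a, b
scaled = np.array([lab[0] / 100,             # L scaled as in process_palette_lab
                   (lab[1] + 88) / 185,      # a scaled
                   (lab[2] + 127) / 212])    # b scaled
restored = np.array([scaled[0] * 100,
                     scaled[1] * 185 - 88,
                     scaled[2] * 212 - 127])
assert np.allclose(lab, restored)
```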
{
"source": "jinmingda/MicroorganismSearchEngine",
"score": 3
} |
#### File: mse/tests/test_controllers.py
```python
import unittest
import datetime
from turbogears import testutil
from mse.controllers import Root
from mse.model import User
class TestPages(testutil.TGTest):
root = Root
def test_method(self):
"""The index method should return a datetime.datetime called 'now'"""
response = self.app.get('/')
assert isinstance(response.raw['now'], datetime.datetime)
def test_index_title(self):
""""The index page should have the right title."""
response = self.app.get('/')
assert "<title>Welcome to TurboGears</title>" in response.body
def test_login_title(self):
"""The login page should have the right title."""
response = self.app.get('/login')
assert "<title>Login</title>" in response
assert "Please log in." in response
assert "session cookies" not in response
assert "credentials" not in response
assert "not correct" not in response
def test_login_errors(self):
"""The login page should display the right errors."""
login = '/login?user_name=nobody&password=<PASSWORD>&login=Login'
response = self.app.get(login)
assert "<title>Login</title>" in response
assert "session cookies" in response
cookie = ', '.join(map(str, response.cookies_set.values()))
response = self.app.get(login, headers=dict(Cookie=cookie))
assert "<title>Login</title>" in response
assert "credentials" in response
assert "not correct" in response
def test_login_and_logout(self):
"""Login with correct credentials and then logout."""
u = User(user_name=u"scott", password=u"<PASSWORD>",
display_name=u"<NAME>", email_address=u"<EMAIL>")
response = self.app.get('/')
assert "<title>Welcome to TurboGears</title>" in response
assert 'href="/login"' in response
assert 'href="/logout"' not in response
response = self.app.get('/login')
assert "<title>Login</title>" in response
assert 'Please log in.' in response
cookie = ', '.join(map(str, response.cookies_set.values()))
login = '/login?user_name=scott&password=<PASSWORD>&login=Login'
headers = dict(Cookie=cookie)
response = self.app.get(login, headers=headers, status=302)
location = response.headers['Location']
response = self.app.get(location, headers=headers)
assert "<title>Welcome to TurboGears</title>" in response
assert "Welcome <NAME>" in response
assert 'href="/login"' not in response
assert 'href="/logout"' in response
response = self.app.get('/', headers=headers)
assert "<title>Welcome to TurboGears</title>" in response
assert "Welcome <NAME>" in response
assert 'href="/login"' not in response
assert 'href="/logout"' in response
response = self.app.get('/logout', headers=headers, status=302)
location = response.headers['Location']
response = self.app.get(location, headers=headers)
assert "<title>Welcome to TurboGears</title>" in response
assert 'href="/login"' in response
assert 'href="/logout"' not in response
```
#### File: mse/tests/test_model.py
```python
from turbogears.testutil import DBTest
from turbogears.util import get_model
# import the User class defined in the model so we can use it here
try:
from mse.model import User, create_tables, create_default_user
except ImportError:
import warnings
warnings.warn("Identity model not found. Not running identity tests!")
User = None
from sqlobject import SQLObjectNotFound
from sqlobject.dberrors import OperationalError
def _create_test_user():
obj = User(user_name=u"creosote", email_address=u"<EMAIL>",
display_name=u"<NAME>", password=u"<PASSWORD>")
return obj
class TestUser(DBTest):
if User:
def test_user_creation(self):
"""Object creation should set the name."""
obj = _create_test_user()
retrieved_user = User.by_email_address(u'<EMAIL>')
assert retrieved_user, \
'User should have been found by email address'
assert retrieved_user.user_name == u'creosote', \
"User name should have been creosote, not '%s'" % retrieved_user.user_name
assert obj.display_name == u"<NAME>"
class TestBootstrap(DBTest):
def setUp(self):
if not self.model:
self.model = get_model()
if not self.model:
raise Exception("Unable to run database tests without a model")
if User:
def test_create_tables(self):
"""Test that model.create_tables correctly creates all database tables."""
self.assertRaises(OperationalError, User.by_user_name, u'test')
create_tables()
assert _create_test_user()
create_tables()
assert User.by_user_name(u'creosote')
create_tables(drop_all=True)
try:
user = User.by_user_name(u'creosote')
except SQLObjectNotFound:
user = None
assert user is None
def test_create_default_user(self):
"Test that the default user is created correctly"
create_tables()
create_default_user(u'creosote', u'secret')
retrieved_user = User.by_email_address(u'<EMAIL>' % u'creosote')
assert retrieved_user
assert retrieved_user.user_name == u'creosote'
assert retrieved_user.display_name == u'Default User'
assert retrieved_user.password
``` |
{
"source": "Jinming-Su/SGNet",
"score": 2
} |
#### File: SGNet/datasets/generate_vp_label_dist_culane.py
```python
import numpy as np
import cv2
import matplotlib.pyplot as plt
from glob import glob
import os
import random
import traceback
import tqdm
import traceback
import torch
color_list = [(0, 0, 255), (0, 255, 0), (0, 255, 255), (255, 0, 255),]
img_dir = 'datasets/culane/'
seg_label_dir = 'datasets/culane/laneseg_label_w16_new/'
#seg_line_label_dir ='dataset/seg_line_label'
rpn_label_dir = 'datasets/culane/rpn_label_new'
vp_label_dir = 'datasets/culane/vp_label_new_32'
H = 590
W = 1640
# segmentation files that could not be processed cleanly; appended to in get_anno_vp
REFINE_LIST = []
def plt_show(img):
plt.figure(figsize=(40, 40))
im2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(im2)
def get_anno(frame_mp4_number, seg_width = 16, show = False, path2write = None, offset = 10):
"""
display : 0, nothin; 1, show; 2, write
return : (y1, y2, x1, w1, \theta)
"""
    # locate the segmentation label image for frame/mp4/number
seg_img_file = os.path.join(seg_label_dir, frame_mp4_number+'.png')
seg_img = cv2.imread(seg_img_file)
seg_img = seg_img * 50
seg_img_ori = seg_img.copy()
seg_img_gray = cv2.cvtColor(seg_img, cv2.COLOR_BGR2GRAY)
    # how many lane lines are annotated, and their label values
    seg_labels = np.sort(np.unique(seg_img_gray))
    # no lane line in the image
if len(seg_labels) == 1:
if(show):
plt_show(seg_img)
return (None, None, None)
    # list of fitted lines; each stored as [A, B, C, angle_x]
    lines = []
    # minimum rotated rectangle covering each annotated lane
    w_annos = []
for seg_label in seg_labels[1:]:
rect, box_points = get_min_rect(seg_img_gray, seg_label)
y_center = rect[0][0];
x_center = rect[0][1];
w=rect[1][0]
h=rect[1][1]
        w_annos.append(min(w, h))
        angle = rect[2]
        # line represented by this rectangle (its long-edge direction)
angle_y = angle_convert(w, h, angle)
angle_x = angle_convert_x(w, h, angle)
A, B, C = get_line_equation(y_center, x_center, angle_y)
lines.append([A, B, C, angle_x])
    # special case: only one lane line in the image
if(len(lines) == 1):
x_anno = x_center - max(w, h)/2 * np.cos(angle_x*np.pi/180)
y_l = y_r = y_center - max(w, h)/2 * np.sin(angle_x*np.pi/180)
if show:
cv2.circle(seg_img, (int(x_anno), int(y_l)), 3, (0, 255, 0), thickness=3)
plt_show(seg_img)
return (y_l, y_r, [[x_anno, w_annos[0], angle_x]])
    # pairwise intersections of all fitted lines
x_inter_list=[]
y_inter_list=[]
for i in range(len(lines)-1):
for j in range(i+1, len(lines)):
#print(lines[i], lines[j])
x_inter, y_inter = get_inter_point(lines[i][0], lines[i][1], lines[i][2], lines[j][0], lines[j][1], lines[j][2])
x_inter_list.append(x_inter)
y_inter_list.append(y_inter)
    # a single intersection point: the horizon line is taken to pass through it
if(len(x_inter_list) == 1):
seg_img_modify_0 = seg_img.copy()
y_l = y_inter_list[0]
y_r = y_inter_list[0]
# cv2.line(seg_img_modify_0, (0, int(y_l)), (seg_img.shape[1], int(y_r)), (255, 0, 0), 3)
y_l = y_inter_list[0]
y_r = y_inter_list[0]
if show:
cv2.line(seg_img, (0, y_l), (seg_img.shape[1], y_r), (0, 0 , 255), 3 )
cv2.circle(seg_img, (x_inter_list[0], y_l), 3, (0, 255, 0), thickness=3 )
plt_show(seg_img)
return ( y_l, y_r, [[x_inter_list[0], w_annos[0], 0]] )
    # otherwise fit the horizon line y = kx + b (i.e. kx - y + b = 0) through all intersection points
else:
line_anno_list = []
x_annos = []
seg_img_modify_10 = seg_img.copy()
horizon_line ,y_l, y_r = modify_points(x_inter_list.copy(), y_inter_list.copy(), seg_img_modify_10, offset=offset)
for line, w_anno in zip(lines, w_annos):
#print(line, horizon_line)
x_anno, _ = get_inter_point(line[0], line[1], line[2], horizon_line[0], -1, horizon_line[1])
x_annos.append(x_anno)
# x ,w ,theta
line_anno_list.append([ x_anno , w_anno, line[3]])
if show:
cv2.line(seg_img, (0, int(y_l)), (seg_img.shape[1], int(y_r)), (0, 0 , 255), 3 )
for line, x_anno in zip(lines, x_annos):
                # intersection of this lane's fitted line with the horizon line
y_anno = (-line[2] + -line[0] * x_anno)/line[1]
cv2.circle(seg_img, (int(x_anno), int(y_anno)), 3, (0, 255, 0), thickness=3 )
plt_show(seg_img)
return (y_l, y_r, line_anno_list)
def plt_show(img):
im2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(im2)
def show_bounding_boxes(seg_anno_file):
img = cv2.imread(seg_anno_file)
img = img * 50
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # how many lane lines are annotated, and their label values
    seg_labels = np.sort(np.unique(img_gray))
    # draw the axis-aligned and minimum-area rectangles for each lane label
for seg_label in seg_labels[1:]:
x,y,w,h =get_boundingRect(img_gray, seg_label)
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 5)
_, box_points = get_min_rect(img_gray, seg_label)
cv2.drawContours(img, [box_points], 0, (0, 0, 255), 2)
plt_show(img)
def get_boundingRect(img_gray, seg_label):
    # axis-aligned bounding rectangle of this label's mask
    cnt = (img_gray==seg_label).astype(np.uint8)
    # cnt: binary image (or point set) accepted by cv2.boundingRect
x, y, w, h = cv2.boundingRect(cnt)
return x,y,w,h
# The width/height/angle returned by cv2.minAreaRect are easy to misread, see:
# https://stackoverflow.com/questions/24073127/opencvs-rotatedrect-angle-does-not-provide-enough-information
# https://stackoverflow.com/questions/15956124/minarearect-angles-unsure-about-the-angle-returned/21427814#21427814
# angle from the y-axis, rotated counter-clockwise, to the line
def angle_convert(width, height, angle):
    assert width!=height
    return 180+angle if width>height else angle + 90
# angle from the positive x-axis to the line
def angle_convert_x(width, height, angle):
    assert width!=height
    return 90-angle if width>height else -angle
# Line through (x0, y0) in cv2 image coordinates, written in general form from point-slope form.
# `angle` is measured from the y-axis, rotating towards the x-axis, to the line.
def get_line_equation(y0, x0, angle):
if angle == 90:
return (0, 1, -y0);
else:
k=np.tan(angle*np.pi/180)
        # point-slope form: x - x0 = k*(y - y0)  =>  x - k*y + (k*y0 - x0) = 0
return (1, -k, k*y0-x0)
def get_line_equation_x(x0, y0, angle):
if angle == 90:
return (0, 1, -y0);
else:
k=np.tan(angle*np.pi/180)
        # general form of the line through (x0, y0) with this slope
return (k, 1, -k*x0-y0)
def draw_line_equation_ori(img, A, B, C, y_max, color=(0, 255, 0), x_max=None):
    # handle the line perpendicular to the y-axis first; when A == 0 we need x_max
if A == 0:
assert x_max
for x in range(x_max):
cv2.circle(img, (x, -int(C)), 5, color, 4)
return
for y in range(y_max):
x = (-B*y - C)/A
cv2.circle(img, (int(x), y), 1, color, 4)
# TODO: work out exactly how the line intersects the image rectangle; can cv2.line draw when both endpoints lie outside the image?
def draw_line_equation(img, A, B, C, y_max, color = (0, 255, 0), x_max = None):
#def draw_line_equation(img, A, B, C, y_max = H, color = (0, 255, 0), x_max = None):
assert (A!=0 or B!=0)
if A==0:
y_lr = -C/B
cv2.line(img, (0, y_lr), (x_max, y_lr), color, 3)
else:
x1 = (-B*0 - C)/A
x2 = (-B*y_max - C)/A
cv2.line(img, (int(x1), 0), (int(x2), y_max), color, 3)
def get_inter_point(A1, B1, C1, A2, B2, C2):
    # based on the general form Ax + By + C = 0; A and B must not both be zero
assert (A1!=0 or B1!=0) and (A2!=0 or B2!=0)
m=A1*B2-A2*B1
if m==0:
return (-1000, -1000)
else:
x=(C2*B1-C1*B2)/m
y=(C1*A2-C2*A1)/m
return x,y
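# Worked example (hypothetical numbers) for get_inter_point: the vertical line x = 100
# is (A=1, B=0, C=-100) and the horizontal line y = 200 is (A=0, B=1, C=-200);
# get_inter_point(1, 0, -100, 0, 1, -200) returns (100.0, 200.0), while parallel lines
# fall back to the sentinel point (-1000, -1000).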
# pad the intersection point set with two extra points, at (min x - offset) and (max x + offset), lying on a horizontal line through the mean y
def modify_points(x_inter_list, y_inter_list, seg_img, draw = False, offset=0):
y_inter_center = np.mean(y_inter_list)
x_inter_min = np.min(x_inter_list) - offset
x_inter_max = np.max(x_inter_list) + offset
horizon_line = np.polyfit(x_inter_list, y_inter_list, deg=1)
x_inter_list.append(x_inter_min)
y_inter_list.append(y_inter_center)
x_inter_list.append(x_inter_max)
y_inter_list.append(y_inter_center)
horizon_line = np.polyfit(x_inter_list, y_inter_list, deg=1)
y_l = horizon_line[1]
y_r = horizon_line[0] * seg_img.shape[1] + horizon_line[1]
if draw:
cv2.circle(seg_img, (int(x_inter_min),int(y_inter_center)), 3, (0, 255, 255), thickness=3)
cv2.circle(seg_img, (int(x_inter_max),int(y_inter_center)), 3, (0, 255, 255), thickness=3)
cv2.line(seg_img, (0, int(y_l)), (seg_img.shape[1], int(y_r)), (255, 0, 0), 3)
return horizon_line , y_l, y_r
# compare the horizon lines produced by the different strategies
def contrast_horizon_line(frame_mp4_number, display, path2write = None):
"""
display : 0, nothin; 1, show; 2, write
"""
    ori_img = cv2.imread(os.path.join(img_dir, frame_mp4_number+'.jpg'))
    # parts of the path, used to build the output filename when display == 2
    name_split_list = (frame_mp4_number + '.jpg').split('/')
    seg_anno_file = os.path.join(seg_label_dir, frame_mp4_number+'.png')
seg_img = cv2.imread(seg_anno_file)
seg_img = seg_img * 50
seg_img_ori = seg_img.copy()
seg_img_gray = cv2.cvtColor(seg_img, cv2.COLOR_BGR2GRAY)
    # how many lane lines are annotated, and their label values
seg_labels = np.sort(np.unique(seg_img_gray))
if len(seg_labels) == 1:
return -1,-1
    # list of fitted lines; each line stored as [A, B, C]
    lines = []
    # minimum rotated rectangle covering each annotated lane
for seg_label in seg_labels[1:]:
x,y,w,h =get_boundingRect(seg_img_gray, seg_label)
# if display>0:
# cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), 2)
rect, box_points = get_min_rect(seg_img_gray, seg_label)
y_center = rect[0][0];
x_center = rect[0][1];
w=rect[1][0]
h=rect[1][1]
angle = rect[2]
if display>0:
cv2.drawContours(seg_img, [box_points], 0, (0, 0, 255), 1)
        # line represented by this rectangle (its long-edge direction)
angle = angle_convert(w, h, angle)
A, B, C = get_line_equation(y_center, x_center, angle)
if display>0:
draw_line_equation(seg_img, A, B ,C, seg_img.shape[0])
lines.append([A, B, C])
    # pairwise intersections of all fitted lines
x_inter_list=[]
y_inter_list=[]
for i in range(len(lines)):
for j in range(i+1, len(lines)):
x_inter, y_inter = get_inter_point(lines[i][0], lines[i][1], lines[i][2], lines[j][0], lines[j][1], lines[j][2])
x_inter_list.append(x_inter)
y_inter_list.append(y_inter)
if display>0:
cv2.circle(seg_img, (int(x_inter),int(y_inter)), 3, (0, 255, 0), thickness=3)
    # baseline: fit the horizon line directly to the raw intersection points (no padding)
    seg_img_copy = seg_img.copy()
    horizon_line = np.polyfit(x_inter_list, y_inter_list, deg=1)
    y_l = horizon_line[1]
    y_r = horizon_line[0] * seg_img.shape[1] + horizon_line[1]
    cv2.line(seg_img_copy, (0, int(y_l)), (seg_img.shape[1], int(y_r)), (255, 0, 0), 3)
    # end of the baseline version
    # a single intersection point: the horizon line is taken to pass through it
if(len(x_inter_list) == 1):
seg_img_modify_0 = seg_img.copy()
y_l = y_inter_list[0]
y_r = y_inter_list[0]
cv2.line(seg_img_modify_0, (0, int(y_l)), (seg_img.shape[1], int(y_r)), (255, 0, 0), 3)
y_l = y_inter_list[0]
y_r = y_inter_list[0]
if display==1:
plt_show(np.vstack(( np.hstack((seg_img_ori, seg_img_copy)), np.hstack((ori_img, seg_img_modify_0)) )) )
elif display == 2:
if not os.path.exists(path2write):
os.makedirs(path2write)
cv2.imwrite(os.path.join(path2write, '_'.join(name_split_list[-3:])), \
np.vstack(( np.hstack((seg_img_ori, seg_img_copy)), np.hstack((ori_img, seg_img_modify_0)) )) )
    # otherwise fit the horizon line y = kx + b (i.e. kx - y + b = 0) through all intersection points
else:
        # add padding points to the left and right of the intersection point set
seg_img_modify_0 = seg_img.copy()
modify_points(x_inter_list.copy(), y_inter_list.copy(), seg_img_modify_0 )
#offset 5
seg_img_modify_5 = seg_img.copy()
modify_points(x_inter_list.copy(), y_inter_list.copy(), seg_img_modify_5, 5)
#offset 10
seg_img_modify_10 = seg_img.copy()
horizon_line ,y_l, y_r = modify_points(x_inter_list.copy(), y_inter_list.copy(), seg_img_modify_10, 20)
if display==1:
plt_show(np.vstack((np.hstack((seg_img_ori, seg_img_copy, seg_img_modify_0)), \
np.hstack((ori_img, seg_img_modify_5, seg_img_modify_10)))))
elif display == 2:
if not os.path.exists(path2write):
os.makedirs(path2write)
cv2.imwrite(os.path.join(path2write, '_'.join(name_split_list[-3:])), \
np.vstack((np.hstack((seg_img_ori, seg_img_copy, seg_img_modify_0)), \
np.hstack((ori_img, seg_img_modify_5, seg_img_modify_10)))))
return y_l, y_r
# y1, y2
# x1, w1, \theta
def get_anno(frame_mp4_number, seg_width = 16, show = False, path2write = None, offset = 10):
"""
display : 0, nothin; 1, show; 2, write
return : (y1, y2, x1, w1, \theta)
"""
    # locate the segmentation label image for frame/mp4/number
seg_img_file = os.path.join(seg_label_dir, frame_mp4_number+'.png')
seg_img = cv2.imread(seg_img_file)
H, W = seg_img.shape[:2]
seg_img = seg_img * 50
seg_img_ori = seg_img.copy()
seg_img_gray = cv2.cvtColor(seg_img, cv2.COLOR_BGR2GRAY)
    # how many lane lines are annotated, and their label values
    seg_labels = np.sort(np.unique(seg_img_gray))
    # no lane line in the image
if len(seg_labels) == 1:
if(show):
plt_show(seg_img)
return (None, None, None)
    # list of fitted lines; each stored as [A, B, C, angle_x]
    lines = []
    # minimum rotated rectangle covering each annotated lane
    w_annos = []
    for seg_label in seg_labels[1:]:
        rect, box_points = get_min_rect(seg_img_gray, seg_label)
        y_center = rect[0][0]
        x_center = rect[0][1]
        w = rect[1][0]
        h = rect[1][1]
        w_annos.append(min(w, h))
        angle = rect[2]
        if show:
            cv2.drawContours(seg_img, [box_points], 0, (0, 0, 255), 2)
        # line represented by this rectangle (its long-edge direction)
        angle_y = angle_convert(w, h, angle)
        angle_x = angle_convert_x(w, h, angle)
        A, B, C = get_line_equation(y_center, x_center, angle_y)
        lines.append([A, B, C, angle_x])
    # special case: only one lane line in the image
if(len(lines) == 1):
x_anno = x_center - max(w, h)/2 * np.cos(angle_x*np.pi/180)
y_l = y_r = y_center - max(w, h)/2 * np.sin(angle_x*np.pi/180)
if show:
cv2.circle(seg_img, (int(x_anno), int(y_l)), 3, (0, 255, 0), thickness=3)
plt_show(seg_img)
return (y_l, y_r, [[x_anno, w_annos[0], angle_x]])
    # pairwise intersections of all fitted lines
x_inter_list=[]
y_inter_list=[]
for i in range(len(lines)-1):
for j in range(i+1, len(lines)):
#print(lines[i], lines[j])
x_inter, y_inter = get_inter_point(lines[i][0], lines[i][1], lines[i][2], lines[j][0], lines[j][1], lines[j][2])
x_inter_list.append(x_inter)
y_inter_list.append(y_inter)
    # a single intersection point: the horizon line is taken to pass through it
if(len(x_inter_list) == 1):
seg_img_modify_0 = seg_img.copy()
y_l = y_inter_list[0]
y_r = y_inter_list[0]
# cv2.line(seg_img_modify_0, (0, int(y_l)), (seg_img.shape[1], int(y_r)), (255, 0, 0), 3)
y_l = y_inter_list[0]
y_r = y_inter_list[0]
if show:
cv2.line(seg_img, (0, y_l), (seg_img.shape[1], y_r), (0, 0 , 255), 3 )
cv2.circle(seg_img, (x_inter_list[0], y_l), 3, (0, 255, 0), thickness=3 )
plt_show(seg_img)
return ( y_l, y_r, [[x_inter_list[0], w_annos[0], 0]] )
    # otherwise fit the horizon line y = kx + b (i.e. kx - y + b = 0) through all intersection points
else:
line_anno_list = []
x_annos = []
seg_img_modify_10 = seg_img.copy()
horizon_line ,y_l, y_r = modify_points(x_inter_list.copy(), y_inter_list.copy(), seg_img_modify_10, offset=offset)
for line, w_anno in zip(lines, w_annos):
#print(line, horizon_line)
x_anno, _ = get_inter_point(line[0], line[1], line[2], horizon_line[0], -1, horizon_line[1])
x_annos.append(x_anno)
# x ,w ,theta
line_anno_list.append([ x_anno , w_anno, line[3]])
if show:
cv2.line(seg_img, (0, int(y_l)), (seg_img.shape[1], int(y_r)), (0, 0 , 255), 3 )
for line, x_anno in zip(lines, x_annos):
                # intersection of this lane's fitted line with the horizon line
y_anno = (-line[2] + -line[0] * x_anno)/line[1]
cv2.circle(seg_img, (int(x_anno), int(y_anno)), 3, (0, 255, 0), thickness=3 )
plt_show(seg_img)
return (y_l, y_r, line_anno_list)
def distance(p1, p2):
return np.linalg.norm(np.array(p1)-np.array(p2))
def draw_rot_rect(rect, img):
box_points = cv2.boxPoints(rect)
box_points = np.int0(box_points)
#draw contour里 x坐标在y前, 两列交换
box_points = box_points[:, ::-1]
cv2.drawContours(img, [box_points], 0, color_list[0], 2)
# minimum rotated rectangle clipped to the image (seg_img is only used for optional debug drawing)
def get_min_rect(img_gray, seg_label, seg_img=None):
points = np.where(img_gray==seg_label)
points = np.stack(points, axis=1)
    # point set (row, col) fed to cv2.minAreaRect
rect = cv2.minAreaRect(points)
box_points = cv2.boxPoints(rect)
box_points = np.int0(box_points)
    # drawContours expects x before y, so swap the two columns
box_points = box_points[:, ::-1]
y_center = rect[0][0];
x_center = rect[0][1];
w=rect[1][0]
h=rect[1][1]
w_anno = min(w, h)
h_anno = max(w, h)
angle = rect[2]
    # line represented by this rectangle (its long-edge direction)
angle_y = angle_convert(w, h, angle)
angle_x = angle_convert_x(w, h, angle)
x_upper_center = x_center - h_anno/2 * np.cos(angle_x * np.pi / 180)
y_upper_center = y_center - h_anno/2 * np.sin(angle_x * np.pi / 180)
    # angle between the rectangle's short edge and the x-axis
angle_v_x = angle_x + 90 if angle_x < 90 else angle_x - 90
x1 = x_upper_center + w_anno/2 * np.cos(angle_v_x * np.pi / 180)
x2 = x_upper_center - w_anno/2 * np.cos(angle_v_x * np.pi / 180)
y1 = y_upper_center + w_anno/2 * np.sin(angle_v_x * np.pi / 180)
y2 = y_upper_center - w_anno/2 * np.sin(angle_v_x * np.pi / 180)
    # optional sanity check for the two short-edge endpoints (debug drawing below)
# cv2.circle(seg_img, (int(x_upper_center),int(y_upper_center)), 1, (0, 255, 0), thickness=2)
# cv2.circle(seg_img, (int(x1),int(y1)), 1, (0, 255, 0), thickness=2)
# cv2.circle(seg_img, (int(x2),int(y2)), 1, (0, 255, 0), thickness=2)
line_1 = get_line_equation(y1, x1, angle_y)
line_2 = get_line_equation(y2, x2, angle_y)
#draw_line_equation(img, A, B, C, y_max = H, color = (0, 255, 0), x_max = None):
#def get_inter_point(A1, B1, C1, A2, B2, C2):
# draw_line_equation(seg_img, line_1[0], line_1[1], line_1[2], x_max = W)
# draw_line_equation(seg_img, line_2[0], line_2[1], line_2[2], x_max = W)
    # intersection with the bottom border y = H - 1
x_inter_1_h, y_inter_1_h = get_inter_point(line_1[0], line_1[1], line_1[2], 0, 1, -H+1)
x_inter_2_h, y_inter_2_h = get_inter_point(line_2[0], line_2[1], line_2[2], 0, 1, -H+1)
    # intersection with the side border x = 0 or x = W - 1
W_or_0 = 0 if w>h else W-1
x_inter_1_w, y_inter_1_w = get_inter_point(line_1[0], line_1[1], line_1[2], 1, 0, -W_or_0)
x_inter_2_w, y_inter_2_w = get_inter_point(line_2[0], line_2[1], line_2[2], 1, 0, -W_or_0)
# cv2.circle(seg_img, (int(x_inter_1),int(y_inter_1)), 1, (0, 255, 0), thickness=2)
# cv2.circle(seg_img, (int(x_inter_2),int(y_inter_2)), 1, (0, 255, 0), thickness=2)
if distance([x1,y1], [x_inter_1_h, y_inter_1_h]) < distance([x1,y1], [x_inter_1_w, y_inter_1_w]):
x_inter_1, y_inter_1 = x_inter_1_h, y_inter_1_h
distance_1 = distance([x1,y1], [x_inter_1_h, y_inter_1_h])
else:
x_inter_1, y_inter_1 = x_inter_1_w, y_inter_1_w
distance_1 = distance([x1,y1], [x_inter_1_w, y_inter_1_w])
if distance([x2,y2], [x_inter_2_h, y_inter_2_h]) < distance([x2,y2], [x_inter_2_w, y_inter_2_w]):
x_inter_2, y_inter_2 = x_inter_2_h, y_inter_2_h
distance_2 = distance([x2, y2], [x_inter_2_h, y_inter_2_h])
else:
x_inter_2, y_inter_2 = x_inter_2_w, y_inter_2_w
distance_2 = distance([x2,y2], [x_inter_2_w, y_inter_2_w])
if distance_1 < distance_2 :
cut_h = h_anno - distance_1
x3, y3 = x_inter_1, y_inter_1
x4 = x3 - w_anno * np.cos(angle_v_x * np.pi / 180)
y4 = y3 - w_anno * np.sin(angle_v_x * np.pi / 180)
else:
cut_h = h_anno - distance_2
x4, y4 = x_inter_2, y_inter_2
x3 = x4 + w_anno * np.cos(angle_v_x * np.pi / 180)
y3 = y4 + w_anno * np.sin(angle_v_x * np.pi / 180)
#cv2.circle(seg_img, (int(x3),int(y3)), 3, (0, 255, 0), thickness=2)
#cv2.circle(seg_img, (int(x4),int(y4)), 3, (0, 0, 255), thickness=2)
# print('x3, y3', x3, y3)
# print('x4, y4', x4, y4)
if min(distance_1, distance_2) <= h_anno:
new_x_center = x_center - cut_h / 2 * np.cos(angle_x * np.pi / 180)
new_y_center = y_center - cut_h / 2 * np.sin(angle_x * np.pi / 180)
if w>h:
new_rect = ((new_y_center, new_x_center), (w-cut_h, h), angle)
else:
new_rect = ((new_y_center, new_x_center), (w, h-cut_h), angle)
new_box_points = cv2.boxPoints(new_rect)
new_box_points = np.int0(new_box_points)
        # drawContours expects x before y, so swap the two columns
new_box_points = new_box_points[:, ::-1]
return new_rect, new_box_points
else:
return rect, box_points
# minimum rotated rectangle clipped to the image: extend it along its long edge until it meets the image border
def get_min_rect_v2(img_gray, seg_label, seg_img):
points = np.where(img_gray==seg_label)
points = np.stack(points, axis=1)
    # point set (row, col) fed to cv2.minAreaRect
rect = cv2.minAreaRect(points)
box_points = cv2.boxPoints(rect)
box_points = np.int0(box_points)
    # drawContours expects x before y, so swap the two columns
box_points = box_points[:, ::-1]
#cv2.drawContours(seg_img, [box_points], 0, (0, 0, 255), 1)
#plt_show(seg_img)
y_center = rect[0][0];
x_center = rect[0][1];
w=rect[1][0]
h=rect[1][1]
w_anno = min(w, h)
h_anno = max(w, h)
angle = rect[2]
    # line represented by this rectangle (its long-edge direction)
angle_y = angle_convert(w, h, angle)
angle_x = angle_convert_x(w, h, angle)
x_upper_center = x_center - h_anno/2 * np.cos(angle_x * np.pi / 180)
y_upper_center = y_center - h_anno/2 * np.sin(angle_x * np.pi / 180)
    # center line along the rectangle's long edge
line = get_line_equation(y_center, x_center, angle_y)
#draw_line_equation(seg_img, line[0], line[1], line[2], W)
    # intersection with the bottom border y = H - 1
x_inter_h, y_inter_h = get_inter_point(line[0], line[1], line[2], 0, 1, -H+1)
    # intersection with the side border x = 0 or x = W - 1
W_or_0 = 0 if w>h else W-1
x_inter_w, y_inter_w = get_inter_point(line[0], line[1], line[2], 1, 0, -W_or_0)
if distance([x_center,y_center], [x_inter_h, y_inter_h]) < distance([x_center, y_center], [x_inter_w, y_inter_w]):
x_inter, y_inter = x_inter_h, y_inter_h
short_d = distance([x_center, y_center], [x_inter_h, y_inter_h])
else:
x_inter, y_inter = x_inter_w, y_inter_w
short_d = distance([x_center, y_center], [x_inter_w, y_inter_w])
new_x_center = (x_upper_center + x_inter) /2
new_y_center = (y_upper_center + y_inter) /2
if w>h:
w = short_d + w/2
else:
h = short_d + h/2
new_rect = ((new_y_center, new_x_center), (w, h), angle)
new_box_points = cv2.boxPoints(new_rect)
new_box_points = np.int0(new_box_points)
    # drawContours expects x before y, so swap the two columns
new_box_points = new_box_points[:, ::-1]
return new_rect, new_box_points
# extend the rotated rectangle along its long edge so that its far end reaches (x_inter, y_inter)
def modify_rect(rect, x_inter, y_inter):
x_center = rect[0][1]
y_center = rect[0][0]
w=rect[1][0]
h=rect[1][1]
angle = rect[2]
add_h = distance([x_center, y_center], [x_inter, y_inter]) - max(w, h)/2
angle_x = angle_convert_x(w, h, angle)
new_x_center = x_center - add_h / 2 * np.cos(angle_x * np.pi / 180)
new_y_center = y_center - add_h / 2 * np.sin(angle_x * np.pi / 180)
if w>h:
w=w+add_h
else:
h=h+add_h
new_rect = ((new_y_center , new_x_center), (w, h), rect[2])
return new_rect
def rect2anchor(rect):
"""
    rect : OpenCV minAreaRect format ((y_center, x_center), (w, h), angle)
    anchor : [x_u, y_u, w, h, angle_x], where w is the shorter edge
"""
y_center = rect[0][0];
x_center = rect[0][1];
w=rect[1][0]
h=rect[1][1]
w_anno = min(w, h)
h_anno = max(w, h)
angle = rect[2]
    # orientation of the rectangle's long edge w.r.t. the x-axis
angle_x = angle_convert_x(w, h, angle)
x_upper_center = x_center - h_anno/2 * np.cos(angle_x * np.pi / 180)
y_upper_center = y_center - h_anno/2 * np.sin(angle_x * np.pi / 180)
return [x_upper_center, y_upper_center, w_anno, h_anno, angle_x]
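# Worked example for rect2anchor (hypothetical numbers): rect = ((300, 800), (20, 400), -90)
# is a box centred at (y=300, x=800) with edges 20 x 400; rect2anchor(rect) gives
# approximately [800, 100, 20, 400, 90], i.e. the top point (x=800, y=100) of a vertical
# lane segment, its width (short edge), its length (long edge) and its angle to the x-axis.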
def get_anno_rpn(frame_mp4_number, seg_width = 16, show = False, path2write = None, offset = 10):
"""
display : 0, nothin; 1, show; 2, write
return : (y1, y2, x1, w1, \theta)
"""
    # locate the segmentation label image for frame/mp4/number
seg_img_file = os.path.join(seg_label_dir, frame_mp4_number+'.png')
seg_img = cv2.imread(seg_img_file)
H, W = seg_img.shape[:2]
seg_img = seg_img * 50
seg_img_ori = seg_img.copy()
seg_img_gray = cv2.cvtColor(seg_img, cv2.COLOR_BGR2GRAY)
    # how many lane lines are annotated, and their label values
    seg_labels = np.sort(np.unique(seg_img_gray))
    print(seg_labels)
    # no lane line in the image
if len(seg_labels) == 1:
if(show):
plt_show(seg_img)
return None
    # list of fitted lines; each stored as [A, B, C, angle_x]
    lines = []
    rects = []
    # minimum rotated rectangle covering each annotated lane
for seg_label in seg_labels[1:]:
print(seg_label)
#rect, box_points = get_min_rect_ori(seg_img_gray, seg_label)
#cv2.drawContours(seg_img, [box_points], 0, (0, 255, 0), 2)
try:
rect, box_points = get_min_rect_v2(seg_img_gray, seg_label, seg_img)
except:
continue
if show:
cv2.drawContours(seg_img, [box_points], 0, (0, 0, 255), 2)
rects.append(rect)
y_center = rect[0][0];
x_center = rect[0][1];
w=rect[1][0]
h=rect[1][1]
# w_anno = min(w, h)
# h_anno = max(w, h)
angle = rect[2]
        # line represented by this rectangle (its long-edge direction)
angle_y = angle_convert(w, h, angle)
angle_x = angle_convert_x(w, h, angle)
A, B, C = get_line_equation(y_center, x_center, angle_y)
lines.append([A, B, C, angle_x])
#plt_show(seg_img)
    # special case: only one lane line in the image
if(len(lines) == 1):
if show:
cv2.drawContours(seg_img, [box_points], 0, (0, 0, 255), 2)
plt_show(seg_img)
return rects
    # pairwise intersections of all fitted lines
x_inter_list=[]
y_inter_list=[]
for i in range(len(lines)-1):
for j in range(i+1, len(lines)):
#print(lines[i], lines[j])
x_inter, y_inter = get_inter_point(lines[i][0], lines[i][1], lines[i][2], lines[j][0], lines[j][1], lines[j][2])
x_inter_list.append(x_inter)
y_inter_list.append(y_inter)
    # a single intersection point: extend every rectangle towards it
if(len(x_inter_list) == 1):
seg_img_modify = seg_img.copy()
new_rects=[]
for rect in rects:
new_rect = modify_rect(rect, x_inter_list[0], y_inter_list[0])
new_rects.append(new_rect)
if show:
cv2.circle(seg_img_modify, (int(x_inter_list[0]),int(y_inter_list[0])), 3, (0, 255, 0), thickness=2)
new_box_points = cv2.boxPoints(new_rect)
new_box_points = np.int0(new_box_points)
                # drawContours expects x before y, so swap the two columns
new_box_points = new_box_points[:, ::-1]
cv2.drawContours(seg_img_modify, [new_box_points], 0, (0, 0, 255), 2)
plt_show(seg_img_modify)
return new_rects
    # otherwise fit the horizon line y = kx + b (i.e. kx - y + b = 0) through all intersection points
else:
new_rects = []
line_anno_list = []
x_annos = []
seg_img_modify = seg_img.copy()
horizon_line ,y_l, y_r = modify_points(x_inter_list.copy(), y_inter_list.copy(), seg_img_modify, offset=offset)
for i, (line, rect) in enumerate(zip(lines, rects)):
#print(line, horizon_line)
x_anno, y_anno = get_inter_point(line[0], line[1], line[2], horizon_line[0], -1, horizon_line[1])
if show:
cv2.circle(seg_img_modify, (int(x_anno),int(y_anno)), 3, (0, 255, 0), thickness=2)
new_rect = modify_rect(rect, x_anno, y_anno)
new_rects.append(new_rect)
if show:
new_box_points = cv2.boxPoints(new_rect)
new_box_points = np.int0(new_box_points)
                # drawContours expects x before y, so swap the two columns
new_box_points = new_box_points[:, ::-1]
cv2.drawContours(seg_img_modify, [new_box_points], 0, color_list[i], 2)
if show:
for line, x_anno in zip(lines, x_annos):
            # intersection of this lane's fitted line with the horizon line
y_anno = (-line[2] + -line[0] * x_anno)/line[1]
cv2.circle(seg_img_modify, (int(x_anno), int(y_anno)), 3, (0, 255, 0), thickness=3 )
plt_show(seg_img_modify)
return new_rects
# anchor annotation: top point (x, y), width, height and theta (angle to the x-axis)
def get_anno_rpn_anchor(frame_mp4_number, seg_width = 16, show = False, path2write = None, offset = 10):
"""
display : 0, nothin; 1, show; 2, write
return : (y1, y2, x1, w1, \theta)
"""
#得到frame/mp4/number
seg_img_file = os.path.join(seg_label_dir, frame_mp4_number+'.png')
seg_img = cv2.imread(seg_img_file)
H, W = seg_img.shape[:2]
seg_img = seg_img * 50
seg_img_ori = seg_img.copy()
seg_img_gray = cv2.cvtColor(seg_img, cv2.COLOR_BGR2GRAY)
#得到标注中有几条线,及其label
seg_labels = np.sort(np.unique(seg_img_gray))
#图中没有车道线
if len(seg_labels) == 1:
if(show):
plt_show(seg_img)
return None
#保存直线参数的list,一条直线用3个参数表示[A,B, C]
lines = []
rects = []
#得到覆盖标注的最小矩形
for seg_label in seg_labels[1:]:
#rect, box_points = get_min_rect_ori(seg_img_gray, seg_label)
#cv2.drawContours(seg_img, [box_points], 0, (0, 255, 0), 2)
try:
rect, box_points = get_min_rect_v2(seg_img_gray, seg_label, seg_img)
except:
print(frame_mp4_number)
continue
if show:
cv2.drawContours(seg_img, [box_points], 0, (0, 0, 255), 2)
rects.append(rect)
y_center = rect[0][0];
x_center = rect[0][1];
w=rect[1][0]
h=rect[1][1]
# w_anno = min(w, h)
# h_anno = max(w, h)
angle = rect[2]
#求这个矩形代表的直线
angle_y = angle_convert(w, h, angle)
angle_x = angle_convert_x(w, h, angle)
A, B, C = get_line_equation(y_center, x_center, angle_y)
lines.append([A, B, C, angle_x])
#plt_show(seg_img)
#只有一条车道线的情况
if(len(lines) == 1):
if show:
cv2.drawContours(seg_img, [box_points], 0, (0, 0, 255), 2)
plt_show(seg_img)
anchor = rect2anchor(rects[0])
return [anchor[1], anchor[1], [anchor]]
#求交点
x_inter_list=[]
y_inter_list=[]
for i in range(len(lines)-1):
for j in range(i+1, len(lines)):
#print(lines[i], lines[j])
x_inter, y_inter = get_inter_point(lines[i][0], lines[i][1], lines[i][2], lines[j][0], lines[j][1], lines[j][2])
x_inter_list.append(x_inter)
y_inter_list.append(y_inter)
#一个点直接回归直线
if(len(x_inter_list) == 1):
seg_img_modify = seg_img.copy()
new_rects=[]
for rect in rects:
new_rect = modify_rect(rect, x_inter_list[0], y_inter_list[0])
new_rects.append(new_rect)
if show:
cv2.circle(seg_img_modify, (int(x_inter_list[0]),int(y_inter_list[0])), 3, (0, 255, 0), thickness=2)
new_box_points = cv2.boxPoints(new_rect)
new_box_points = np.int0(new_box_points)
#draw contour里 x坐标在y前, 两列交换
new_box_points = new_box_points[:, ::-1]
cv2.drawContours(seg_img_modify, [new_box_points], 0, (0, 0, 255), 2)
plt_show(seg_img_modify)
anchors = [rect2anchor(rect) for rect in new_rects]
return [y_inter_list[0],y_inter_list[0], anchors]
#得到 y=kx+b 的[k, b]
#kx-y+b=0
else:
new_rects = []
line_anno_list = []
x_annos = []
seg_img_modify = seg_img.copy()
horizon_line ,y_l, y_r = modify_points(x_inter_list.copy(), y_inter_list.copy(), seg_img_modify, offset=offset)
for i, (line, rect) in enumerate(zip(lines, rects)):
#print(line, horizon_line)
x_anno, y_anno = get_inter_point(line[0], line[1], line[2], horizon_line[0], -1, horizon_line[1])
if show:
cv2.circle(seg_img_modify, (int(x_anno),int(y_anno)), 3, (0, 255, 0), thickness=2)
new_rect = modify_rect(rect, x_anno, y_anno)
new_rects.append(new_rect)
if show:
new_box_points = cv2.boxPoints(new_rect)
new_box_points = np.int0(new_box_points)
#draw contour里 x坐标在y前, 两列交换
new_box_points = new_box_points[:, ::-1]
cv2.drawContours(seg_img_modify, [new_box_points], 0, color_list[i], 2)
if show:
for line, x_anno in zip(lines, x_annos):
#矩形代表直线在水平分割线上交点
y_anno = (-line[2] + -line[0] * x_anno)/line[1]
cv2.circle(seg_img_modify, (int(x_anno), int(y_anno)), 3, (0, 255, 0), thickness=3 )
plt_show(seg_img_modify)
anchors = [rect2anchor(rect) for rect in new_rects]
return [y_l, y_r, anchors]
# anchor annotation: top point (x, y), width, height and theta (angle to the x-axis)
def get_anno_rpn_anchor(frame_mp4_number, seg_width = 16, show = False, path2write = None, offset = 10):
"""
display : 0, nothin; 1, show; 2, write
return : (y1, y2, x1, w1, \theta)
"""
#得到frame/mp4/number
seg_img_file = os.path.join(seg_label_dir, frame_mp4_number+'.png')
seg_img = cv2.imread(seg_img_file)
H, W = seg_img.shape[:2]
seg_img = seg_img * 50
seg_img_ori = seg_img.copy()
seg_img_gray = cv2.cvtColor(seg_img, cv2.COLOR_BGR2GRAY)
#得到标注中有几条线,及其label
seg_labels = np.sort(np.unique(seg_img_gray))
#图中没有车道线
if len(seg_labels) == 1:
if(show):
plt_show(seg_img)
return None
#保存直线参数的list,一条直线用3个参数表示[A,B, C]
lines = []
rects = []
#得到覆盖标注的最小矩形
for seg_label in seg_labels[1:]:
#rect, box_points = get_min_rect_ori(seg_img_gray, seg_label)
#cv2.drawContours(seg_img, [box_points], 0, (0, 255, 0), 2)
rect, box_points = get_min_rect_v2(seg_img_gray, seg_label, seg_img)
if show:
cv2.drawContours(seg_img, [box_points], 0, (0, 0, 255), 2)
rects.append(rect)
y_center = rect[0][0];
x_center = rect[0][1];
w=rect[1][0]
h=rect[1][1]
# w_anno = min(w, h)
# h_anno = max(w, h)
angle = rect[2]
#求这个矩形代表的直线
angle_y = angle_convert(w, h, angle)
angle_x = angle_convert_x(w, h, angle)
A, B, C = get_line_equation(y_center, x_center, angle_y)
lines.append([A, B, C, angle_x])
#plt_show(seg_img)
#只有一条车道线的情况
if(len(lines) == 1):
if show:
cv2.drawContours(seg_img, [box_points], 0, (0, 0, 255), 2)
plt_show(seg_img)
anchor = rect2anchor(rects[0])
return [anchor[1], anchor[1], [anchor]]
#求交点
x_inter_list=[]
y_inter_list=[]
for i in range(len(lines)-1):
for j in range(i+1, len(lines)):
#print(lines[i], lines[j])
x_inter, y_inter = get_inter_point(lines[i][0], lines[i][1], lines[i][2], lines[j][0], lines[j][1], lines[j][2])
x_inter_list.append(x_inter)
y_inter_list.append(y_inter)
#一个点直接回归直线
if(len(x_inter_list) == 1):
seg_img_modify = seg_img.copy()
new_rects=[]
for rect in rects:
new_rect = modify_rect(rect, x_inter_list[0], y_inter_list[0])
new_rects.append(new_rect)
if show:
cv2.circle(seg_img_modify, (int(x_inter_list[0]),int(y_inter_list[0])), 3, (0, 255, 0), thickness=2)
new_box_points = cv2.boxPoints(new_rect)
new_box_points = np.int0(new_box_points)
#draw contour里 x坐标在y前, 两列交换
new_box_points = new_box_points[:, ::-1]
cv2.drawContours(seg_img_modify, [new_box_points], 0, (0, 0, 255), 2)
plt_show(seg_img_modify)
anchors = [rect2anchor(rect) for rect in new_rects]
return [y_inter_list[0],y_inter_list[0], anchors]
#得到 y=kx+b 的[k, b]
#kx-y+b=0
else:
new_rects = []
line_anno_list = []
x_annos = []
seg_img_modify = seg_img.copy()
horizon_line ,y_l, y_r = modify_points(x_inter_list.copy(), y_inter_list.copy(), seg_img_modify, offset=offset)
for i, (line, rect) in enumerate(zip(lines, rects)):
#print(line, horizon_line)
x_anno, y_anno = get_inter_point(line[0], line[1], line[2], horizon_line[0], -1, horizon_line[1])
if show:
cv2.circle(seg_img_modify, (int(x_anno),int(y_anno)), 3, (0, 255, 0), thickness=2)
new_rect = modify_rect(rect, x_anno, y_anno)
new_rects.append(new_rect)
if show:
new_box_points = cv2.boxPoints(new_rect)
new_box_points = np.int0(new_box_points)
#draw contour里 x坐标在y前, 两列交换
new_box_points = new_box_points[:, ::-1]
cv2.drawContours(seg_img_modify, [new_box_points], 0, color_list[i], 2)
if show:
for line, x_anno in zip(lines, x_annos):
#矩形代表直线在水平分割线上交点
y_anno = (-line[2] + -line[0] * x_anno)/line[1]
cv2.circle(seg_img_modify, (int(x_anno), int(y_anno)), 3, (0, 255, 0), thickness=3 )
plt_show(seg_img_modify)
anchors = [rect2anchor(rect) for rect in new_rects]
return [y_l, y_r, anchors]
# vanishing-point annotation: top point (x, y), width, height and theta (angle to the x-axis)
def get_anno_vp(frame_mp4_number, seg_width = 16, show = False, path2write = None, offset = 10):
"""
display : 0, nothin; 1, show; 2, write
return : (y1, y2, x1, w1, \theta)
"""
    # locate the segmentation label image for frame/mp4/number
seg_img_file = os.path.join(seg_label_dir, frame_mp4_number+'.png')
seg_img = cv2.imread(seg_img_file)
H, W = seg_img.shape[:2]
seg_img = seg_img * 50
seg_img_ori = seg_img.copy()
seg_img_gray = cv2.cvtColor(seg_img, cv2.COLOR_BGR2GRAY)
    # how many lane lines are annotated, and their label values
    seg_labels = np.sort(np.unique(seg_img_gray))
    # no lane line in the image
if len(seg_labels) == 1:
if(show):
plt_show(seg_img)
return None
    # list of fitted lines; each stored as [A, B, C, angle_x]
    lines = []
    rects = []
    # minimum rotated rectangle covering each annotated lane
for seg_label in seg_labels[1:]:
#rect, box_points = get_min_rect_ori(seg_img_gray, seg_label)
#cv2.drawContours(seg_img, [box_points], 0, (0, 255, 0), 2)
try:
rect, box_points = get_min_rect_v2(seg_img_gray, seg_label, seg_img)
except:
print(frame_mp4_number)
REFINE_LIST.append(frame_mp4_number)
continue
if show:
cv2.drawContours(seg_img, [box_points], 0, (0, 0, 255), 2)
rects.append(rect)
y_center = rect[0][0];
x_center = rect[0][1];
w=rect[1][0]
h=rect[1][1]
# w_anno = min(w, h)
# h_anno = max(w, h)
angle = rect[2]
        # line represented by this rectangle (its long-edge direction)
angle_y = angle_convert(w, h, angle)
angle_x = angle_convert_x(w, h, angle)
A, B, C = get_line_equation(y_center, x_center, angle_y)
lines.append([A, B, C, angle_x])
#plt_show(seg_img)
    # special case: only one lane line in the image
if(len(lines) == 1):
if show:
cv2.drawContours(seg_img, [box_points], 0, (0, 0, 255), 2)
plt_show(seg_img)
rect = rects[0]
y_center = rect[0][0]
x_center = rect[0][1]
w=rect[1][0]
h=rect[1][1]
w_anno = min(w, h)
h_anno = max(w, h)
angle = rect[2] #+ 0.001
        # line represented by this rectangle (its long-edge direction)
angle_y = angle_convert(w, h, angle)
angle_x = angle_convert_x(w, h, angle)
#print(angle_y, angle_x)
x_upper_center = x_center - h_anno/2 * np.cos(angle_x * np.pi / 180)
y_upper_center = y_center - h_anno/2 * np.sin(angle_x * np.pi / 180)
#anchor = rect2anchor(rects[0])
return (int(round(x_upper_center)), int(round(y_upper_center)))
    # pairwise intersections of all fitted lines
x_inter_list=[]
y_inter_list=[]
for i in range(len(lines)-1):
for j in range(i+1, len(lines)):
#print(lines[i], lines[j])
x_inter, y_inter = get_inter_point(lines[i][0], lines[i][1], lines[i][2], lines[j][0], lines[j][1], lines[j][2])
x_inter_list.append(x_inter)
y_inter_list.append(y_inter)
return (int(round(np.mean(x_inter_list))), int(round(np.mean(y_inter_list))))
path2write = vp_label_dir
if not os.path.exists(path2write):
os.makedirs(path2write)
offset = 10
cnt = 0
def write_anno_vp(seg_img_file):
global cnt
with lock:
cnt += 1
if cnt %10000 == 0:
print(cnt)
try:
name_split_list = seg_img_file.split('/')
frame_mp4_number = '/'.join(name_split_list[-3:])[:-4]
vp_img_path = os.path.join(path2write, frame_mp4_number+ '.png')
# if os.path.exists(txt_path):
# continue
dirname = os.path.dirname(vp_img_path )
if not os.path.exists(dirname):
os.makedirs(dirname)
annos = get_anno_vp(frame_mp4_number, seg_width=16, path2write = path2write, offset=10)
vp_img = np.zeros((H, W), dtype=np.uint8)
if annos is not None:
cv2.circle(vp_img, annos, 16, 255, -1)
vp_img[vp_img>0] = 1
assert (vp_img>1).sum() == 0
cv2.imwrite(vp_img_path, vp_img)
except Exception as e:
        print(seg_img_file, "raised the following exception: %s" % e, traceback.format_exc())
from multiprocessing.dummy import Pool, Lock
pool = Pool()
lock = Lock()
seg_img_list = glob(seg_label_dir + '/*/*/*png')
#write_anno_vp(seg_img_list)
pool.map(write_anno_vp, seg_img_list)
pool.close()
pool.join()
```
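After the script above has run, each entry under `vp_label_new_32` is a binary map with a filled circle of radius 16 around the estimated vanishing point. A small sanity-check sketch (the path below is only an example):
```python
import cv2
import numpy as np

vp = cv2.imread('datasets/culane/vp_label_new_32/driver_23_30frame/05151649_0422.MP4/00000.png',
                cv2.IMREAD_GRAYSCALE)
if vp is not None:
    assert set(np.unique(vp)).issubset({0, 1})      # written as a {0, 1} mask
    ys, xs = np.nonzero(vp)
    if len(xs):
        print('approximate vanishing point:', int(xs.mean()), int(ys.mean()))
```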
#### File: bbox/iou_calculators/riou_calculator.py
```python
import torch
from .builder import IOU_CALCULATORS
from maskrcnn_benchmark.structures.rboxlist_ops import box_iou
from mmdet.core.bbox.bbox_dis import get_roi2align
@IOU_CALCULATORS.register_module()
class RboxOverlaps2D(object):
"""2D IoU Calculator."""
def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
"""Calculate IoU between 2D bboxes.
Args:
bboxes1 (Tensor): bboxes have shape (m, 4) in <x1, y1, x2, y2>
format, or shape (m, 5) in <x1, y1, x2, y2, score> format.
bboxes2 (Tensor): bboxes have shape (m, 4) in <x1, y1, x2, y2>
format, shape (m, 5) in <x1, y1, x2, y2, score> format, or be
empty. If is_aligned is ``True``, then m and n must be equal.
mode (str): "iou" (intersection over union) or iof (intersection
over foreground).
Returns:
ious(Tensor): shape (m, n) if is_aligned == False else shape (m, 1)
"""
assert bboxes1.size(-1) in [0, 4, 5]
assert bboxes2.size(-1) in [0, 4, 5]
if bboxes2.size(-1) == 5:
bboxes2 = bboxes2[..., :4]
if bboxes1.size(-1) == 5:
bboxes1 = bboxes1[..., :4]
bboxes12align = get_roi2align(bboxes1, 590, 1640)
bboxes22align = get_roi2align(bboxes2, 590, 1640)
return box_iou(bboxes12align, bboxes22align)
def __repr__(self):
"""str: a string describing the module"""
repr_str = self.__class__.__name__ + '()'
return repr_str
```
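Because the class registers itself with `IOU_CALCULATORS`, a config would normally refer to it by name rather than importing it directly. The snippet below follows the usual mmdetection registry convention; whether SGNet's configs use exactly these keys has not been verified here.
```python
# Sketch only: assigner type is an assumption for illustration.
assigner = dict(
    type='MaxIoUAssigner',
    iou_calculator=dict(type='RboxOverlaps2D'),
)
```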
#### File: lib/datasets/lane_dataset.py
```python
import logging
import cv2
import numpy as np
import imgaug.augmenters as iaa
from imgaug.augmenters import Resize
from torchvision.transforms import ToTensor
from torch.utils.data.dataset import Dataset
from scipy.interpolate import InterpolatedUnivariateSpline
from imgaug.augmentables.lines import LineString, LineStringsOnImage
from imgaug.augmentables.segmaps import SegmentationMapsOnImage
from lib.lane import Lane
from .culane import CULane
from .tusimple import TuSimple
from .llamas import LLAMAS
from .ziyan import Ziyan
from .nolabel_dataset import NoLabelDataset
GT_COLOR = (255, 0, 0)
PRED_HIT_COLOR = (0, 255, 0)
PRED_MISS_COLOR = (0, 0, 255)
IMAGENET_MEAN = np.array([0.485, 0.456, 0.406])
IMAGENET_STD = np.array([0.229, 0.224, 0.225])
class LaneDataset(Dataset):
def __init__(self,
S=72,
dataset='tusimple',
augmentations=None,
normalize=False,
img_size=(360, 640),
aug_chance=1.,
**kwargs):
super(LaneDataset, self).__init__()
if dataset == 'tusimple':
self.dataset = TuSimple(**kwargs)
elif dataset == 'culane':
self.dataset = CULane(**kwargs)
elif dataset == 'ziyan':
self.dataset = Ziyan(**kwargs)
elif dataset == 'llamas':
self.dataset = LLAMAS(**kwargs)
elif dataset == 'nolabel_dataset':
self.dataset = NoLabelDataset(**kwargs)
else:
raise NotImplementedError()
self.n_strips = S - 1
self.n_offsets = S
self.normalize = normalize
self.img_h, self.img_w = img_size
self.strip_size = self.img_h / self.n_strips
self.logger = logging.getLogger(__name__)
        # y coordinate of each horizontal strip (one x offset is predicted per y)
self.offsets_ys = np.arange(self.img_h, -1, -self.strip_size)
self.transform_annotations()
if augmentations is not None:
# add augmentations
augmentations = [getattr(iaa, aug['name'])(**aug['parameters'])
for aug in augmentations] # add augmentation
else:
augmentations = []
transformations = iaa.Sequential([Resize({'height': self.img_h, 'width': self.img_w})])
self.to_tensor = ToTensor()
'''
augmentations:
- name: Affine
parameters:
translate_px:
x: !!python/tuple [-25, 25]
y: !!python/tuple [-10, 10]
rotate: !!python/tuple [-6, 6]
scale: !!python/tuple [0.85, 1.15]
- name: HorizontalFlip
parameters:
p: 0.5
'''
self.transform = iaa.Sequential([iaa.Sometimes(then_list=augmentations, p=aug_chance), transformations])
self.max_lanes = self.dataset.max_lanes
@property
def annotations(self):
return self.dataset.annotations
def transform_annotations(self):
self.logger.info("Transforming annotations to the model's target format...")
#print(self.transform_annotation)
#print(self.dataset.annotations)
self.dataset.annotations = np.array(list(map(self.transform_annotation, self.dataset.annotations)))
self.logger.info('Done.')
def filter_lane(self, lane):
assert lane[-1][1] <= lane[0][1]
filtered_lane = []
used = set()
for p in lane:
if p[1] not in used:
filtered_lane.append(p)
used.add(p[1])
return filtered_lane
def transform_annotation(self, anno, img_wh=None):
if img_wh is None:
img_h = self.dataset.get_img_heigth(anno['path'])
img_w = self.dataset.get_img_width(anno['path'])
else:
img_w, img_h = img_wh
old_lanes = anno['lanes']
# rpn_proposals = anno['rpn_proposals']
# for i in range(4):
# if len(old_lane[i]) < 2:
# pass
# removing lanes with less than 2 points
#print('&&', len(old_lanes))
# if len(old_lanes) ==5:
# print(old_lanes)
old_lanes = filter(lambda x: len(x) > 1, old_lanes)
# sort lane points by Y (bottom to top of the image)
old_lanes = [sorted(lane, key=lambda x: -x[1]) for lane in old_lanes]
#print('***', len(old_lanes))
# remove points with same Y (keep first occurrence)
old_lanes = [self.filter_lane(lane) for lane in old_lanes]
#print('****', len(old_lanes))
        # normalize the annotation coordinates: lane points are rescaled to the model
        # input size here; the image itself is resized in __getitem__'s transform
        old_lanes = [[[x * self.img_w / float(img_w), y * self.img_h / float(img_h)] for x, y in lane]
                     for lane in old_lanes]
        # TODO(CC): also normalize the vanishing point and the angle
        #print('*****', len(old_lanes))
        # create transformed annotations: positions above the lane start keep the large
        # negative fill value, positions below the image bottom are extrapolated
lanes = np.ones((self.dataset.max_lanes, 2 + 1 + 1 + 1 + self.n_offsets),
dtype=np.float32) * -1e5 # 2 scores, 1 start_y, 1 start_x, 1 length, S+1 coordinates
# lanes are invalid by default
lanes[:, 0] = 1
lanes[:, 1] = 0
for lane_idx, lane in enumerate(old_lanes[:4]):
try:
xs_outside_image, xs_inside_image = self.sample_lane(lane, self.offsets_ys)
except AssertionError:
continue
if len(xs_inside_image) == 0:
continue
all_xs = np.hstack((xs_outside_image, xs_inside_image))
lanes[lane_idx, 0] = 0
lanes[lane_idx, 1] = 1
# normalized start y
lanes[lane_idx, 2] = len(xs_outside_image) / self.n_strips
lanes[lane_idx, 3] = xs_inside_image[0]
lanes[lane_idx, 4] = len(xs_inside_image)
lanes[lane_idx, 5:5 + len(all_xs)] = all_xs
# also carry rpn_proposals through to the new annotation
new_anno = {'path': anno['path'], 'label': lanes, 'rpn_proposals': anno['rpn_proposals'], 'vp_idx': anno['vp_idx'], 'old_anno': anno}
return new_anno
def sample_lane(self, points, sample_ys):
# this function expects the points to be sorted
points = np.array(points)
if not np.all(points[1:, 1] < points[:-1, 1]):
raise Exception('Annotation points have to be sorted')
x, y = points[:, 0], points[:, 1]
# interpolate points inside domain
assert len(points) > 1
interp = InterpolatedUnivariateSpline(y[::-1], x[::-1], k=min(3, len(points) - 1))
domain_min_y = y.min()
domain_max_y = y.max()
sample_ys_inside_domain = sample_ys[(sample_ys >= domain_min_y) & (sample_ys <= domain_max_y)]
assert len(sample_ys_inside_domain) > 0
interp_xs = interp(sample_ys_inside_domain)
# extrapolate lane to the bottom of the image with a straight line using the 2 points closest to the bottom
two_closest_points = points[:2]
extrap = np.polyfit(two_closest_points[:, 1], two_closest_points[:, 0], deg=1)
extrap_ys = sample_ys[sample_ys > domain_max_y]
extrap_xs = np.polyval(extrap, extrap_ys)
all_xs = np.hstack((extrap_xs, interp_xs))
# separate between inside and outside points
inside_mask = (all_xs >= 0) & (all_xs < self.img_w)
xs_inside_image = all_xs[inside_mask]
xs_outside_image = all_xs[~inside_mask]
return xs_outside_image, xs_inside_image
def label_to_lanes(self, label):
lanes = []
for l in label:
if l[1] == 0:
continue
xs = l[5:] / self.img_w
ys = self.offsets_ys / self.img_h
start = int(round(l[2] * self.n_strips))
length = int(round(l[4]))
if length == 1:
start = start - 1
length = 2
xs = xs[start:start + length][::-1]
ys = ys[start:start + length][::-1]
xs = xs.reshape(-1, 1)
ys = ys.reshape(-1, 1)
points = np.hstack((xs, ys))
lanes.append(Lane(points=points))
return lanes
def draw_annotation(self, idx, label=None, pred=None, img=None, rpn_proposals = None, gt_vp = None, pred_vp=None):
# Get image if not provided
if img is None:
# print(self.annotations[idx]['path'])
img, label, _, _, _,_ = self.__getitem__(idx)
label = self.label_to_lanes(label)
img = img.permute(1, 2, 0).numpy()
if self.normalize:
img = img * np.array(IMAGENET_STD) + np.array(IMAGENET_MEAN)
img = (img * 255).astype(np.uint8)
else:
_, label, _, _, _,_ = self.__getitem__(idx)
try:
label = self.label_to_lanes(label)
except:
import pdb
pdb.set_trace()
img = cv2.resize(img, (self.img_w, self.img_h))
img_h, _, _ = img.shape
# Pad image to visualize extrapolated predictions
pad = 0
if pad > 0:
img_pad = np.zeros((self.img_h + 2 * pad, self.img_w + 2 * pad, 3), dtype=np.uint8)
img_pad[pad:-pad, pad:-pad, :] = img
img = img_pad
data = [(None, None, label)]
if pred is not None:
# print(len(pred), 'preds')
fp, fn, matches, accs = self.dataset.get_metrics(pred, idx)
# print('fp: {} | fn: {}'.format(fp, fn))
# print(len(matches), 'matches')
# print(matches, accs)
assert len(matches) == len(pred)
data.append((matches, accs, pred))
else:
fp = fn = None
# draw the rpn_proposals (debug code below, currently commented out)
# import pdb
# pdb.set_trace()
# for line_i in range(4):
# proposal = rpn_proposals[line_i]
# end_point_y = proposal[1] + 200
# end_point_x = (end_point_y - proposal[1]) / np.tan(proposal[3] * np.pi / 180) + proposal[0]
# cv2.line(img, tuple(proposal[:2].astype(np.int)), (int(round(end_point_x)), int(end_point_y )), (0, 255, 255), 2)
for matches, accs, datum in data:
for i, l in enumerate(datum):
if matches is None:
color = GT_COLOR
elif matches[i]:
color = PRED_HIT_COLOR
else:
color = PRED_MISS_COLOR
points = l.points
points[:, 0] *= img.shape[1]
points[:, 1] *= img.shape[0]
points = points.round().astype(int)
points += pad
xs, ys = points[:, 0], points[:, 1]
for curr_p, next_p in zip(points[:-1], points[1:]):
img = cv2.line(img,
tuple(curr_p),
tuple(next_p),
color=color,
thickness=2 if matches is None else 2)
# if 'start_x' in l.metadata:
# start_x = l.metadata['start_x'] * img.shape[1]
# start_y = l.metadata['start_y'] * img.shape[0]
# cv2.circle(img, (int(start_x + pad), int(img_h - 1 - start_y + pad)),
# radius=5,
# color=(0, 0, 255),
# thickness=-1)
# if len(xs) == 0:
# print("Empty pred")
# if len(xs) > 0 and accs is not None:
# cv2.putText(img,
# '{:.0f} ({})'.format(accs[i] * 100, i),
# (int(xs[len(xs) // 2] + pad), int(ys[len(xs) // 2] + pad)),
# fontFace=cv2.FONT_HERSHEY_COMPLEX,
# fontScale=0.7,
# color=color)
# cv2.putText(img,
# '{:.0f}'.format(l.metadata['conf'] * 100),
# (int(xs[len(xs) // 2] + pad), int(ys[len(xs) // 2] + pad - 50)),
# fontFace=cv2.FONT_HERSHEY_COMPLEX,
# fontScale=0.7,
# color=(255, 0, 255))
# cv2.circle(img, tuple(gt_vp), 5, GT_COLOR, -1)
# cv2.circle(img, tuple(pred_vp), 5, (255, 255, 255), -1)
cv2.circle(img, tuple(int(x) for x in gt_vp), 5, GT_COLOR, -1)
cv2.circle(img, tuple(int(x) for x in pred_vp), 5, (255, 255, 255), -1)
img = cv2.resize(img, (self.dataset.img_w, self.dataset.img_h))
return img, fp, fn
def draw_annotation_point(self, idx, label=None, pred=None, img=None, rpn_proposals = None, gt_vp = None, pred_vp=None):
# Get image if not provided
if img is None:
# print(self.annotations[idx]['path'])
img, label, _, _, _,_ = self.__getitem__(idx)
label = self.label_to_lanes(label)
img = img.permute(1, 2, 0).numpy()
if self.normalize:
img = img * np.array(IMAGENET_STD) + np.array(IMAGENET_MEAN)
img = (img * 255).astype(np.uint8)
else:
_, label, _, _, _,_ = self.__getitem__(idx)
try:
label = self.label_to_lanes(label)
except:
import pdb
pdb.set_trace()
img = cv2.resize(img, (self.dataset.img_w, self.dataset.img_h))
ori_img = img.copy()
img_h, _, _ = img.shape
# Pad image to visualize extrapolated predictions
pad = 0
if pad > 0:
img_pad = np.zeros((self.img_h + 2 * pad, self.img_w + 2 * pad, 3), dtype=np.uint8)
img_pad[pad:-pad, pad:-pad, :] = img
img = img_pad
data = [(None, None, label)]
if pred is not None:
# print(len(pred), 'preds')
fp, fn, matches, accs = self.dataset.get_metrics(pred, idx)
# print('fp: {} | fn: {}'.format(fp, fn))
# print(len(matches), 'matches')
# print(matches, accs)
assert len(matches) == len(pred)
data.append((matches, accs, pred))
else:
fp = fn = None
for matches, accs, datum in data:
for i, l in enumerate(datum):
if matches is None:
color = GT_COLOR
continue
elif matches[i]:
color = PRED_HIT_COLOR
else:
color = PRED_MISS_COLOR
points = l.points
points[:, 0] *= img.shape[1]
points[:, 1] *= img.shape[0]
points = points.round().astype(int)
points += pad
xs, ys = points[:, 0], points[:, 1]
for point in points:
img = cv2.circle(img, tuple(point), color=color, radius=5, thickness=-1)
img = cv2.resize(img, (self.dataset.img_w, self.dataset.img_h))
return ori_img, img, fp, fn
def lane_to_linestrings(self, lanes):
lines = []
for lane in lanes:
lines.append(LineString(lane))
return lines
def linestrings_to_lanes(self, lines):
lanes = []
for line in lines:
lanes.append(line.coords)
return lanes
def __getitem__(self, idx):
item = self.dataset[idx]
# print(item['path'])  # debug output disabled: this would run for every sample
# print(self.dataset.img_w, self.dataset.img_h)
img_org = cv2.imread(item['path'])
img_vp = cv2.imread(item['old_anno']['vp_lbpth'], cv2.IMREAD_GRAYSCALE)
seg_img = cv2.imread(item['old_anno']['seg_lbpth'], cv2.IMREAD_GRAYSCALE)
if self.dataset.__class__.__name__ == 'Ziyan':
img_org = cv2.resize(img_org, (self.dataset.img_w, self.dataset.img_h))
img_vp = cv2.resize(img_vp, (self.dataset.img_w, self.dataset.img_h))
seg_img = cv2.resize(seg_img, (self.dataset.img_w, self.dataset.img_h))
#seg_img = self.dataset.convert_labels(img_vp)
img_vp = self.dataset.convert_vp_labels(img_vp)
seg_img = self.dataset.convert_labels(seg_img)
#print(item['path'])
line_strings_org = self.lane_to_linestrings(item['old_anno']['lanes'])
#print('*', len(line_strings_org))
line_strings_org = LineStringsOnImage(line_strings_org, shape=img_org.shape)
rpn_proposals = item['rpn_proposals']
vp_idx = item['vp_idx']
rpn_proposals = np.array(rpn_proposals)
#print(item['old_anno']['seg_lbpth'])
try:
vp_lane_segmap = SegmentationMapsOnImage(np.stack((img_vp, seg_img), axis=2), shape=img_vp.shape)
except:
seg_img = cv2.resize(seg_img, (1920, 1080))
vp_lane_segmap = SegmentationMapsOnImage(np.stack((img_vp, seg_img), axis=2), shape=img_vp.shape)
print('seg label had an unexpected shape; resized to 1920x1080 as a fallback')
for i in range(30):
img, line_strings, vp_lane_label = self.transform(image=img_org.copy(), line_strings=line_strings_org, segmentation_maps = vp_lane_segmap)
# print('**', len(line_strings_org))
# print('****', len(line_strings), line_strings)
vp_label = vp_lane_label.arr[:, :, 0]
lane_label = vp_lane_label.arr[:, :, 1]
# the transform can turn 4 lanes into 5: a curved lane gets cut and its middle part falls outside the image
line_strings.clip_out_of_image_()
# print('*****', len(line_strings), line_strings)
new_anno = {'path': item['path'], 'lanes': self.linestrings_to_lanes(line_strings), 'rpn_proposals': item['rpn_proposals'], 'vp_idx' : item['vp_idx']}
#print('***', len(self.linestrings_to_lanes(line_strings)))
try:
label = self.transform_annotation(new_anno, img_wh=(self.img_w, self.img_h))['label']
break
except:
if (i + 1) == 30:
self.logger.critical('Transform annotation failed 30 times :(')
exit()
#label = self.transform_annotation(new_anno, img_wh=(self.img_w, self.img_h))['label']
img = img / 255.
if self.normalize:
img = (img - IMAGENET_MEAN) / IMAGENET_STD
img = self.to_tensor(img.astype(np.float32))
vp_label = vp_label.astype(np.int64)
lane_label = lane_label.astype(np.int64)
return (img, label, np.array([0, 0]), np.array([0, 0]), vp_label, lane_label)
def __len__(self):
return len(self.dataset)
```
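The class above behaves like any other `torch.utils.data.Dataset`, so it can be batched with a plain `DataLoader`. Below is a minimal usage sketch added for illustration; the import path, the dataset `root` and the keyword values are assumptions rather than the repository's actual training entry point.
```python
from torch.utils.data import DataLoader
from lib.datasets.lane_dataset import LaneDataset  # assumed module path

dataset = LaneDataset(S=72,
                      dataset='tusimple',
                      img_size=(360, 640),
                      normalize=True,
                      split='train',
                      root='./datasets/tusimple')   # placeholder root
loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=4)

for imgs, labels, _, _, vp_labels, lane_labels in loader:
    # imgs: (B, 3, 360, 640); labels: (B, max_lanes, 5 + S) anchor targets
    break
```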
#### File: lib/datasets/tusimple.py
```python
import os
import json
import random
import logging
import numpy as np
from utils.tusimple_metric import LaneEval
from .lane_dataset_loader import LaneDatasetLoader
SPLIT_FILES = {
'train+val': ['label_data_0313.json', 'label_data_0601.json', 'label_data_0531.json'],
'train': ['label_data_0313.json', 'label_data_0601.json'],
'val': ['label_data_0531.json'],
'test': ['test_label.json'],
}
class TuSimple(LaneDatasetLoader):
def __init__(self, split='train', max_lanes=None, root=None):
if 'train' in split:
split = 'train+val'
self.split = split
print('TuSimple split ****' , split, root)
self.root = root
self.logger = logging.getLogger(__name__)
if split not in SPLIT_FILES.keys():
raise Exception('Split `{}` does not exist.'.format(split))
if root is None:
raise Exception('Please specify the root directory')
self.anno_files = [os.path.join(self.root, path) for path in SPLIT_FILES[split]]
if split == 'train' or split == 'train+val' or split =='val':
self.vp_path = './datasets/tusimple/training/vp_label_32'
self.lane_seg_path = './datasets/tusimple/training/gt_binary_image'
else:
self.vp_path = './datasets/tusimple-test/testing/vp_label_32'
self.lane_seg_path = './datasets/tusimple-test/testing/gt_binary_image'
with open('./datasets/vp.json', 'r') as fr:
labels_info = json.load(fr)
self.vplb_map = {el['id']: el['trainId'] for el in labels_info}
# TuSimple uses binary segmentation masks for the lanes
with open('./datasets/vp.json', 'r') as fr:
labels_info = json.load(fr)
self.lb_map = {el['id']: el['trainId'] for el in labels_info}
self.img_w, self.img_h = 1280, 720
self.annotations = []
self.load_annotations()
# Force max_lanes, used when evaluating testing with models trained on other datasets
if max_lanes is not None:
self.max_lanes = max_lanes
def get_img_heigth(self, _):
return 720
def get_img_width(self, _):
return 1280
def get_metrics(self, lanes, idx):
label = self.annotations[idx]
org_anno = label['old_anno']
pred = self.pred2lanes(org_anno['path'], lanes, org_anno['y_samples'])
_, fp, fn, matches, accs, _ = LaneEval.bench(pred, org_anno['org_lanes'], org_anno['y_samples'], 0, True)
return fp, fn, matches, accs
def pred2lanes(self, path, pred, y_samples):
ys = np.array(y_samples) / self.img_h
lanes = []
for lane in pred:
xs = lane(ys)
invalid_mask = xs < 0
lane = (xs * self.get_img_width(path)).astype(int)
lane[invalid_mask] = -2
lanes.append(lane.tolist())
return lanes
def load_annotations(self):
self.logger.info('Loading TuSimple annotations...')
self.annotations = []
max_lanes = 0
for anno_file in self.anno_files:
with open(anno_file, 'r') as anno_obj:
lines = anno_obj.readlines()
for line in lines:
data = json.loads(line)
y_samples = data['h_samples']
gt_lanes = data['lanes']
lanes = [[(x, y) for (x, y) in zip(lane, y_samples) if x >= 0] for lane in gt_lanes]
lanes = [lane for lane in lanes if len(lane) > 0]
max_lanes = max(max_lanes, len(lanes))
self.annotations.append({
'path': os.path.join(self.root, data['raw_file']),
'org_path': data['raw_file'],
'org_lanes': gt_lanes,
'lanes': lanes,
'aug': False,
'y_samples': y_samples,
'rpn_proposals': None,
'vp_idx': None,
'vp_lbpth': os.path.join(self.vp_path, data['raw_file'].replace('jpg', 'png')),
'seg_lbpth': os.path.join(self.lane_seg_path, data['raw_file'].replace('jpg', 'png'))
})
if self.split == 'train':
random.shuffle(self.annotations)
self.max_lanes = max_lanes
self.logger.info('%d annotations loaded, with a maximum of %d lanes in an image.', len(self.annotations),
self.max_lanes)
def transform_annotations(self, transform):
self.annotations = list(map(transform, self.annotations))
def pred2tusimpleformat(self, idx, pred, runtime):
runtime *= 1000. # s to ms
img_name = self.annotations[idx]['old_anno']['org_path']
h_samples = self.annotations[idx]['old_anno']['y_samples']
lanes = self.pred2lanes(img_name, pred, h_samples)
output = {'raw_file': img_name, 'lanes': lanes, 'run_time': runtime}
return json.dumps(output)
def save_tusimple_predictions(self, predictions, filename, runtimes=None):
if runtimes is None:
runtimes = np.ones(len(predictions)) * 1.e-3
lines = []
for idx, (prediction, runtime) in enumerate(zip(predictions, runtimes)):
line = self.pred2tusimpleformat(idx, prediction, runtime)
lines.append(line)
with open(filename, 'w') as output_file:
output_file.write('\n'.join(lines))
def eval_predictions(self, predictions, output_basedir, runtimes=None):
pred_filename = os.path.join(output_basedir, 'tusimple_predictions.json')
self.save_tusimple_predictions(predictions, pred_filename, runtimes)
result = json.loads(LaneEval.bench_one_submit(pred_filename, self.anno_files[0]))
table = {}
for metric in result:
table[metric['name']] = metric['value']
return table
def convert_labels(self, label):
for k, v in self.lb_map.items():
label[label == k] = v
return label
def convert_vp_labels(self, label):
for k, v in self.vplb_map.items():
label[label == k] = v
return label
def __getitem__(self, idx):
return self.annotations[idx]
def __len__(self):
return len(self.annotations)
```
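For context, `load_annotations` above reads the standard TuSimple label files, where every line is an independent JSON object with `raw_file`, `h_samples` and `lanes` keys. A toy illustration of that record shape and of the parsing done above (all values are invented):
```python
import json

line = json.dumps({
    'raw_file': 'clips/0313-1/6040/20.jpg',          # illustrative path
    'h_samples': [240, 250, 260],                    # sampled y positions
    'lanes': [[-2, 632, 625], [720, 709, 698]],      # one x per h_sample, -2 = absent
})
data = json.loads(line)
lanes = [[(x, y) for (x, y) in zip(lane, data['h_samples']) if x >= 0]
         for lane in data['lanes']]
print(lanes)  # [[(632, 250), (625, 260)], [(720, 240), (709, 250), (698, 260)]]
```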
#### File: SGNet/lib/experiment.py
```python
import os
import re
import json
import logging
import subprocess
import torch
from torch.utils.tensorboard import SummaryWriter
class Experiment:
def __init__(self, exp_name, args=None, mode='train', exps_basedir='experiments', tensorboard_dir='tensorboard'):
self.name = exp_name
self.exp_dirpath = os.path.join(exps_basedir, exp_name)
self.models_dirpath = os.path.join(self.exp_dirpath, 'models')
self.results_dirpath = os.path.join(self.exp_dirpath, 'results')
self.cfg_path = os.path.join(self.exp_dirpath, 'config.yaml')
self.code_state_path = os.path.join(self.exp_dirpath, 'code_state.txt')
self.log_path = os.path.join(self.exp_dirpath, 'log_{}.txt'.format(mode))
self.tensorboard_writer = SummaryWriter(os.path.join(tensorboard_dir, exp_name))
self.cfg = None
self.setup_exp_dir()
self.setup_logging()
if args is not None:
self.log_args(args)
def setup_exp_dir(self):
if not os.path.exists(self.exp_dirpath):
os.makedirs(self.exp_dirpath)
os.makedirs(self.models_dirpath)
os.makedirs(self.results_dirpath)
self.save_code_state()
def save_code_state(self):
state = "Git hash: {}".format(
subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE, check=False).stdout.decode('utf-8'))
state += '\n*************\nGit diff:\n*************\n'
state += subprocess.run(['git', 'diff'], stdout=subprocess.PIPE, check=False).stdout.decode('utf-8')
with open(self.code_state_path, 'w') as code_state_file:
code_state_file.write(state)
def setup_logging(self):
formatter = logging.Formatter("[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s")
file_handler = logging.FileHandler(self.log_path)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)
logging.basicConfig(level=logging.DEBUG, handlers=[file_handler, stream_handler])
self.logger = logging.getLogger(__name__)
def log_args(self, args):
self.logger.debug('CLI Args:\n %s', str(args))
def set_cfg(self, cfg, override=False):
assert 'model_checkpoint_interval' in cfg
self.cfg = cfg
if not os.path.exists(self.cfg_path) or override:
with open(self.cfg_path, 'w') as cfg_file:
cfg_file.write(str(cfg))
def get_last_checkpoint_epoch(self):
pattern = re.compile('model_(\\d+).pt')
last_epoch = -1
for ckpt_file in os.listdir(self.models_dirpath):
result = pattern.match(ckpt_file)
if result is not None:
epoch = int(result.groups()[0])
if epoch > last_epoch:
last_epoch = epoch
return last_epoch
def get_checkpoint_path(self, epoch):
return os.path.join(self.models_dirpath, 'model_{:04d}.pt'.format(epoch))
def get_epoch_model(self, epoch):
return torch.load(self.get_checkpoint_path(epoch))['model']
def load_last_train_state(self, model, optimizer, scheduler):
epoch = self.get_last_checkpoint_epoch()
train_state_path = self.get_checkpoint_path(epoch)
train_state = torch.load(train_state_path)
if hasattr(model, 'module') :
model.module.load_state_dict(train_state['model'])
else:
model.load_state_dict(train_state['model'], strict= False)
optimizer.load_state_dict(train_state['optimizer'])
scheduler.load_state_dict(train_state['scheduler'])
return epoch, model, optimizer, scheduler
def save_train_state(self, epoch, model, optimizer, scheduler):
train_state_path = self.get_checkpoint_path(epoch)
state = model.module.state_dict() if hasattr(model, 'module') else model.state_dict()
torch.save(
{
'epoch': epoch,
'model': state,
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict()
}, train_state_path)
def iter_end_callback(self, epoch, max_epochs, iter_nb, max_iter, loss, loss_components):
line = 'Epoch [{}/{}] - Iter [{}/{}] - Loss: {:.5f} - '.format(epoch, max_epochs, iter_nb, max_iter, loss)
line += ' - '.join(
['{}: {:.5f}'.format(component, loss_components[component]) for component in loss_components])
self.logger.debug(line)
overall_iter = (epoch * max_iter) + iter_nb
self.tensorboard_writer.add_scalar('loss/total_loss', loss, overall_iter)
for key in loss_components:
self.tensorboard_writer.add_scalar('loss/{}'.format(key), loss_components[key], overall_iter)
def epoch_start_callback(self, epoch, max_epochs):
self.logger.debug('Epoch [%d/%d] starting.', epoch, max_epochs)
def epoch_end_callback(self, epoch, max_epochs, model, optimizer, scheduler, rank=0):
if rank == 0:
self.logger.debug('Epoch [%d/%d] finished.', epoch, max_epochs)
if epoch % self.cfg['model_checkpoint_interval'] == 0:
self.save_train_state(epoch, model, optimizer, scheduler)
def train_start_callback(self, cfg):
self.logger.debug('Beginning training session. CFG used:\n%s', str(cfg))
def train_end_callback(self):
self.logger.debug('Training session finished.')
def eval_start_callback(self, cfg):
self.logger.debug('Beginning testing session. CFG used:\n%s', str(cfg))
def eval_end_callback(self, dataset, predictions, epoch_evaluated):
metrics = self.save_epoch_results(dataset, predictions, epoch_evaluated)
self.logger.debug('Testing session finished on model after epoch %d.', epoch_evaluated)
self.logger.info('Results:\n %s', str(metrics))
def save_epoch_results(self, dataset, predictions, epoch):
# setup dirs
epoch_results_path = os.path.join(self.results_dirpath, 'epoch_{:04d}'.format(epoch))
predictions_dir = os.path.join(epoch_results_path, '{}_predictions'.format(dataset.split))
os.makedirs(predictions_dir, exist_ok=True)
# eval metrics
metrics = dataset.eval_predictions(predictions, output_basedir=predictions_dir)
# log tensorboard metrics
for key in metrics:
self.tensorboard_writer.add_scalar('{}_metrics/{}'.format(dataset.split, key), metrics[key], epoch)
# save metrics
metrics_path = os.path.join(epoch_results_path, '{}_metrics.json'.format(dataset.split))
with open(metrics_path, 'w') as results_file:
json.dump(metrics, results_file)
# save the cfg used
with open(os.path.join(epoch_results_path, 'config.yaml'), 'w') as cfg_file:
cfg_file.write(str(self.cfg))
return metrics
```
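The class above only provides callbacks; the training loop that drives them lives elsewhere in the repository. The following condensed sketch (added for illustration, with a placeholder model, loss and import path) shows how those callbacks are intended to be called:
```python
import torch
from lib.experiment import Experiment  # assumed module path

exp = Experiment('demo_exp', mode='train')
exp.set_cfg({'model_checkpoint_interval': 1})

model = torch.nn.Linear(10, 2)                              # placeholder model
optimizer = torch.optim.Adam(model.parameters())
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)

max_epochs, max_iter = 2, 5
exp.train_start_callback(exp.cfg)
for epoch in range(1, max_epochs + 1):
    exp.epoch_start_callback(epoch, max_epochs)
    for it in range(max_iter):
        loss = torch.tensor(0.0)                            # placeholder loss
        exp.iter_end_callback(epoch, max_epochs, it, max_iter,
                              loss.item(), {'cls_loss': 0.0})
    exp.epoch_end_callback(epoch, max_epochs, model, optimizer, scheduler)
exp.train_end_callback()
```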
#### File: lib/models/laneatt_vp.py
```python
import math
import cv2
import torch
import numpy as np
import torch.nn as nn
from torchvision.models import resnet18, resnet34
from nms import nms
from lib.lane import Lane
from lib.focal_loss import FocalLoss
#from .resnet import resnet122 as resnet122_cifar
from .resnet import Resnet50
from .matching import match_proposals_with_targets
from time import *
from torch.nn import BatchNorm2d
import torch.nn.functional as F
from .loss import OhemCELoss, OhemCELoss_weighted
class ConvBNReLU(nn.Module):
def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1, dilation=1, *args, **kwargs):
super(ConvBNReLU, self).__init__()
self.conv = nn.Conv2d(in_chan,
out_chan,
kernel_size = ks,
stride = stride,
padding = padding,
dilation = dilation,
bias = True)
self.bn = BatchNorm2d(out_chan)
self.relu = nn.ReLU(inplace=True)
self.init_weight()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
def init_weight(self):
for ly in self.children():
if isinstance(ly, nn.Conv2d):
nn.init.kaiming_normal_(ly.weight, a=1)
if not ly.bias is None: nn.init.constant_(ly.bias, 0)
class ASPP(nn.Module):
def __init__(self, in_chan=2048, out_chan=256, with_gp=True, *args, **kwargs):
super(ASPP, self).__init__()
self.with_gp = with_gp
self.conv1 = ConvBNReLU(in_chan, out_chan, ks=1, dilation=1, padding=0)
self.conv2 = ConvBNReLU(in_chan, out_chan, ks=3, dilation=6, padding=6)
self.conv3 = ConvBNReLU(in_chan, out_chan, ks=3, dilation=12, padding=12)
self.conv4 = ConvBNReLU(in_chan, out_chan, ks=3, dilation=18, padding=18)
if self.with_gp:
self.avg = nn.AdaptiveAvgPool2d((1, 1))
self.conv1x1 = ConvBNReLU(in_chan, out_chan, ks=1)
self.conv_out = ConvBNReLU(out_chan*5, out_chan, ks=1)
else:
self.conv_out = ConvBNReLU(out_chan*4, out_chan, ks=1)
self.init_weight()
def forward(self, x):
H, W = x.size()[2:]
feat1 = self.conv1(x)
feat2 = self.conv2(x)
feat3 = self.conv3(x)
feat4 = self.conv4(x)
if self.with_gp:
avg = self.avg(x)
feat5 = self.conv1x1(avg)
feat5 = F.interpolate(feat5, (H, W), mode='bilinear', align_corners=True)
feat = torch.cat([feat1, feat2, feat3, feat4, feat5], 1)
else:
feat = torch.cat([feat1, feat2, feat3, feat4], 1)
feat = self.conv_out(feat)
return feat
def init_weight(self):
for ly in self.children():
if isinstance(ly, nn.Conv2d):
nn.init.kaiming_normal_(ly.weight, a=1)
if not ly.bias is None: nn.init.constant_(ly.bias, 0)
class Decoder(nn.Module):
def __init__(self, n_classes, low_chan=256, *args, **kwargs):
super(Decoder, self).__init__()
#self.conv_low = ConvBNReLU(low_chan, 48, ks=1, padding=0)
self.conv_cat = nn.Sequential(
ConvBNReLU(256, 256, ks=3, padding=1),
ConvBNReLU(256, 256, ks=3, padding=1),
)
self.conv_out_6classes = nn.Conv2d(256, n_classes, kernel_size=1, bias=False)
self.init_weight()
def forward(self, feat_aspp):
# H, W = feat_low.size()[2:]
# feat_low = self.conv_low(feat_low)
# feat_aspp_up = F.interpolate(feat_aspp, (H, W), mode='bilinear',
# align_corners=True)
# feat_cat = torch.cat([feat_low, feat_aspp_up], dim=1)
feat_out = self.conv_cat(feat_aspp)
logits = self.conv_out_6classes(feat_out)
return logits
def init_weight(self):
for ly in self.children():
if isinstance(ly, nn.Conv2d):
nn.init.kaiming_normal_(ly.weight, a=1)
if not ly.bias is None: nn.init.constant_(ly.bias, 0)
class LaneATTVP(nn.Module):
def __init__(self,
backbone='resnet34',
pretrained_backbone=False,
S=72,
img_w=640,
img_h=360,
w_interval=10.,
on_line=False,
weighted_loss = False,
anchors_freq_path=None,
topk_anchors=None,
anchor_feat_channels=64):
super(LaneATTVP, self).__init__()
# Some definitions
self.weighted_loss = weighted_loss
# backbone_nb_channels = 2048
# self.stride = 16
# self.feature_extractor = Resnet50(stride=self.stride)
self.feature_extractor, backbone_nb_channels, self.stride = get_backbone(backbone, pretrained_backbone)
self.grid_pixel = 20
self.grid_strips = int(self.grid_pixel / (img_h / S) * 2 + 1)
angles = torch.arange(0.0, 180.0 + w_interval, w_interval).clamp(1, 179)
self.img_w = img_w
self.img_h = img_h
self.n_strips = S - 1
self.n_offsets = S
self.fmap_h = img_h // self.stride
fmap_w = img_w // self.stride
self.fmap_w = fmap_w
self.anchor_ys = torch.linspace(1, 0, steps=self.n_offsets, dtype=torch.float32)
self.anchor_cut_ys = torch.linspace(1, 0, steps=self.fmap_h, dtype=torch.float32)
self.anchor_feat_channels = anchor_feat_channels
# Anchor angles, same ones used in Line-CNN
self.left_angles = [72., 60., 49., 39., 30., 22.]
self.right_angles = [108., 120., 131., 141., 150., 158.]
self.bottom_angles = [165., 150., 141., 131., 120., 108., 100., 90., 80., 72., 60., 49., 39., 30., 15.]
self.vp_base_anchors = self.gen_vp_base_anchors(angles, self.grid_pixel, img_h / S, on_line)
self.n_proposal = self.vp_base_anchors.shape[0]
# # Generate anchors
# # anchors and the anchors' coordinates on the feature map
# self.anchors, self.anchors_cut = self.generate_anchors(lateral_n=72, bottom_n=128)
# # Filter masks if `anchors_freq_path` is provided
# if anchors_freq_path is not None:
# anchors_mask = torch.load(anchors_freq_path).cpu()
# assert topk_anchors is not None
# ind = torch.argsort(anchors_mask, descending=True)[:topk_anchors]
# self.anchors = self.anchors[ind]
# self.anchors_cut = self.anchors_cut[ind]
# # Pre compute indices for the anchor pooling
# # when cutting, flatten the cut coordinates to n x 1, which is better than cutting per batch
# self.cut_zs, self.cut_ys, self.cut_xs, self.invalid_mask = self.compute_anchor_cut_indices(
# self.anchor_feat_channels, fmap_w, self.fmap_h)
self.cut_zs, self.cut_ys = self.compute_anchor_cut_indices_proposals_ys_zs(self.anchor_feat_channels, self.fmap_w, self.fmap_h)
# Setup and initialize layers
self.conv1 = nn.Conv2d(backbone_nb_channels, self.anchor_feat_channels, kernel_size=1)
self.cls_layer = nn.Linear(2 * self.anchor_feat_channels * self.fmap_h, 2)
self.reg_layer = nn.Linear(2 * self.anchor_feat_channels * self.fmap_h, self.n_offsets + 1)
#self.attention_layer = nn.Linear(self.anchor_feat_channels * self.fmap_h, len(self.anchors) - 1)
self.attention_layer = nn.Linear(self.anchor_feat_channels * self.fmap_h, self.n_proposal - 1)
self.initialize_layer(self.attention_layer)
self.initialize_layer(self.conv1)
self.initialize_layer(self.cls_layer)
self.initialize_layer(self.reg_layer)
# added: vanishing point branch
#channel = 256
# added: predict the vanishing point, the number of lanes and the angles
self.aspp = ASPP(in_chan=backbone_nb_channels, out_chan=256, with_gp=False)
self.decoder = Decoder(2, low_chan=256)
n_min = 8 * self.img_w * self.img_h // 16
if weighted_loss:
print('weighted')
self.vp_criteria = OhemCELoss_weighted(thresh=0.7, n_min=n_min, n_classes=2).cuda()
else:
self.vp_criteria = OhemCELoss(thresh=0.7, n_min=n_min).cuda()
def forward(self, x, rpn_proposals = None, vp=None, conf_threshold=None, nms_thres=0, nms_topk=3000):
mini_batch, _, H, W = x.size()
batch_features = self.feature_extractor(x)
# vanishing point computation
feat_aspp = self.aspp(batch_features)
vp_logits = self.decoder(feat_aspp)
vp_logits = F.interpolate(vp_logits, (self.img_h, self.img_w), mode='bilinear', align_corners=True)
vp_probs = F.softmax(vp_logits, 1)
vp_preds = torch.argmax(vp_probs, 1)
pred_vps = []
for pred_i in range(len(vp_preds)):
vp_x_y = torch.mean(torch.nonzero(vp_preds[pred_i], as_tuple=False).float(), 0)
pred_vps.append(vp_x_y)
pred_vps = torch.stack(pred_vps)[:, [1, 0]]
# if the segmentation map predicts no vanishing point, set the vanishing point to 0
mask = (pred_vps[:, 0] > -1000).reshape(-1, 1, 1)
pred_vps[:, 0] = torch.where(torch.isnan(pred_vps[:, 0]), torch.tensor(0.0, device = pred_vps.device), pred_vps[:, 0])
pred_vps[:, 1] = torch.where(torch.isnan(pred_vps[:, 1]), torch.tensor(0.0, device = pred_vps.device), pred_vps[:, 1])
batch_features = self.conv1(batch_features)
batch_size = len(x)
#begin_time = time()
anchors, anchors_cut = self.gen_vp_edge_anchors(pred_vps)
# import pdb
# pdb.set_trace()
anchors = anchors.reshape(batch_size, self.n_proposal, -1)
anchors_cut = anchors_cut.reshape(batch_size, self.n_proposal, -1)
anchors = mask * anchors + (~mask) * torch.zeros_like(anchors)
anchors_cut = mask * anchors_cut + (~mask) * torch.zeros_like(anchors_cut)
cut_xs, invalid_mask = self.compute_anchor_cut_indices_proposals_xs(anchors, anchors_cut,
self.anchor_feat_channels, self.fmap_w, self.fmap_h)
#end_time = time()
#print('cut time', end_time - begin_time)
#begin_time = time()
# (batch_size, n_proposals, n_fmaps, self.fmap_h, 1())
batch_anchor_features = self.cut_anchor_features_proposals(batch_features, cut_xs, invalid_mask, self.cut_zs, self.cut_ys)
# Join proposals from all images into a single proposals features batch
batch_anchor_features = batch_anchor_features.view(-1, self.anchor_feat_channels * self.fmap_h)
# Add attention features
softmax = nn.Softmax(dim=1)
scores = self.attention_layer(batch_anchor_features)
attention = softmax(scores).reshape(x.shape[0], self.n_proposal, -1)
attention_matrix = torch.eye(attention.shape[1], device=x.device).repeat(x.shape[0], 1, 1)
non_diag_inds = torch.nonzero(attention_matrix == 0., as_tuple=False)
attention_matrix[:] = 0
attention_matrix[non_diag_inds[:, 0], non_diag_inds[:, 1], non_diag_inds[:, 2]] = attention.flatten()
batch_anchor_features = batch_anchor_features.reshape(x.shape[0], self.n_proposal , -1)
attention_features = torch.bmm(torch.transpose(batch_anchor_features, 1, 2),
torch.transpose(attention_matrix, 1, 2)).transpose(1, 2)
attention_features = attention_features.reshape(-1, self.anchor_feat_channels * self.fmap_h)
batch_anchor_features = batch_anchor_features.reshape(-1, self.anchor_feat_channels * self.fmap_h)
batch_anchor_features = torch.cat((attention_features, batch_anchor_features), dim=1)
# Predict
cls_logits = self.cls_layer(batch_anchor_features)
reg = self.reg_layer(batch_anchor_features)
# Undo joining
cls_logits = cls_logits.reshape(x.shape[0], -1, cls_logits.shape[1])
reg = reg.reshape(x.shape[0], -1, reg.shape[1])
# Add offsets to anchors
reg_proposals = torch.zeros((*cls_logits.shape[:2], 5 + self.n_offsets), device=x.device)
reg_proposals += anchors
reg_proposals[:, :, :2] = cls_logits
reg_proposals[:, :, 4:] += reg
# Apply nms
proposals_list = self.nms(reg_proposals, attention_matrix, nms_thres, nms_topk, conf_threshold, anchors)
#end_time = time()
return proposals_list, vp_logits, pred_vps, None
def nms(self, batch_proposals, batch_attention_matrix, nms_thres, nms_topk, conf_threshold, anchors=None):
softmax = nn.Softmax(dim=1)
proposals_list = []
for batch_idx, (proposals, attention_matrix) in enumerate(zip(batch_proposals, batch_attention_matrix)):
anchor_inds = torch.arange(batch_proposals.shape[1], device=proposals.device)
anchor = anchors[batch_idx]
# The gradients do not have to (and can't) be calculated for the NMS procedure
with torch.no_grad():
scores = softmax(proposals[:, :2])[:, 1]
# defaults to None; passing a non-None value here is problematic
if conf_threshold is not None:
# apply confidence threshold
above_threshold = scores > conf_threshold
proposals = proposals[above_threshold]
scores = scores[above_threshold]
anchor_inds = anchor_inds[above_threshold]
if proposals.shape[0] == 0:
proposals_list.append((proposals[[]], anchors[[]], attention_matrix[[]], None))
continue
keep, num_to_keep, _ = nms(proposals, scores, overlap=nms_thres, top_k=nms_topk)
keep = keep[:num_to_keep]
proposals = proposals[keep]
anchor_inds = anchor_inds[keep]
attention_matrix = attention_matrix[anchor_inds]
proposals_list.append((proposals, anchors[batch_idx][keep], attention_matrix, anchor_inds))
return proposals_list
def loss(self, proposals_list, vp_logits, vp_preds, targets, vp_labels= None, lane_logits=None, lane_labels = None, cls_loss_weight=10):
vp_loss = self.vp_criteria(vp_logits, vp_labels)
with torch.no_grad():
gt_vps = []
for gt_i in range(len(targets)):
vp_x_y = torch.mean(torch.nonzero(vp_labels[gt_i], as_tuple=False).float(), 0)
gt_vps.append(vp_x_y)
gt_vps = torch.stack(gt_vps)[:, [1, 0]]
gt_vps[:, 0] = torch.where(torch.isnan(gt_vps[:, 0]), torch.tensor(-0.0, device = gt_vps.device), gt_vps[:, 0])
gt_vps[:, 1] = torch.where(torch.isnan(gt_vps[:, 1]), torch.tensor(-0.0, device = gt_vps.device), gt_vps[:, 1])
vp_dis = torch.mean(torch.abs(vp_preds - gt_vps))
focal_loss = FocalLoss(alpha=0.25, gamma=2.)
smooth_l1_loss = nn.SmoothL1Loss()
cls_loss = 0
reg_loss = 0
valid_imgs = len(targets)
total_positives = 0
for (proposals, anchors, _, _), target in zip(proposals_list, targets):
# Filter lanes that do not exist (confidence == 0)
target = target[target[:, 1] == 1]
if len(target) == 0:
# If there are no targets, all proposals have to be negatives (i.e., 0 confidence)
cls_target = proposals.new_zeros(len(proposals)).long()
cls_pred = proposals[:, :2]
# import pdb
# pdb.set_trace()
cls_loss += focal_loss(cls_pred, cls_target).sum()
continue
if len(proposals) == 0:
continue
# Gradients are also not necessary for the positive & negative matching
with torch.no_grad():
# import pdb
# pdb.set_trace()
try:
positives_mask, invalid_offsets_mask, negatives_mask, target_positives_indices = match_proposals_with_targets(
self, anchors, target)
except:
import pdb
pdb.set_trace()
positives = proposals[positives_mask]
num_positives = len(positives)
total_positives += num_positives
negatives = proposals[negatives_mask]
num_negatives = len(negatives)
# Handle edge case of no positives found
if num_positives == 0:
cls_target = proposals.new_zeros(len(proposals)).long()
cls_pred = proposals[:, :2]
# import pdb
# pdb.set_trace()
cls_loss += focal_loss(cls_pred, cls_target).sum()
continue
# Get classification targets
all_proposals = torch.cat([positives, negatives], 0)
cls_target = proposals.new_zeros(num_positives + num_negatives).long()
cls_target[:num_positives] = 1.
cls_pred = all_proposals[:, :2]
# Regression targets
reg_pred = positives[:, 4:]
with torch.no_grad():
target = target[target_positives_indices]
positive_starts = (positives[:, 2] * self.n_strips).round().long()
target_starts = (target[:, 2] * self.n_strips).round().long()
target[:, 4] -= positive_starts - target_starts
all_indices = torch.arange(num_positives, dtype=torch.long)
ends = (positive_starts + target[:, 4] - 1).round().long()
invalid_offsets_mask = torch.zeros((num_positives, 1 + self.n_offsets + 1),
dtype=torch.int) # length + S + pad
invalid_offsets_mask[all_indices, 1 + positive_starts] = 1
invalid_offsets_mask[all_indices, 1 + ends + 1] -= 1
invalid_offsets_mask = invalid_offsets_mask.cumsum(dim=1) == 0
invalid_offsets_mask = invalid_offsets_mask[:, :-1]
invalid_offsets_mask[:, 0] = False
reg_target = target[:, 4:]
reg_target[invalid_offsets_mask] = reg_pred[invalid_offsets_mask]
# Loss calc
reg_loss += smooth_l1_loss(reg_pred, reg_target)
cls_loss += focal_loss(cls_pred, cls_target).sum() / num_positives
# Batch mean
cls_loss /= valid_imgs
reg_loss /= valid_imgs
loss = reg_loss + cls_loss * cls_loss_weight + vp_loss * 5
return loss, {'vp_loss': vp_loss, 'vp_dis': vp_dis, 'cls_loss': cls_loss, 'reg_loss': reg_loss, 'batch_positives': total_positives}
def compute_anchor_cut_indices(self, n_fmaps, fmaps_w, fmaps_h):
# definitions
n_proposals = len(self.anchors_cut)
# indexing
unclamped_xs = torch.flip((self.anchors_cut[:, 5:] / self.stride).round().long(), dims=(1,))
unclamped_xs = unclamped_xs.unsqueeze(2)
unclamped_xs = torch.repeat_interleave(unclamped_xs, n_fmaps, dim=0).reshape(-1, 1)
cut_xs = torch.clamp(unclamped_xs, 0, fmaps_w - 1)
unclamped_xs = unclamped_xs.reshape(n_proposals, n_fmaps, fmaps_h, 1)
invalid_mask = (unclamped_xs < 0) | (unclamped_xs > fmaps_w)
cut_ys = torch.arange(0, fmaps_h)
cut_ys = cut_ys.repeat(n_fmaps * n_proposals)[:, None].reshape(n_proposals, n_fmaps, fmaps_h)
cut_ys = cut_ys.reshape(-1, 1)
cut_zs = torch.arange(n_fmaps).repeat_interleave(fmaps_h).repeat(n_proposals)[:, None]
return cut_zs, cut_ys, cut_xs, invalid_mask
def compute_anchor_cut_indices_proposals(self, anchors, anchors_cut, n_fmaps, fmaps_w, fmaps_h):
n_proposals = self.n_proposal
anchors_cut = anchors_cut.reshape(-1, anchors_cut.shape[-1])
unclamped_xs = torch.flip((anchors_cut[:, 5:] / self.stride).round().long(), dims=(1,))
unclamped_xs = unclamped_xs.unsqueeze(2)
unclamped_xs = torch.repeat_interleave(unclamped_xs, n_fmaps, dim=0).reshape(-1, 1)
cut_xs = torch.clamp(unclamped_xs, 0, fmaps_w - 1)
cut_xs = cut_xs.reshape(anchors.shape[0], -1, 1)
unclamped_xs = unclamped_xs.reshape(anchors.shape[0], n_proposals, n_fmaps, fmaps_h, 1)
invalid_mask = (unclamped_xs < 0) | (unclamped_xs > fmaps_w)
cut_ys = torch.arange(0, fmaps_h)
cut_ys = cut_ys.repeat(n_fmaps * n_proposals)[:, None].reshape(n_proposals, n_fmaps, fmaps_h)
cut_ys = cut_ys.reshape(-1, 1)
cut_zs = torch.arange(n_fmaps).repeat_interleave(fmaps_h).repeat(n_proposals)[:, None]
return cut_zs, cut_ys, cut_xs, invalid_mask
def compute_anchor_cut_indices_proposals_xs(self, anchors, anchors_cut, n_fmaps, fmaps_w, fmaps_h):
n_proposals = self.n_proposal
anchors_cut = anchors_cut.reshape(-1, anchors_cut.shape[-1])
unclamped_xs = torch.flip((anchors_cut[:, 5:] / self.stride).round().long(), dims=(1,))
unclamped_xs = unclamped_xs.unsqueeze(2)
unclamped_xs = torch.repeat_interleave(unclamped_xs, n_fmaps, dim=0).reshape(-1, 1)
cut_xs = torch.clamp(unclamped_xs, 0, fmaps_w - 1)
cut_xs = cut_xs.reshape(anchors.shape[0], -1, 1)
unclamped_xs = unclamped_xs.reshape(anchors.shape[0], n_proposals, n_fmaps, fmaps_h, 1)
invalid_mask = (unclamped_xs < 0) | (unclamped_xs > fmaps_w)
return cut_xs, invalid_mask
def compute_anchor_cut_indices_proposals_ys_zs(self, n_fmaps, fmaps_w, fmaps_h):
n_proposals = self.n_proposal
cut_ys = torch.arange(0, fmaps_h)
cut_ys = cut_ys.repeat(n_fmaps * n_proposals)[:, None].reshape(n_proposals, n_fmaps, fmaps_h)
cut_ys = cut_ys.reshape(-1, 1)
cut_zs = torch.arange(n_fmaps).repeat_interleave(fmaps_h).repeat(n_proposals)[:, None]
return cut_zs, cut_ys
def cut_anchor_features(self, features):
# definitions
batch_size = features.shape[0]
n_proposals = len(self.anchors)
n_fmaps = features.shape[1]
batch_anchor_features = torch.zeros((batch_size, n_proposals, n_fmaps, self.fmap_h, 1), device=features.device)
# actual cutting
for batch_idx, img_features in enumerate(features):
rois = img_features[self.cut_zs, self.cut_ys, self.cut_xs].view(n_proposals, n_fmaps, self.fmap_h, 1)
rois[self.invalid_mask] = 0
batch_anchor_features[batch_idx] = rois
return batch_anchor_features
def cut_anchor_features_proposals(self, features, cut_xs, invalid_mask, cut_zs, cut_ys):
# definitions
batch_size = features.shape[0]
n_proposals = self.n_proposal
n_fmaps = features.shape[1]
batch_anchor_features = torch.zeros((batch_size, n_proposals, n_fmaps, self.fmap_h, 1), device=features.device)
# actual cutting
for batch_idx, img_features in enumerate(features):
rois = img_features[cut_zs, cut_ys, cut_xs[batch_idx]].view(n_proposals, n_fmaps, self.fmap_h, 1)
rois[invalid_mask[batch_idx]] = 0
batch_anchor_features[batch_idx] = rois
return batch_anchor_features
def generate_anchors(self, lateral_n, bottom_n):
left_anchors, left_cut = self.generate_side_anchors(self.left_angles, x=0., nb_origins=lateral_n)
right_anchors, right_cut = self.generate_side_anchors(self.right_angles, x=1., nb_origins=lateral_n)
bottom_anchors, bottom_cut = self.generate_side_anchors(self.bottom_angles, y=1., nb_origins=bottom_n)
return torch.cat([left_anchors, bottom_anchors, right_anchors]), torch.cat([left_cut, bottom_cut, right_cut])
def generate_side_anchors(self, angles, nb_origins, x=None, y=None):
if x is None and y is not None:
starts = [(x, y) for x in np.linspace(1., 0., num=nb_origins)]
elif x is not None and y is None:
starts = [(x, y) for y in np.linspace(1., 0., num=nb_origins)]
else:
raise Exception('Please define exactly one of `x` or `y` (not neither nor both)')
n_anchors = nb_origins * len(angles)
# each row, first for x and second for y:
# 2 scores, 1 start_y, 1 start_x, 1 length, S coordinates; score[0] = negative prob, score[1] = positive prob
anchors = torch.zeros((n_anchors, 2 + 2 + 1 + self.n_offsets))
anchors_cut = torch.zeros((n_anchors, 2 + 2 + 1 + self.fmap_h))
for i, start in enumerate(starts):
for j, angle in enumerate(angles):
k = i * len(angles) + j
anchors[k] = self.generate_anchor(start, angle)
anchors_cut[k] = self.generate_anchor(start, angle, cut=True)
return anchors, anchors_cut
def _meshgrid(self, x, y, row_major=True):
"""Generate mesh grid of x and y.
Args:
x (torch.Tensor): Grids of x dimension.
y (torch.Tensor): Grids of y dimension.
row_major (bool, optional): Whether to return y grids first.
Defaults to True.
Returns:
tuple[torch.Tensor]: The mesh grids of x and y.
"""
xx = x.repeat(len(y))
yy = y.view(-1, 1).repeat(1, len(x)).view(-1)
if row_major:
return xx, yy
else:
return yy, xx
def gen_vp_edge_anchors(self, vps):
"""
:param vps: (batch_size, 2)
:self.vp_base_anchors : (n_proposal, 3)
:return:
"""
vps = torch.cat((vps, torch.zeros_like(vps[:, 0:1])), dim=-1)
#(batch_size, n_proposal, (x, y, angle))
vps_proposals = vps.unsqueeze(1) + self.vp_base_anchors.unsqueeze(0)
x_y_angles = vps_proposals.reshape(-1, 3)
# convert so that each anchor starts from the image border
x_y_angles = get_inter_with_border_mul(x_y_angles, self.img_h, self.img_w)
x_y_angles[:, :2] = x_y_angles[:, :2] / torch.tensor([self.img_w, self.img_h], device = vps.device)
return self.generate_anchor_parallel(x_y_angles, False), \
self.generate_anchor_parallel(x_y_angles, True)
def generate_anchor_parallel(self, x_y_angles, cut=False):
if cut:
anchor_ys = self.anchor_cut_ys
anchor = torch.zeros(x_y_angles.shape[0], 2 + 2 + 1 + self.fmap_h, device=x_y_angles.device)
else:
anchor_ys = self.anchor_ys
anchor = torch.zeros(x_y_angles.shape[0], 2 + 2 + 1 + self.n_offsets, device=x_y_angles.device)
angle = x_y_angles[:, 2] * math.pi / 180. # degrees to radians
start_x, start_y = x_y_angles[:, 0], x_y_angles[:, 1]
anchor[:, 2] = 1 - start_y
anchor[:, 3] = start_x
anchor[:, 5:] = (start_x * self.img_w).unsqueeze(1) - (
1 - anchor_ys.unsqueeze(0) - 1 + start_y.unsqueeze(1)) / torch.tan(angle).unsqueeze(1) * self.img_h
return anchor
def gen_vp_base_anchors(self, angles, grid_pixels, step, on_line=False):
x_range = torch.arange(-grid_pixels, grid_pixels + step, step)
if on_line:
y_range = torch.arange(-0, 0 + step, step)
else:
y_range = torch.arange(-grid_pixels, grid_pixels + step, step)
shift_xx, shift_yy = self._meshgrid(x_range, y_range)
shift_ww = torch.zeros_like(shift_xx)
shifts = torch.stack([shift_xx, shift_yy, shift_ww], dim=-1)
cws = torch.tensor([0.0, 0.0])
cws = cws.repeat(len(angles), 1)
angles = angles.unsqueeze(1).float()
base_anchors = torch.cat([cws, angles], dim=1)
# anchors laid out as a rectangular grid around a single point
vp_base_anchors = base_anchors[None, :, :] + shifts[:, None, :]
vp_base_anchors = vp_base_anchors.view(-1, 3)
return vp_base_anchors
def _angle_enum(self, cw, angles):
cw = cw.repeat(len(angles), 1).float()
angles = angles.unsqueeze(1).float()
return torch.cat([cw, angles], dim=1)
# start is normalized
def generate_anchor(self, start, angle, cut=False):
if cut:
if angle < -1000:
return -1e6 * torch.ones(2 + 2 + 1 + self.fmap_h)
anchor_ys = self.anchor_cut_ys
anchor = torch.zeros(2 + 2 + 1 + self.fmap_h)
else:
if angle < -1000:
return -1e6 * torch.ones(2 + 2 + 1 +self.n_offsets)
anchor_ys = self.anchor_ys
anchor = torch.zeros(2 + 2 + 1 + self.n_offsets)
angle = angle * math.pi / 180. # degrees to radians
start_x, start_y = start
anchor[2] = 1 - start_y
anchor[3] = start_x
anchor[5:] = start_x * self.img_w - (1 - anchor_ys - 1 + start_y) / math.tan(angle) * self.img_h
return anchor
def draw_anchors(self, img_w, img_h, k=None):
base_ys = self.anchor_ys.numpy()
img = np.zeros((img_h, img_w, 3), dtype=np.uint8)
i = -1
for anchor in self.anchors:
i += 1
if k is not None and i != k:
continue
anchor = anchor.numpy()
xs = anchor[5:]
ys = base_ys * img_h
points = np.vstack((xs, ys)).T.round().astype(int)
for p_curr, p_next in zip(points[:-1], points[1:]):
img = cv2.line(img, tuple(p_curr), tuple(p_next), color=(0, 255, 0), thickness=5)
return img
@staticmethod
def initialize_layer(layer):
if isinstance(layer, (nn.Conv2d, nn.Linear)):
torch.nn.init.normal_(layer.weight, mean=0., std=0.001)
if layer.bias is not None:
torch.nn.init.constant_(layer.bias, 0)
def proposals_to_pred(self, proposals):
self.anchor_ys = self.anchor_ys.to(proposals.device)
self.anchor_ys = self.anchor_ys.double()
lanes = []
for lane in proposals:
lane_xs = lane[5:] / self.img_w
start = int(round(lane[2].item() * self.n_strips))
length = int(round(lane[4].item()))
end = start + length - 1
end = min(end, len(self.anchor_ys) - 1)
# end = label_end
# if the proposal does not start at the bottom of the image,
# extend its proposal until the x is outside the image
mask = ~((((lane_xs[:start] >= 0.) &
(lane_xs[:start] <= 1.)).cpu().numpy()[::-1].cumprod()[::-1]).astype(bool))  # np.bool is removed in recent NumPy
lane_xs[end + 1:] = -2
lane_xs[:start][mask] = -2
lane_ys = self.anchor_ys[lane_xs >= 0]
lane_xs = lane_xs[lane_xs >= 0]
lane_xs = lane_xs.flip(0).double()
lane_ys = lane_ys.flip(0)
if len(lane_xs) <= 1:
continue
points = torch.stack((lane_xs.reshape(-1, 1), lane_ys.reshape(-1, 1)), dim=1).squeeze(2)
lane = Lane(points=points.cpu().numpy(),
metadata={
'start_x': lane[3],
'start_y': lane[2],
'conf': lane[1]
})
lanes.append(lane)
return lanes
def decode(self, proposals_list, as_lanes=False):
softmax = nn.Softmax(dim=1)
decoded = []
for proposals, _, _, _ in proposals_list:
proposals[:, :2] = softmax(proposals[:, :2])
proposals[:, 4] = torch.round(proposals[:, 4])
if proposals.shape[0] == 0:
decoded.append([])
continue
if as_lanes:
pred = self.proposals_to_pred(proposals)
else:
pred = proposals
decoded.append(pred)
return decoded
def cuda(self, device=None):
cuda_self = super().cuda(device)
cuda_self.vp_base_anchors = cuda_self.vp_base_anchors.cuda(device)
cuda_self.anchor_ys = cuda_self.anchor_ys.cuda(device)
cuda_self.anchor_cut_ys = cuda_self.anchor_cut_ys.cuda(device)
cuda_self.cut_zs = cuda_self.cut_zs.cuda(device)
cuda_self.cut_ys = cuda_self.cut_ys.cuda(device)
# cut_xs and invalid_mask are computed per batch in forward(), so they are not
# moved here (this mirrors the to() method below)
# cuda_self.cut_xs = cuda_self.cut_xs.cuda(device)
# cuda_self.invalid_mask = cuda_self.invalid_mask.cuda(device)
return cuda_self
def to(self, *args, **kwargs):
device_self = super().to(*args, **kwargs)
# device_self.anchors = device_self.anchors.to(*args, **kwargs)
device_self.vp_base_anchors = device_self.vp_base_anchors.to(*args, **kwargs)
device_self.anchor_ys = device_self.anchor_ys.to(*args, **kwargs)
device_self.anchor_cut_ys = device_self.anchor_cut_ys.to(*args, **kwargs)
device_self.cut_zs = device_self.cut_zs.to(*args, **kwargs)
device_self.cut_ys = device_self.cut_ys.to(*args, **kwargs)
# device_self.cut_xs = device_self.cut_xs.to(*args, **kwargs)
# device_self.invalid_mask = device_self.invalid_mask.to(*args, **kwargs)
return device_self
def get_backbone(backbone, pretrained=False):
if backbone == 'resnet122':
backbone = resnet122_cifar()
fmap_c = 64
stride = 4
elif backbone == 'resnet34':
back = resnet34(pretrained=pretrained)
back.load_state_dict(torch.load('resnet34-333f7ec4.pth'))
backbone = torch.nn.Sequential(*list(back.children())[:-2])
fmap_c = 512
stride = 32
elif backbone == 'resnet18':
backbone = torch.nn.Sequential(*list(resnet18(pretrained=pretrained).children())[:-2])
fmap_c = 512
stride = 32
else:
raise NotImplementedError('Backbone not implemented: `{}`'.format(backbone))
return backbone, fmap_c, stride
def get_inter_with_border_mul(bboxes, H, W):
"""
Compute the intersections of N anchors with the image border in parallel.
bboxes : torch.Tensor (N, 3) in (x, y, angle_in_degrees)
"""
#bboxes = bboxes.float()
k1 = (H-1-bboxes[:, 1]) / (-bboxes[:, 0] + 1e-6)
k2 = (H-1-bboxes[:, 1]) / (W-1-bboxes[:, 0] + 1e-6)
k = torch.tan(bboxes[:, 2] * np.pi / 180)
mask1 = ((bboxes[:, 2] >= 90) & (k >= k1)).reshape((-1, 1))
mask3 = ((bboxes[:, 2] < 90) & (k <=k2)).reshape((-1, 1))
mask2 = (~(mask1 | mask3)).reshape((-1, 1))
#print('mask', mask1, mask2, mask3)
# y of the intersection with the left border
p_l = torch.zeros_like(bboxes[:, :2])
p_d = torch.zeros_like(p_l)
p_r = torch.zeros_like(p_l)
p_l[:, 1] = -k*bboxes[:, 0] + bboxes[:, 1]
# x of the intersection with the bottom border
p_d[:, 1].fill_(H-1)
p_d[:, 0] = (H-1-bboxes[:, 1]) / (k + 1e-6) + bboxes[:, 0]
# y of the intersection with the right border
p_r[:, 0].fill_(W-1)
p_r[:, 1] = k*(W-1-bboxes[:, 0]) + bboxes[:, 1]
inter_p = mask1 * p_l + mask2 * p_d + mask3 * p_r
return torch.cat((inter_p, bboxes[:, 2:3]), 1)
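# A minimal, hypothetical sanity check for the border-intersection helper above
# (added for illustration; the values are made up). A ray starting inside a
# 640x360 image at 120 degrees should exit through the bottom border (y == H-1).
if __name__ == "__main__":
    demo_anchors = torch.tensor([[320.0, 180.0, 120.0]])
    print(get_inter_with_border_mul(demo_anchors, H=360, W=640))
    # expected roughly: tensor([[216.7, 359.0, 120.0]])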
``` |
{
"source": "jinmingteo/pytorch-image-models",
"score": 3
} |
#### File: jinmingteo/pytorch-image-models/classification_model.py
```python
import os
import time
import logging
import numpy as np
import torch
from torchvision import transforms
from timm.models import create_model, apply_test_time_pool
from timm.data import ImageDataset, create_loader, resolve_data_config
from timm.utils import AverageMeter, setup_default_logging
torch.backends.cudnn.benchmark = True
class Classifier_Model:
def __init__(self, model_name, checkpoint_file, num_classes,
test_time_pool=False, img_size=None, mean=None, std=None):
self.model = create_model(model_name=model_name, num_classes=num_classes,
in_chans=3, checkpoint_path=checkpoint_file)
self.logger = logging.getLogger('inference')
self.logger.info('Model %s created, param count: %d' %
(model_name, sum([m.numel() for m in self.model.parameters()])))
self.config = resolve_data_config(args=dict(
img_size=img_size,
mean=mean,
std=std
), model=self.model)
if test_time_pool:
self.model , self.test_time_pool = apply_test_time_pool(self.model, self.config)
self.model.cuda()
self.model.eval()
def predict(self, img):
'''
img: A cv2 image in RGB format
output: classification
'''
data_transforms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(self.config['mean'], self.config['std'])
])
img = data_transforms(img).cuda().unsqueeze(0)
self.model.eval()
labels = self.model(img)
labels = torch.nn.functional.softmax(labels, dim=1)
labels = labels.detach().cpu().numpy()
return labels.argmax(), labels.max()
if __name__ == '__main__':
import cv2
model = Classifier_Model('resnet50', 'weights/resnet50_model_best.pth.tar', num_classes=5)
img = cv2.imread('imagenette2-160/train/n01440764/ILSVRC2012_val_00000293.JPEG')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # predict() expects an RGB image
print(model.predict(img))
``` |
{
"source": "jinmingyi1998/pytorch-OpCounter",
"score": 2
} |
#### File: pytorch-OpCounter/thop/fx_profile.py
```python
import logging
import torch
import torch as th
import torch.nn as nn
from distutils.version import LooseVersion
if LooseVersion(torch.__version__) < LooseVersion("1.8.0"):
logging.warning(
f"torch.fx requires version higher than 1.8.0. "\
f"But You are using an old version PyTorch {torch.__version__}. ")
def count_clamp(input_shapes, output_shapes):
return 0
def count_mul(input_shapes, output_shapes):
# element-wise
return output_shapes[0].numel()
def count_nn_linear(input_shapes, output_shapes):
in_shape = input_shapes[0]
out_shape = output_shapes[0]
in_features = in_shape[-1]
num_elements = out_shape.numel()
return in_features * num_elements
count_map = {
nn.Linear: count_nn_linear,
"clamp": count_clamp,
"<built-in function mul>": count_mul,
"<built-in function truediv>": count_mul,
}
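# Note (added for illustration): keys may be either nn.Module subclasses or the
# string form of the traced target, so further ops can be registered the same
# way if needed, e.g. count_map[nn.ReLU] = count_clamp for a zero-MAC activation.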
from torch.fx import symbolic_trace
from torch.fx.passes.shape_prop import ShapeProp
def null_print(*args, **kwargs):
return
def fx_profile(m: nn.Module, input: th.Tensor, verbose=True):
gm : torch.fx.GraphModule = symbolic_trace(m)
g = gm.graph
ShapeProp(gm).propagate(input)
fprint = null_print
if verbose:
fprint = print
v_maps = {}
total_flops = 0
for node in gm.graph.nodes:
# print(f"{node.target},\t{node.op},\t{node.meta['tensor_meta'].dtype},\t{node.meta['tensor_meta'].shape}")
fprint(f"NodeOP:{node.op},\tTarget:{node.target},\tNodeName:{node.name},\tNodeArgs:{node.args}")
node_op_type = str(node.target).split(".")[-1]
node_flops = None
input_shapes = []
output_shapes = []
fprint("input_shape:", end="\t")
for arg in node.args:
if str(arg) not in v_maps:
continue
fprint(f"{v_maps[str(arg)]}", end="\t")
input_shapes.append(v_maps[str(arg)])
fprint()
fprint(f"output_shape:\t{node.meta['tensor_meta'].shape}")
output_shapes.append(node.meta['tensor_meta'].shape)
if node.op in ["output", "placeholder"]:
node_flops = 0
elif node.op == "call_function":
# torch internal functions
if str(node.target) in count_map:
node_flops = count_map[str(node.target)](input_shapes, output_shapes)
pass
elif node.op == "call_method":
# torch internal functions
# print(str(node.target) in count_map, str(node.target), count_map.keys())
if str(node.target) in count_map:
node_flops = count_map[str(node.target)](input_shapes, output_shapes)
elif node.op == "call_module":
# torch.nn modules
m = getattr(net, node.target, None)
fprint(type(m), type(m) in count_map)
if type(m) in count_map:
node_flops = count_map[type(m)](input_shapes, output_shapes)
if node_op_type not in ["relu", "maxpool", "avgpool"]:
fprint(f"weight_shape: {net.state_dict()[node.target + '.weight'].shape}")
else:
fprint(f"weight_shape: None")
v_maps[str(node.name)] = node.meta['tensor_meta'].shape
fprint(f"NodeFlops: {node_flops}")
if node_flops is not None:
total_flops += node_flops
fprint("==" * 20)
return total_flops
if __name__ == '__main__':
class MyOP(nn.Module):
def forward(self, input):
return input / 1
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear1 = torch.nn.Linear(5, 3)
self.linear2 = torch.nn.Linear(5, 3)
self.myop = MyOP()
def forward(self, x):
out1 = self.linear1(x)
out2 = self.linear2(x).clamp(min=0.0, max=1.0)
return self.myop(out1 + out2)
net = MyModule()
data = th.randn(20, 5)
flops = fx_profile(net, data, verbose=False)
print(flops)
```
#### File: pytorch-OpCounter/thop/onnx_profile.py
```python
import torch
import torch.nn
import onnx
from onnx import numpy_helper
import numpy as np
from thop.vision.onnx_counter import onnx_operators
class OnnxProfile():
def __init__(self) -> None:
pass
def calculate_params(self, model: onnx.ModelProto):
onnx_weights = model.graph.initializer
params = 0
for onnx_w in onnx_weights:
try:
weight = numpy_helper.to_array(onnx_w)
params += np.prod(weight.shape)
except Exception as _:
pass
return params
def create_dict(self, weight, input, output):
diction = {}
for w in weight:
dim = np.array(w.dims)
diction[str(w.name)] = dim
if (dim.size == 1):
diction[str(w.name)] = np.append(1, dim)
for i in input:
# print(i.type.tensor_type.shape.dim[0].dim_value)
dim = np.array(i.type.tensor_type.shape.dim[0].dim_value)
# print(i.type.tensor_type.shape.dim.__sizeof__())
#name2dims[str(i.name)] = [dim]
dim = []
for key in i.type.tensor_type.shape.dim:
dim = np.append(dim, int(key.dim_value))
# print(key.dim_value)
# print(dim)
diction[str(i.name)] = dim
if(dim.size == 1):
diction[str(i.name)] = np.append(1, dim)
for o in output:
dim = np.array(o.type.tensor_type.shape.dim[0].dim_value)
diction[str(o.name)] = [dim]
if(dim.size == 1):
diction[str(o.name)] = np.append(1, dim)
return diction
def nodes_counter(self, diction, node):
if node.op_type not in onnx_operators:
print("Sorry, we haven't add ", node.op_type, "into dictionary.")
return 0, None, None
else:
fn = onnx_operators[node.op_type]
return fn(diction, node)
def calculate_macs(self, model: onnx.ModelProto) -> torch.DoubleTensor:
weight = model.graph.initializer
nodes = model.graph.node
input = model.graph.input
output = model.graph.output
name2dims = self.create_dict(weight, input, output)
macs = 0
for n in nodes:
macs_adding, out_size, outname = self.nodes_counter(name2dims, n)
name2dims[outname] = out_size
macs += macs_adding
return np.array(macs[0])
```
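A short usage sketch for the profiler above (added for illustration; the model file name is a placeholder and `onnx` must be installed):
```python
import onnx
from thop.onnx_profile import OnnxProfile

model = onnx.load('model.onnx')        # placeholder path to an exported ONNX model
profiler = OnnxProfile()
print('params:', profiler.calculate_params(model))
print('MACs:  ', profiler.calculate_macs(model))
```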
#### File: thop/vision/onnx_counter.py
```python
import torch
import numpy as np
from onnx import numpy_helper
from thop.vision.basic_hooks import zero_ops
from .counter import counter_matmul, counter_zero_ops,\
counter_conv, counter_mul, counter_norm, counter_pow,\
counter_sqrt, counter_div, counter_softmax, counter_avgpool
def onnx_counter_matmul(diction, node):
input1 = node.input[0]
input2 = node.input[1]
input1_dim = diction[input1]
input2_dim = diction[input2]
out_size = np.append(input1_dim[0:-1], input2_dim[-1])
output_name = node.output[0]
macs = counter_matmul(input1_dim, out_size[-2:])
return macs, out_size, output_name
def onnx_counter_add(diction, node):
if np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size:
out_size = diction[node.input[1]]
else:
out_size = diction[node.input[0]]
output_name = node.output[0]
macs = counter_zero_ops()
# if '140' in diction:
# print(diction['140'],output_name)
return macs, out_size, output_name
def onnx_counter_conv(diction, node):
# print(node)
# bias,kernelsize,outputsize
dim_bias = 0
input_count = 0
for i in node.input:
input_count += 1
if (input_count == 3):
dim_bias = 1
dim_weight = diction[node.input[1]]
else:
dim_weight = diction[node.input[1]]
for attr in node.attribute:
# print(attr)
if(attr.name == 'kernel_shape'):
dim_kernel = attr.ints # kw,kh
if(attr.name == 'strides'):
dim_stride = attr.ints
if(attr.name == 'pads'):
dim_pad = attr.ints
if(attr.name == 'dilations'):
dim_dil = attr.ints
if(attr.name == 'group'):
group = attr.i
# print(dim_dil)
dim_input = diction[node.input[0]]
output_size = np.append(
dim_input[0:-np.array(dim_kernel).size-1], dim_weight[0])
hw = np.array(dim_input[-np.array(dim_kernel).size:])
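    # Standard convolution output size per spatial dim:
    # out = floor((in + 2*pad - dilation*(kernel - 1) - 1) / stride + 1)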
for i in range(hw.size):
hw[i] = int((hw[i]+2*dim_pad[i]-dim_dil[i] *
(dim_kernel[i]-1)-1)/dim_stride[i]+1)
output_size = np.append(output_size, hw)
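    # Conv MACs are roughly output_elements * in_channels_per_group * prod(kernel_size), plus one add per output element when a bias input is present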
macs = counter_conv(dim_bias, np.prod(dim_kernel),
np.prod(output_size), dim_weight[1], group)
output_name = node.output[0]
# if '140' in diction:
# print("conv",diction['140'],output_name)
return macs, output_size, output_name
def onnx_counter_constant(diction, node):
# print("constant",node)
macs = counter_zero_ops()
output_name = node.output[0]
output_size = [1]
#print(macs, output_size, output_name)
return macs, output_size, output_name
def onnx_counter_mul(diction, node):
if np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size:
input_size = diction[node.input[1]]
else:
input_size = diction[node.input[0]]
macs = counter_mul(np.prod(input_size))
output_size = diction[node.input[0]]
output_name = node.output[0]
return macs, output_size, output_name
def onnx_counter_bn(diction, node):
input_size = diction[node.input[0]]
macs = counter_norm(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_relu(diction, node):
input_size = diction[node.input[0]]
macs = counter_zero_ops()
output_name = node.output[0]
output_size = input_size
#print(macs, output_size, output_name)
# if '140' in diction:
# print("relu",diction['140'],output_name)
return macs, output_size, output_name
def onnx_counter_reducemean(diction, node):
keep_dim = 0
for attr in node.attribute:
if('axes' in attr.name):
dim_axis = np.array(attr.ints)
elif('keepdims' in attr.name):
keep_dim = attr.i
input_size = diction[node.input[0]]
macs = counter_zero_ops()
output_name = node.output[0]
if (keep_dim == 1):
output_size = input_size
else:
output_size = np.delete(input_size, dim_axis)
#output_size = input_size
return macs, output_size, output_name
def onnx_counter_sub(diction, node):
input_size = diction[node.input[0]]
macs = counter_zero_ops()
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_pow(diction, node):
if np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size:
input_size = diction[node.input[1]]
else:
input_size = diction[node.input[0]]
macs = counter_pow(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_sqrt(diction, node):
input_size = diction[node.input[0]]
macs = counter_sqrt(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_div(diction, node):
if np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size:
input_size = diction[node.input[1]]
else:
input_size = diction[node.input[0]]
macs = counter_div(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_instance(diction, node):
input_size = diction[node.input[0]]
macs = counter_norm(np.prod(input_size))
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_softmax(diction, node):
input_size = diction[node.input[0]]
dim = node.attribute[0].i
nfeatures = input_size[dim]
batch_size = np.prod(input_size) / nfeatures
macs = counter_softmax(nfeatures, batch_size)
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_pad(diction, node):
# # TODO add constant name and output real vector
# if
# if (np.array(diction[node.input[1]]).size >= np.array(diction[node.input[0]]).size):
# input_size = diction[node.input[1]]
# else:
# input_size = diction[node.input[0]]
input_size = diction[node.input[0]]
macs = counter_zero_ops()
output_name = node.output[0]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_averagepool(diction, node):
# TODO add support of ceil_mode and floor
macs = counter_avgpool(np.prod(diction[node.input[0]]))
output_name = node.output[0]
dim_pad = None
for attr in node.attribute:
# print(attr)
if(attr.name == 'kernel_shape'):
dim_kernel = attr.ints # kw,kh
elif(attr.name == 'strides'):
dim_stride = attr.ints
elif(attr.name == 'pads'):
dim_pad = attr.ints
elif(attr.name == 'dilations'):
dim_dil = attr.ints
# print(dim_dil)
dim_input = diction[node.input[0]]
hw = dim_input[-np.array(dim_kernel).size:]
if dim_pad is not None:
for i in range(hw.size):
hw[i] = int((hw[i]+2*dim_pad[i]-dim_kernel[i])/dim_stride[i]+1)
output_size = np.append(dim_input[0:-np.array(dim_kernel).size], hw)
else:
for i in range(hw.size):
hw[i] = int((hw[i]-dim_kernel[i])/dim_stride[i]+1)
output_size = np.append(dim_input[0:-np.array(dim_kernel).size], hw)
#print(macs, output_size, output_name)
return macs, output_size, output_name
def onnx_counter_flatten(diction, node):
# print(node)
macs = counter_zero_ops()
output_name = node.output[0]
axis = node.attribute[0].i
input_size = diction[node.input[0]]
output_size = np.append(input_size[axis-1], np.prod(input_size[axis:]))
# print("flatten",output_size)
return macs, output_size, output_name
def onnx_counter_gemm(diction, node):
# print(node)
# Compute Y = alpha * A' * B' + beta * C
input_size = diction[node.input[0]]
dim_weight = diction[node.input[1]]
# print(input_size,dim_weight)
macs = np.prod(input_size) * dim_weight[1] + dim_weight[0]
output_size = np.append(input_size[0:-1], dim_weight[0])
output_name = node.output[0]
return macs, output_size, output_name
def onnx_counter_maxpool(diction, node):
# TODO add support of ceil_mode and floor
# print(node)
macs = counter_zero_ops()
output_name = node.output[0]
dim_pad = None
for attr in node.attribute:
# print(attr)
if(attr.name == 'kernel_shape'):
dim_kernel = attr.ints # kw,kh
elif(attr.name == 'strides'):
dim_stride = attr.ints
elif(attr.name == 'pads'):
dim_pad = attr.ints
elif(attr.name == 'dilations'):
dim_dil = attr.ints
# print(dim_dil)
dim_input = diction[node.input[0]]
hw = dim_input[-np.array(dim_kernel).size:]
if dim_pad is not None:
for i in range(hw.size):
hw[i] = int((hw[i]+2*dim_pad[i]-dim_kernel[i])/dim_stride[i]+1)
output_size = np.append(dim_input[0:-np.array(dim_kernel).size], hw)
else:
for i in range(hw.size):
hw[i] = int((hw[i]-dim_kernel[i])/dim_stride[i]+1)
output_size = np.append(dim_input[0:-np.array(dim_kernel).size], hw)
#print(macs, output_size, output_name)
return macs, output_size, output_name
def onnx_counter_globalaveragepool(diction, node):
macs = counter_zero_ops()
output_name = node.output[0]
input_size = diction[node.input[0]]
output_size = input_size
return macs, output_size, output_name
def onnx_counter_concat(diction, node):
# print(node)
# print(diction[node.input[0]])
axis = node.attribute[0].i
input_size = diction[node.input[0]]
for i in node.input:
dim_concat = diction[i][axis]
output_size = input_size
output_size[axis] = dim_concat
output_name = node.output[0]
macs = counter_zero_ops()
return macs, output_size, output_name
def onnx_counter_clip(diction, node):
macs = counter_zero_ops()
output_name = node.output[0]
input_size = diction[node.input[0]]
output_size = input_size
return macs, output_size, output_name
onnx_operators = {
'MatMul': onnx_counter_matmul,
'Add': onnx_counter_add,
'Conv': onnx_counter_conv,
'Mul': onnx_counter_mul,
'Constant': onnx_counter_constant,
'BatchNormalization': onnx_counter_bn,
'Relu': onnx_counter_relu,
'ReduceMean': onnx_counter_reducemean,
'Sub': onnx_counter_sub,
'Pow': onnx_counter_pow,
'Sqrt': onnx_counter_sqrt,
'Div': onnx_counter_div,
'InstanceNormalization': onnx_counter_instance,
'Softmax': onnx_counter_softmax,
'Pad': onnx_counter_pad,
'AveragePool': onnx_counter_averagepool,
'MaxPool': onnx_counter_maxpool,
'Flatten': onnx_counter_flatten,
'Gemm': onnx_counter_gemm,
'GlobalAveragePool': onnx_counter_globalaveragepool,
'Concat': onnx_counter_concat,
'Clip': onnx_counter_clip,
None: None,
}
``` |
{
"source": "jinminhao/cloud-native-ernie",
"score": 2
} |
#### File: cloud-native-ernie/doc/simple-case.py
```python
import sys
sys.path.append('./ERNIE')
import numpy as np
from sklearn.metrics import f1_score
import paddle as P
import paddle.fluid as F
import paddle.fluid.layers as L
import paddle.fluid.dygraph as D
from ernie.tokenizing_ernie import ErnieTokenizer
from ernie.modeling_ernie import ErnieModelForSequenceClassification
BATCH=32
MAX_SEQLEN=300
LR=5e-5
EPOCH=10
D.guard().__enter__()
ernie = ErnieModelForSequenceClassification.from_pretrained('ernie-1.0', num_labels=3)
optimizer = F.optimizer.Adam(LR, parameter_list=ernie.parameters())
tokenizer = ErnieTokenizer.from_pretrained('ernie-1.0')
def make_data(path):
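    # Each line of the ChnSentiCorp split is "text<TAB>label"; the first line is a header and is skipped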
data = []
for i, l in enumerate(open(path)):
if i == 0:
continue
l = l.strip().split('\t')
text, label = l[0], int(l[1])
text_id, _ = tokenizer.encode(text)
text_id = text_id[:MAX_SEQLEN]
text_id = np.pad(text_id, [0, MAX_SEQLEN-len(text_id)], mode='constant')
label_id = np.array(label+1)
data.append((text_id, label_id))
return data
train_data = make_data('./chnsenticorp/train/part.0')
test_data = make_data('./chnsenticorp/dev/part.0')
def get_batch_data(data, i):
d = data[i*BATCH: (i + 1) * BATCH]
feature, label = zip(*d)
feature = np.stack(feature)
label = np.stack(list(label))
feature = D.to_variable(feature)
label = D.to_variable(label)
return feature, label
for i in range(EPOCH):
np.random.shuffle(train_data)
#train
for j in range(len(train_data) // BATCH):
feature, label = get_batch_data(train_data, j)
loss, _ = ernie(feature, labels=label)
loss.backward()
optimizer.minimize(loss)
ernie.clear_gradients()
if j % 10 == 0:
print('train %d: loss %.5f' % (j, loss.numpy()))
# evaluate
if j % 100 == 0:
all_pred, all_label = [], []
with D.base._switch_tracer_mode_guard_(is_train=False):
ernie.eval()
                for jj in range(len(test_data) // BATCH):
                    feature, label = get_batch_data(test_data, jj)
loss, logits = ernie(feature, labels=label)
all_pred.extend(L.argmax(logits, -1).numpy())
all_label.extend(label.numpy())
ernie.train()
f1 = f1_score(all_label, all_pred, average='macro')
acc = (np.array(all_label) == np.array(all_pred)).astype(np.float32).mean()
print('acc %.5f' % acc)
``` |
{
"source": "jinmoo21/PWDriver",
"score": 3
} |
#### File: PWDriver/pwdriver/util.py
```python
import os
def parse_boolean(arg):
return arg.lower() in ['true', 'y', 'yes']
def get_pattern_matched_file(path, regex):
import re
file_list = []
pattern = re.compile(regex)
for file in os.listdir(path):
if pattern.match(file):
file_list.append(file)
return file_list
def set_file_executable(path):
if os.path.isfile(path) and not os.access(path, os.X_OK):
import stat
os.chmod(path, os.stat(path).st_mode | stat.S_IEXEC)
def get_logger(name=None):
import logging
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)-5s [%(name)s] '
'%(funcName)s(%(pathname)s:%(lineno)d): %(message)s')
console = logging.StreamHandler()
from pwdriver.val import LOG_DIR, LOG_NAME, ROOT_DIR
if not os.path.exists(os.path.join(ROOT_DIR, LOG_DIR)):
os.makedirs(os.path.join(ROOT_DIR, LOG_DIR))
file_handler = logging.FileHandler(os.path.join(ROOT_DIR, LOG_DIR, LOG_NAME))
console.setLevel(logging.INFO)
file_handler.setLevel(logging.INFO)
console.setFormatter(formatter)
file_handler.setFormatter(formatter)
logger.addHandler(console)
logger.addHandler(file_handler)
return logger
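# A minimal usage sketch (hypothetical caller, not part of the original file):
#   logger = get_logger(__name__)
#   logger.info('driver ready')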
``` |
{
"source": "Jinmun-Park/covid19_korea",
"score": 3
} |
#### File: covid19_korea/routes/trend_route.py
```python
import pandas as pd
import datetime as dt
from src.utils.api import flask_category
# ====================== Flask Blueprint ====================== #
from flask import Blueprint, render_template
df, df_category, df_channeltitle, df_category_view_per, df_category_like_per, df_category_comment_per, df_top_channel, df_top_category, df_top_comment = flask_category(command='daily')
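# The dataframes above are built once at import time (daily snapshot) and reused by every route below.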
bp = Blueprint('latest_trend', __name__, url_prefix='/latest_trend')
# ======================== Flask Route ======================== #
@bp.route('/', methods=["GET"])
def trend():
# Chart figures
category = [i for i in df_category.index]
category_rate = [i for i in df_category.카테고리]
category_view_per = [i for i in df_category_view_per.조회수]
category_like_per = [i for i in df_category_like_per.좋아요수]
category_comment_per = [i for i in df_category_comment_per.댓글수]
# Table
category_channel = df.groupby(['카테고리', '채널명']).sum().sort_values('조회수', ascending=False).reset_index()
category_channel['조회수'] = category_channel['조회수'].map("{:,}".format)
category_channel['좋아요수'] = category_channel['좋아요수'].map("{:,}".format)
category_channel['댓글수'] = category_channel['댓글수'].map("{:,}".format)
# Chart Basic information figures
category_top_channel = df_top_channel.채널명.iloc[0]
category_top_category = df_top_category.카테고리.iloc[0]
category_top_comment = format(df_top_comment.댓글수.iloc[0], ",")
return render_template('latest_trend.html',
category=category,
category_rate=category_rate,
category_channel=category_channel,
category_view_per=category_view_per,
category_like_per=category_like_per,
category_comment_per=category_comment_per,
category_top_channel=category_top_channel,
category_top_category=category_top_category,
category_top_comment=category_top_comment
)
@bp.route('/category', methods=["GET"])
def trend_category():
# Chart figures
category = [i for i in df_category.index]
category_rate = [i for i in df_category.카테고리]
category_view_per = [i for i in df_category_view_per.조회수]
category_like_per = [i for i in df_category_like_per.좋아요수]
category_comment_per = [i for i in df_category_comment_per.댓글수]
# Table
category_channel = df.groupby(['카테고리', '채널명']).sum().sort_values('조회수', ascending=False).reset_index()
category_channel['조회수'] = category_channel['조회수'].map("{:,}".format)
category_channel['좋아요수'] = category_channel['좋아요수'].map("{:,}".format)
category_channel['댓글수'] = category_channel['댓글수'].map("{:,}".format)
# Chart basic information figures
category_top_channel = df_top_channel.채널명.iloc[0]
category_top_category = df_top_category.카테고리.iloc[0]
category_top_comment = format(df_top_comment.댓글수.iloc[0], ",")
return render_template('category.html',
category=category,
category_rate=category_rate,
category_channel=category_channel,
category_view_per=category_view_per,
category_like_per=category_like_per,
category_comment_per=category_comment_per,
category_top_channel=category_top_channel,
category_top_category=category_top_category,
category_top_comment=category_top_comment
)
@bp.route('/channel', methods=["GET"])
def trend_channel():
# Importing function
if 'flask_channel' in globals():
pass
else:
from src.utils.api import flask_channel
global flask_channel
flask_channel = flask_channel(command='daily')
# Channel names
channel_label = [i for i in flask_channel.채널명]
channel_view = [i for i in (flask_channel.채널총조회수)/1000]
channel_subs = [i for i in (flask_channel.채널구독수)]
# Top channel information figures
top_channel_select = flask_channel[flask_channel['채널총조회수'] == flask_channel['채널총조회수'].max()]
top_channel = top_channel_select.채널명.iloc[0]
top_channel_num = format(int(top_channel_select.채널총조회수.iloc[0]), ",")
# Top channel top right information
top_channel_url = top_channel_select.썸네일.iloc[0]
top_channel_videoid = top_channel_select.동영상아이디.iloc[0]
top_channel_channelid = top_channel_select.채널아이디.iloc[0]
top_channel_publish = top_channel_select.채널개설날짜.iloc[0]
top_channel_cateogry = top_channel_select.카테고리.iloc[0]
top_channel_like = format(int(top_channel_select.좋아요수.iloc[0]), ",")
top_channel_comment = format(int(top_channel_select.댓글수.iloc[0]), ",")
# Top channel subscriptions
top_subs_num = format(int(flask_channel[flask_channel['채널구독수'] == flask_channel['채널구독수'].max()].채널구독수.iloc[0]), ",")
top_subs = flask_channel[flask_channel['채널구독수'] == flask_channel['채널구독수'].max()].채널명.iloc[0]
# Latest published channel
latest_channel_select = flask_channel.sort_values(by='채널개설날짜').tail(1)
latest_channel = latest_channel_select.채널명.iloc[0]
latest_channel_num = latest_channel_select.채널개설날짜.iloc[0]
# Top channel top right information
latest_channel_url = latest_channel_select.썸네일.iloc[0]
latest_channel_videoid = latest_channel_select.동영상아이디.iloc[0]
latest_channel_channelid = latest_channel_select.채널아이디.iloc[0]
#latest_channel_publish = latest_channel_select.채널개설날짜.iloc[0]
latest_channel_cateogry = latest_channel_select.카테고리.iloc[0]
latest_channel_like = format(int(latest_channel_select.좋아요수.iloc[0]), ",")
latest_channel_comment = format(int(latest_channel_select.댓글수.iloc[0]), ",")
# trend_channel_chart
channel_table = flask_channel[['동영상', '날짜', '채널명', '채널개설날짜', '카테고리', '채널총조회수', '채널구독수', '채널비디오수']]
return render_template('channel.html',
channel_label=channel_label,
channel_view=channel_view,
channel_subs=channel_subs,
top_channel=top_channel,
top_channel_num=top_channel_num,
top_channel_url=top_channel_url,
top_channel_videoid=top_channel_videoid,
top_channel_channelid=top_channel_channelid,
top_channel_publish= top_channel_publish,
top_channel_cateogry=top_channel_cateogry,
top_channel_like=top_channel_like,
top_channel_comment=top_channel_comment,
top_subs=top_subs,
top_subs_num=top_subs_num,
latest_channel=latest_channel,
latest_channel_num=latest_channel_num,
latest_channel_url=latest_channel_url,
latest_channel_videoid=latest_channel_videoid,
latest_channel_channelid=latest_channel_channelid,
latest_channel_cateogry=latest_channel_cateogry,
latest_channel_like=latest_channel_like,
latest_channel_comment=latest_channel_comment,
channel_table=channel_table
)
@bp.route('/timeframe', methods=["GET"])
def trend_timeframe():
db = flask_channel
return render_template('timeframe.html', db=db)
```
#### File: src/models/bert_load.py
```python
import pandas as pd
import re
import torch
from transformers import BertTokenizer
from transformers import BertForSequenceClassification, AdamW, BertConfig
from keras.preprocessing.sequence import pad_sequences
import numpy as np
from datetime import datetime, date
# GOOGLE CLOUD BUCKET
import os
from google.cloud import storage #pip install --upgrade google-cloud-storage
import io
# ====================== FUNCTION SETUP ====================== #
#print(torch.cuda.memory_allocated())
#print(torch.cuda.memory_reserved())
def read_pickle(file_name: str) -> pd.DataFrame:
return pd.read_pickle('Pickle/' + file_name)
def sentiment_score(x):
"""
SECTION : sentiment
    DESCRIPTION : Return the text emotion label for a sentiment score index
"""
if x == 0:
return("공포")
elif x == 1:
return("놀람")
elif x == 2:
return("분노")
elif x == 3:
return("슬픔")
elif x == 4:
return("중립")
elif x == 5:
return("행복")
else:
return("혐오")
def setup_device():
"""
SECTION : sentiment
USAGE : Select GPU or CPU
DESCRIPTION : Choose the device based on Torch library installation
"""
# Setup device (CPU or GPU)
if torch.cuda.is_available():
# Remove memory
torch.cuda.empty_cache()
# Set GPU
device = torch.device("cuda")
print('There are %d GPU(s) available.' % torch.cuda.device_count())
print('We will use the GPU:', torch.cuda.get_device_name(0))
else:
# Set CPU
device = torch.device("cpu")
print('No GPU available, using the CPU instead.')
return device
def load_model_bucket():
"""
    WARNING : 'load_model_bucket' is deprecated because downloading the model adds too much load time
SECTION : sentiment
USAGE : Connection to Google Storage Bucket
"""
# ====================== Setup ====================== #
pd.options.mode.chained_assignment = None # Off warning messages, default='warn'
starttime = datetime.now()
print(starttime)
    print('Started loading Trained BERT Model from Google Cloud')
# GOOGLE CREDENTIALS & SECRET MANAGER
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "config/youtubeapi-314206-46ffa30d1127.json"
# Initiate storage
storage_client = storage.Client()
bucket = storage_client.get_bucket('bert_ver2')
# Get blob
en_model_blob = bucket.get_blob('bert_model_gpu_v2.pth')
en_model = en_model_blob.download_as_string()
# Because model downloaded into string, need to convert it back
buffer = io.BytesIO(en_model)
# ====================== End time ====================== #
print('Successfully loaded Trained BERT Model from Google Cloud')
endtime = datetime.now()
print(endtime)
timetaken = endtime - starttime
print('Time taken : ' + timetaken.__str__())
return buffer
def load_model(path, device):
"""
SECTION : sentiment
DESCRIPTION :
    1. path : Path to the trained BERT model saved by 'bert_save.py'. The model can be loaded
       in two ways: from the Google Cloud bucket or from a local directory.
2. device : Loading device (CPU or GPU) from 'setup_device' function
"""
# ====================== Setup ====================== #
pd.options.mode.chained_assignment = None # Off warning messages, default='warn'
# Model setup
model = BertForSequenceClassification.from_pretrained("bert-base-multilingual-cased", num_labels=7)
model.load_state_dict(torch.load(path, map_location='cpu'))
model.to(device)
model.eval()
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased', do_lower_case=False)
return model, tokenizer
def run_model(model, tokenizer, sentences, device):
"""
SECTION : sentiment
DESCRIPTION :
1. sentences : put sentences extracted from youtube. Please note that the sentence should be in the nested list format
2. device : run setup_device() function
"""
# ====================== Setup ====================== #
pd.options.mode.chained_assignment = None # Off warning messages, default='warn'
# Pytorch evaluating model
model.eval()
# Tokenization
tokenized_texts = [tokenizer.tokenize(sent) for sent in sentences]
# Set the max length of sentence
MAX_LEN = 128
# Convert Tokens into index(array)
input_ids = [tokenizer.convert_tokens_to_ids(x) for x in tokenized_texts]
    # Truncate each sentence to MAX_LEN and pad the remainder with zeros
input_ids = pad_sequences(input_ids, maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
# Set attention mask
attention_masks = []
    # Set the attention mask to 1 for real tokens and 0 for padding
    # BERT skips attention over padding positions, which speeds up inference
for seq in input_ids:
seq_mask = [float(i > 0) for i in seq]
attention_masks.append(seq_mask)
# Converting data into pytorch tensor
inputs = torch.tensor(input_ids)
masks = torch.tensor(attention_masks)
# Inserting batch into GPU
b_input_ids = inputs.to(device)
b_input_mask = masks.to(device)
# No gradient calculation
with torch.no_grad():
# Running Forward
outputs = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask)
# Calculating loss
logits = outputs[0]
# Transferring data into CPU
logits = logits.detach().cpu().numpy()
return int(np.argmax(logits))
def run_sentiment(model, tokenizer, device):
"""
SECTION : sentiment
DESCRIPTION 1: Running sentiment analysis using comments from 'video_comment.pkl'
DESCRIPTION 2: Calling 'run_model' function to run BERT model
"""
# ====================== Setup ====================== #
pd.options.mode.chained_assignment = None # Off warning messages, default='warn'
starttime = datetime.now()
print(starttime)
# Read comment pickle after running youtube comment api
df = read_pickle('video_comment.pkl')
print("Successfully loaded video comments")
# Cleaning comment after the extraction
df_comment = df[['comment']]
df_comment['comment'] = [re.sub('[^가-힣 ]', '', s) for s in df_comment['comment']]
df_comment['comment'] = df_comment.comment.str.strip()
idx = df_comment[df_comment['comment'] == ''].index
df_comment = df_comment.drop(idx).reset_index(drop=True)
# Perform sentiment analysis using model
print("Started running sentiment analysis")
predict = []
for i in range(len(df_comment)):
score = run_model(sentences=[df_comment['comment'][i]], model=model, tokenizer=tokenizer, device=device)
predict.append(score)
# Converting sentiment scores to language
# result[0] has to be adjusted in the future in API
print("Started putting results into dataframe")
result = pd.DataFrame(predict)
result = result[0].apply(pd.Series)
result = result.merge(df_comment, left_index=True, right_index=True)
result = result.rename(columns={0: 'emotion'}, inplace=False)
result['emotion'] = result['emotion'].apply(sentiment_score)
# ====================== End time ====================== #
endtime = datetime.now()
print(endtime)
timetaken = endtime - starttime
print('Time taken : ' + timetaken.__str__())
return result
# ====================== RUNNING MODEL ====================== #
def run_predict():
"""
SECTION : sentiment
    WARNING : Using the Google bucket takes an extra 5 minutes, so 'load_model_bucket' is deprecated.
USAGE : run_predict()
    DESCRIPTION : Run sentiment analysis and store the results in a dataframe.
"""
device = setup_device()
# buffer = load_model_bucket()
# model, tokenizer = load_model(path=buffer, device=device)
model, tokenizer = load_model(path='bert_model_gpu_v2.pth', device=device)
result = run_sentiment(model=model, tokenizer=tokenizer, device=device)
return result
```
#### File: src/models/covid_eda.py
```python
import pandas as pd
# ====================== DEFINE FUNCTION ====================== #
def read_pickle(file_name: str) -> pd.DataFrame:
return pd.read_pickle('Pickle/' + file_name)
# ====================== DATA.GO.KR API RUNNING ====================== #
# Korea Government API
#run_covid_api()
# ==================================================================== #
# ====================== LOAD PICKLES ====================== #
df_age_sex = read_pickle('covid_age_sex.pkl')
df_vaccines = read_pickle('covid_vaccines.pkl')
df_city = read_pickle('covid_city.pkl')
df_cases = read_pickle('covid_cases.pkl')
``` |
{
"source": "jinmuovo/python-learning",
"score": 3
} |
#### File: python-learning/06_tool/logger.py
```python
import logging
def get_logger(name, log_file=None, log_level='DEBUG'):
"""
logger
    :param name: module name
    :param log_file: log file path; if None, log to standard output only
    :param log_level: log level
:return:
"""
logger = logging.getLogger(name)
logger.setLevel(log_level.upper())
formatter = logging.Formatter('[%(levelname)7s %(asctime)s %(module)s:%(lineno)d] %(message)s',
datefmt='%Y%m%d %I:%M:%S')
if log_file:
f_handle = logging.FileHandler(log_file)
f_handle.setFormatter(formatter)
logger.addHandler(f_handle)
handle = logging.StreamHandler()
handle.setFormatter(formatter)
logger.addHandler(handle)
return logger
logger = get_logger(__name__, log_file=None, log_level='DEBUG')
def set_log_level(log_level='INFO'):
logger.setLevel(log_level.upper())
if __name__ == '__main__':
logger.debug('hi')
logger.info('hi')
logger.error('hi')
logger.warning('hi')
set_log_level('info')
logger.debug('hi') # ignore
logger.info('hi')
logger.error('hi')
logger.warning('hi')
```
#### File: python-learning/06_tool/profile_demo.py
```python
def is_prime(num):
for factor in range(2, int(num ** 0.5) + 1):
if num % factor == 0:
return False
return True
class PrimeIter:
def __init__(self, total):
self.counter = 0
self.current = 1
self.total = total
def __iter__(self):
return self
def __next__(self):
if self.counter < self.total:
self.current += 1
while not is_prime(self.current):
self.current += 1
self.counter += 1
return self.current
raise StopIteration()
if __name__ == '__main__':
list(PrimeIter(10000))
``` |
{
"source": "jinmyeonglee/LKVOLearner",
"score": 2
} |
#### File: LKVOLearner/cropImage/crop_image.py
```python
from libsmop import *
from get_projection_mask import *
# crop_image.m
# Crops the given image to use only the portion where the projected depth
# image exists.
# Args:
# img - either a HxW image or a HxWxD image.
# Returns:
# img - a cropped version of the image.
@function
def crop_image(img=None,*args,**kwargs):
# varargin = crop_image.varargin
# nargin = crop_image.nargin
mask,sz=get_projection_mask(nargout=2)
# crop_image.m:10
if 2 == ndims(img):
img=reshape(img(mask),sz)
# crop_image.m:13
else:
D=size(img,3)
# crop_image.m:15
img=reshape(img,concat([dot(480,640),D]))
# crop_image.m:16
img=reshape(img(mask,arange()),concat([sz,D]))
# crop_image.m:17
return img
if __name__ == '__main__':
pass
```
#### File: LKVOLearner/cropImage/location_signal_maker.py
```python
from libsmop import *
# location_signal_maker.m
@function
def location_signal_maker(signal_number=None,*args,**kwargs):
# varargin = location_signal_maker.varargin
# nargin = location_signal_maker.nargin
location_signal=zeros(signal_number,2)
mesh_size=ceil(sqrt(dot(2,signal_number)))
mesh_x,mesh_y=meshgrid(arange(- mesh_size + 1,mesh_size - 1),arange(0,mesh_size - 1),nargout=2)
mesh_x=ravel(mesh_x)
mesh_y=ravel(mesh_y)
mesh_dist_value=(mesh_x ** 2 + mesh_y ** 2) + mesh_y / mesh_size + mesh_x / (mesh_size ** 2)
mesh_dist_value=mesh_dist_value + dot(signal_number,double(logical_and((mesh_y == 0),(mesh_x < 0))))
mesh_dist_sort=sort(mesh_dist_value)
for index in arange(1,signal_number).reshape(-1):
index_location=find(mesh_dist_value == mesh_dist_sort(index))
location_signal[index,1]=mesh_y(index_location)
location_signal[index,2]=mesh_x(index_location)
return location_signal
```
#### File: LKVOLearner/DEN/dataset.py
```python
import os
import pickle
from torch.utils import data
import numpy as np
from PIL import Image
from skimage import transform
class NyuV2(data.Dataset):
def __init__(self, root_dir, transform=None):
self.root_dir = root_dir
self.transform = transform
def __len__(self):
return len(os.listdir(os.path.join(self.root_dir, 'images')))
def __getitem__(self, index):
with open(os.path.join(self.root_dir, 'images', '{:05d}.p'.format(index)), 'rb') as f_img:
img = pickle.load(f_img)
with open(os.path.join(self.root_dir, 'depths', '{:05d}.p'.format(index)), 'rb') as f_depth:
depth = pickle.load(f_depth)
sample = {'image': img, 'depth': depth}
if self.transform:
sample = self.transform(sample)
return sample
class KITTIdataset(data.Dataset):
"""KITTIdataset"""
def __init__(self, list_file='train.txt', data_root_path='/data/prepared_raw_kitti', img_size=[128, 416], bundle_size=3, transform=None, isDen=True):
self.gt_root_path='/data/prepared_annotated_kitti'
self.data_root_path = data_root_path
self.img_size = img_size
self.bundle_size = bundle_size
self.frame_pathes = []
self.transform = transform
self.isDen = isDen
list_file = os.path.join(data_root_path, list_file)
with open(list_file) as file:
for line in file:
frame_path = line.strip()
seq_path, frame_name = frame_path.split(" ")
if seq_path in ['2011_09_26_drive_0119_sync_02', '2011_09_28_drive_0225_sync_02',
'2011_09_29_drive_0108_sync_02', '2011_09_30_drive_0072_sync_02',
'2011_10_03_drive_0058_sync_02', '2011_09_29_drive_0108_sync_03']:
print(seq_path)
continue
frame_path = os.path.join(seq_path, frame_name)
self.frame_pathes.append(frame_path)
def __len__(self):
return len(self.frame_pathes)
def __getitem__(self, item):
# read camera intrinsics
cam_file = os.path.join(self.data_root_path, self.frame_pathes[item]+'_cam.txt')
with open(cam_file) as file:
cam_intrinsics = [float(x) for x in next(file).split(',')]
camparams = np.asarray(cam_intrinsics)
# read image bundle
img_file = os.path.join(self.data_root_path, self.frame_pathes[item]+'.jpg')
seq, frame = self.frame_pathes[item].split("/")
gt_file = os.path.join(self.gt_root_path,self.frame_pathes[item]+'.jpg')
frames_cat = np.array(Image.open(img_file))
depth_cat = np.array(Image.open(gt_file))
depth_cat = np.expand_dims(depth_cat,axis=2)
#depth_cat = transform.resize(depth_cat, frames_cat.shape, mode='reflect', anti_aliasing=True)
# slice the image into #bundle_size number of images
frame_list = []
depth_list = []
print(gt_file)
for i in range(self.bundle_size):
frame_list.append(frames_cat[:,i*self.img_size[1]:(i+1)*self.img_size[1],:]) #crop image by (height * 416)*3
depth_list.append(depth_cat[:,i*self.img_size[1]:(i+1)*self.img_size[1],:])
#print(frame_list[i].shape, depth_list[i].shape)
#print(frame_list[i].shape, depth_list[i].shape)
#frames = np.asarray(frame_list).astype(float).transpose(0, 3, 1, 2)
frames = np.asarray(frame_list).astype(float)
#depth = np.asarray(depth_list).astype(float)
sample = {'frames': frames, 'depth':depth_list}
if self.transform:
sample = self.transform(sample) # : 3*128*416->3*4*128*416
if self.isDen:
return sample
else:
frames = np.asarray(frame_list).astype(float).transpose(0, 3, 1, 2)
lkvosample = {'frames': frames, 'stacked_images': sample['stacked_images']}
return lkvosample, camparams
```
#### File: LKVOLearner/DEN/prepare_gt_data.py
```python
from __future__ import division
import argparse
import scipy.misc
import numpy as np
from glob import glob
from joblib import Parallel, delayed
import os
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_dir", type=str, required=True, help="where the dataset is stored")
parser.add_argument("--dump_root", type=str, required=True, help="Where to dump the data")
parser.add_argument("--seq_length", type=int, required=True, help="Length of each training sequence")
parser.add_argument("--img_height", type=int, default=128, help="image height")
parser.add_argument("--img_width", type=int, default=416, help="image width")
parser.add_argument("--num_threads", type=int, default=4, help="number of threads to use")
args = parser.parse_args()
def concat_image_seq(seq):
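    # Stack the frames of one sequence side by side into a single wide image (H x W*seq_length)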
res = None
for i, im in enumerate(seq):
if i == 0:
res = im
else:
res = np.hstack((res, im))
return res
def dump_example(n, args):
if n % 2000 == 0:
print('Progress %d/%d....' % (n, data_loader.num_train))
example = data_loader.get_train_example_with_idx(n)
if example == False:
return
if example['image_seq'] is None:
print(example['file_name'])
raise Exception
image_seq = concat_image_seq(example['image_seq'])
dump_dir = os.path.join(args.dump_root, example['folder_name'])
# if not os.path.isdir(dump_dir):
# os.makedirs(dump_dir, exist_ok=True)
try:
os.makedirs(dump_dir)
except OSError:
if not os.path.isdir(dump_dir):
raise
dump_img_file = dump_dir + '/%s.jpg' % example['file_name']
try:
scipy.misc.imsave(dump_img_file, image_seq.astype(np.uint8))
print(dump_img_file, "saved!")
except Exception as E:
print("There is no", dump_img_file)
print(E)
def main():
if not os.path.exists(args.dump_root):
os.makedirs(args.dump_root)
global data_loader
from kitti_gt_loader import kitti_gt_loader
data_loader = kitti_gt_loader(args.dataset_dir,
split='eigen',
img_height=args.img_height,
img_width=args.img_width,
seq_length=args.seq_length)
Parallel(n_jobs=args.num_threads)(delayed(dump_example)(n, args) for n in range(data_loader.num_train))
# Split into train/val
# subfolders = os.listdir(args.dump_root)
# with open(args.dump_root + 'train.txt', 'w') as tf:
# with open(args.dump_root + 'val.txt', 'w') as vf:
# for s in subfolders:
# if not os.path.isdir(args.dump_root + '/%s' % s):
# continue
# imfiles = glob(os.path.join(args.dump_root, s, '*.jpg'))
# frame_ids = [os.path.basename(fi).split('.')[0] for fi in imfiles]
# for frame in frame_ids:
# if np.random.random() < 0.1:
# vf.write('%s %s\n' % (s, frame))
# else:
    #                 tf.write('%s %s\n' % (s, frame))
main()
```
#### File: LKVOLearner/src/LKVOLearnerFinetune.py
```python
from DirectVOLayer import DirectVO
from networks import VggDepthEstimator, PoseNet, FDCDepthEstimator
from ImagePyramid import ImagePyramidLayer
import torch.nn as nn
import torch
import numpy as np
from torch.autograd import Variable
from timeit import default_timer as timer
class FlipLR(nn.Module):
def __init__(self, imW, dim_w):
super(FlipLR, self).__init__()
inv_indices = torch.arange(imW-1, -1, -1).long()
self.register_buffer('inv_indices', inv_indices)
self.dim_w = dim_w
def forward(self, input):
return input.index_select(self.dim_w, Variable(self.inv_indices))
class LKVOLearner(nn.Module):
def __init__(self, img_size=[128, 416], ref_frame_idx=1, lambda_S=.5, use_ssim=True, smooth_term = 'lap', gpu_ids=[4]):
super(LKVOLearner, self).__init__()
self.lkvo = nn.DataParallel(LKVOKernel(img_size, smooth_term = smooth_term), device_ids=gpu_ids)
self.ref_frame_idx = ref_frame_idx
self.lambda_S = lambda_S
self.use_ssim = use_ssim
def forward(self, frames, camparams, max_lk_iter_num=10, lk_level=1):
cost, photometric_cost, smoothness_cost, ref_frame, ref_inv_depth \
= self.lkvo.forward(frames, camparams, self.ref_frame_idx, self.lambda_S, max_lk_iter_num=max_lk_iter_num, use_ssim=self.use_ssim, lk_level=lk_level)
return cost.mean(), photometric_cost.mean(), smoothness_cost.mean(), ref_frame, ref_inv_depth
def save_model(self, file_path):
torch.save(self.cpu().lkvo.module.depth_net.state_dict(),
file_path)
self.cuda()
def load_model(self, depth_net_file_path, pose_net_file_path):
self.lkvo.module.depth_net.load_state_dict(torch.load(depth_net_file_path))
self.lkvo.module.pose_net.load_state_dict(torch.load(pose_net_file_path))
def init_weights(self):
self.lkvo.module.depth_net.init_weights()
def get_parameters(self):
return self.lkvo.module.depth_net.parameters()
class LKVOKernel(nn.Module):
"""
    only supports a single training instance
"""
def __init__(self, img_size=[128, 416], smooth_term = 'lap'):
super(LKVOKernel, self).__init__()
self.img_size = img_size
self.fliplr_func = FlipLR(imW=img_size[1], dim_w=3)
self.vo = DirectVO(imH=img_size[0], imW=img_size[1], pyramid_layer_num=4)
self.pose_net = PoseNet(3)
#self.depth_net = VggDepthEstimator(img_size)
self.depth_net = FDCDepthEstimator(img_size)
self.pyramid_func = ImagePyramidLayer(chan=1, pyramid_layer_num=4)
self.smooth_term = smooth_term
def forward(self, frames, camparams, ref_frame_idx, lambda_S=.5, do_data_augment=True, use_ssim=True, max_lk_iter_num=10, lk_level=1):
assert(frames.size(0) == 1 and frames.dim() == 5)
frames = frames.squeeze(0)
camparams = camparams.squeeze(0).data
if do_data_augment:
if np.random.rand()>.5:
# print("fliplr")
frames = self.fliplr_func(frames)
camparams[2] = self.img_size[1] - camparams[2]
# camparams[5] = self.img_size[0] - camparams[5]
bundle_size = frames.size(0)
src_frame_idx = tuple(range(0,ref_frame_idx)) + tuple(range(ref_frame_idx+1,bundle_size))
# ref_frame = frames[ref_frame_idx, :, :, :]
# src_frames = frames[src_frame_idx, :, :, :]
frames_pyramid = self.vo.pyramid_func(frames)
ref_frame_pyramid = [frame[ref_frame_idx, :, :, :] for frame in frames_pyramid]
src_frames_pyramid = [frame[src_frame_idx, :, :, :] for frame in frames_pyramid]
self.vo.setCamera(fx=camparams[0], cx=camparams[2],
fy=camparams[4], cy=camparams[5])
inv_depth_pyramid = self.depth_net.forward((frames-127)/127)
inv_depth_mean_ten = inv_depth_pyramid[0].mean()*0.1
inv_depth_norm_pyramid = [depth/inv_depth_mean_ten for depth in inv_depth_pyramid]
inv_depth0_pyramid = self.pyramid_func(inv_depth_norm_pyramid[0], do_detach=False)
ref_inv_depth_pyramid = [depth[ref_frame_idx, :, :] for depth in inv_depth_norm_pyramid]
ref_inv_depth0_pyramid = [depth[ref_frame_idx, :, :] for depth in inv_depth0_pyramid]
src_inv_depth_pyramid = [depth[src_frame_idx, :, :] for depth in inv_depth_norm_pyramid]
src_inv_depth0_pyramid = [depth[src_frame_idx, :, :] for depth in inv_depth0_pyramid]
self.vo.init(ref_frame_pyramid=ref_frame_pyramid, inv_depth_pyramid=ref_inv_depth0_pyramid)
# init_pose with pose CNN
p = self.pose_net.forward((frames.view(1, -1, frames.size(2), frames.size(3))-127) / 127)
rot_mat_batch = self.vo.twist2mat_batch_func(p[0,:,0:3]).contiguous()
trans_batch = p[0,:,3:6].contiguous()#*inv_depth_mean_ten
# fine tune pose with direct VO
rot_mat_batch, trans_batch = self.vo.update_with_init_pose(src_frames_pyramid[0:lk_level], max_itr_num=max_lk_iter_num, rot_mat_batch=rot_mat_batch, trans_batch=trans_batch)
# rot_mat_batch, trans_batch = \
# self.vo.forward(ref_frame_pyramid, src_frames_pyramid, ref_inv_depth0_pyramid, max_itr_num=max_lk_iter_num)
photometric_cost = self.vo.compute_phtometric_loss(self.vo.ref_frame_pyramid, src_frames_pyramid, ref_inv_depth_pyramid, src_inv_depth_pyramid, rot_mat_batch, trans_batch, levels=[0,1,2,3], use_ssim=use_ssim)
smoothness_cost = self.vo.multi_scale_image_aware_smoothness_cost(inv_depth0_pyramid, frames_pyramid, levels=[2,3], type=self.smooth_term) \
+ self.vo.multi_scale_image_aware_smoothness_cost(inv_depth_norm_pyramid, frames_pyramid, levels=[2,3], type=self.smooth_term)
cost = photometric_cost + lambda_S*smoothness_cost
return cost, photometric_cost, smoothness_cost, self.vo.ref_frame_pyramid[0], ref_inv_depth0_pyramid[0]*inv_depth_mean_ten
if __name__ == "__main__":
from KITTIdataset import KITTIdataset
from torch.utils.data import DataLoader
from torch import optim
from torch.autograd import Variable
dataset = KITTIdataset()
dataloader = DataLoader(dataset, batch_size=3,
shuffle=True, num_workers=2, pin_memory=True)
lkvolearner = LKVOLearner(gpu_ids = [0])
def weights_init(m):
classname = m.__class__.__name__
if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.ConvTranspose2d):
# m.weight.data.normal_(0.0, 0.02)
m.bias.data = torch.zeros(m.bias.data.size())
lkvolearner.apply(weights_init)
lkvolearner.cuda()
optimizer = optim.Adam(lkvolearner.parameters(), lr=.0001)
for ii, data in enumerate(dataloader):
t = timer()
optimizer.zero_grad()
frames = Variable(data[0].float().cuda())
# print(data[1])
camparams = Variable(data[1])
a = lkvolearner.forward(frames, camparams)
print(timer()-t)
# print(a)
```
#### File: LKVOLearner/src/networks.py
```python
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.autograd import Variable
import numpy as np
from skimage import transform
import sys
sys.path.insert(0, '/root/LKVOLearner/DEN')
import modeling
import fdc
import den
sys.path.insert(0,"~/LKVOLearner/src/util")
import util
DISP_SCALING = 10
MIN_DISP = 0.01
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class ConvBlock(nn.Module):
def __init__(self, input_nc, output_nc, kernel_size):
super(ConvBlock, self).__init__()
p = int(np.floor((kernel_size - 1) / 2))
self.activation_fn = nn.ELU()
self.conv1 = Conv(input_nc, output_nc, kernel_size, 1, p, self.activation_fn)
# self.conv2 = Conv(output_nc, output_nc, kernel_size, 2, p)
self.conv2 = Conv(output_nc, output_nc, kernel_size, 1, p, None)
def forward(self, input):
x = self.conv1(input)
x = self.conv2(x)
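        # Replication-pad odd spatial dims so the 2x2 average pooling halves the resolution cleanly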
padding = [0, int(np.mod(input.size(-1), 2)), 0, int(np.mod(input.size(-2), 2))]
x_pad = torch.nn.ReplicationPad2d(padding)(x)
return torch.nn.AvgPool2d(kernel_size=2, stride=2, padding=0)(self.activation_fn(x_pad))
class UpConv(nn.Module):
def __init__(self, input_nc, output_nc, kernel_size):
super(UpConv, self).__init__()
self.deconv = nn.ConvTranspose2d(in_channels=input_nc,
out_channels=output_nc,
kernel_size=2,
bias=True,
stride=2,
padding=0)
self.activation_fn = nn.ELU()
def forward(self, input):
return self.activation_fn(self.deconv(input))
class Conv(nn.Module):
def __init__(self, input_nc, output_nc, kernel_size, stride, padding, activation_func=nn.ELU()):
super(Conv, self).__init__()
self.conv = nn.Conv2d(in_channels=input_nc,
out_channels=output_nc,
kernel_size=kernel_size,
stride=stride,
padding=0,
bias=True)
self.activation_fn = activation_func
self.pad_fn = nn.ReplicationPad2d(padding)
def forward(self, input):
if self.activation_fn == None:
return self.conv(self.pad_fn(input))
else:
return self.activation_fn(self.conv(self.pad_fn(input)))
class FDCInverseDepthMap(fdc.FDC):
def getInverseDepthMap(self, batch):
predictions = fdc.FDC.__call__(self, batch)
# print("predictions shape: ", len(predictions))
for i in range(len(predictions)):
for j in range(predictions[i].shape[0]):
predictions[i][j] = torch.tensor(list(map(lambda x: 0 if 1 / x == float('inf') else 1 / x, predictions[i][j])))
return predictions
class VggDepthEstimator(nn.Module):
def __init__(self, input_size=None):
super(VggDepthEstimator, self).__init__()
self.conv_layers = nn.ModuleList([ConvBlock(3, 32, 7)])
self.conv_layers.append(ConvBlock(32, 64, 5))
self.conv_layers.append(ConvBlock(64, 128, 3))
self.conv_layers.append(ConvBlock(128, 256, 3))
self.conv_layers.append(ConvBlock(256, 512, 3))
self.conv_layers.append(ConvBlock(512, 512, 3))
self.conv_layers.append(ConvBlock(512, 512, 3))
# print(conv_feat_sizes)
self.upconv_layers = nn.ModuleList([UpConv(512, 512, 3)])
self.iconv_layers = nn.ModuleList([Conv(512*2, 512, 3, 1, 1)])
self.upconv_layers.append(UpConv(512, 512, 3))
self.iconv_layers.append(Conv(512*2, 512, 3, 1, 1))
self.invdepth_layers = nn.ModuleList([Conv(512, 1, 3, 1, 1, nn.Sigmoid())])
self.upconv_layers.append(UpConv(512, 256, 3))
self.iconv_layers.append(Conv(256*2, 256, 3, 1, 1))
self.invdepth_layers.append(Conv(256, 1, 3, 1, 1, nn.Sigmoid()))
self.upconv_layers.append(UpConv(256, 128, 3))
self.iconv_layers.append(Conv(128*2, 128, 3, 1, 1))
self.invdepth_layers.append(Conv(128, 1, 3, 1, 1, nn.Sigmoid()))
self.upconv_layers.append(UpConv(128, 64, 3))
self.iconv_layers.append(Conv(64*2+1, 64, 3, 1, 1))
self.invdepth_layers.append(Conv(64, 1, 3, 1, 1, nn.Sigmoid()))
self.upconv_layers.append(UpConv(64, 32, 3))
self.iconv_layers.append(Conv(32*2+1, 32, 3, 1, 1))
self.invdepth_layers.append(Conv(32, 1, 3, 1, 1, nn.Sigmoid()))
self.upconv_layers.append(UpConv(32, 16, 3))
self.iconv_layers.append(Conv(16+1, 16, 3, 1, 1))
self.invdepth_layers.append(Conv(16, 1, 3, 1, 1, nn.Sigmoid()))
# self.invdepth_layers.append(InvDepth(16))
def init_weights(self):
def weights_init(m):
classname = m.__class__.__name__
if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.ConvTranspose2d):
# m.weight.data.normal_(0.0, 0.02)
m.bias.data = torch.zeros(m.bias.data.size())
self.apply(weights_init)
def forward(self, input):
conv_feat = self.conv_layers[0].forward(input)
self.conv_feats = [conv_feat]
for i in range(1, len(self.conv_layers)):
conv_feat = self.conv_layers[i].forward(self.conv_feats[i-1])
self.conv_feats.append(conv_feat)
upconv_feats = []
invdepth_pyramid = []
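        # Decoder with skip connections: each up-convolved feature is concatenated with the matching
        # encoder feature, and at the finer levels the upsampled previous inverse-depth prediction is appended as well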
for i in range(0, len(self.upconv_layers)):
if i==0:
x = self.upconv_layers[i].forward(self.conv_feats[-1])
else:
x = self.upconv_layers[i].forward(upconv_feats[i-1])
if i<len(self.upconv_layers)-1:
if x.size(-1) != self.conv_feats[-2-i].size(-1):
x = x[:, :, :, :-1]
if x.size(-2) != self.conv_feats[-2-i].size(-2):
x = x[:, :, :-1, :]
if i==(len(self.upconv_layers)-1):
x = torch.cat((x, nn.Upsample(scale_factor=2, mode='bilinear')(invdepth_pyramid[-1])), 1)
elif i > 3:
x = torch.cat((x, self.conv_feats[-(2+i)], nn.Upsample(scale_factor=2, mode='bilinear')(invdepth_pyramid[-1])), 1)
else:
x = torch.cat((x, self.conv_feats[-(2+i)]), 1)
upconv_feats.append(self.iconv_layers[i].forward(x))
if i>0:
# invdepth_pyramid.append(self.invdepth_layers[i-1].forward(upconv_feats[-1])*DISP_SCALING+MIN_DISP)
invdepth_pyramid.append(self.invdepth_layers[i-1].forward(upconv_feats[-1]))
# invdepth_pyramid.append(self.invdepth_layers[i-1].forward(upconv_feats[-1]))
invdepth_pyramid = invdepth_pyramid[-1::-1]
invdepth_pyramid = invdepth_pyramid[0:5]
# conv_feats_output = conv_feats_output[0:5]
for i in range(len(invdepth_pyramid)):
# *10 + 0.01
print(i,': before squeeze',invdepth_pyramid[i].shape)
invdepth_pyramid[i] = invdepth_pyramid[i].squeeze(1)*DISP_SCALING+MIN_DISP
print(i,': after squeeze',invdepth_pyramid[i].shape)
print()
return invdepth_pyramid
class FDCDepthEstimator(nn.Module):
def __init__(self, input_size=None):
super(FDCDepthEstimator, self).__init__()
den_ = den.DEN()
den_ = den_.to(device)
den_.eval()
self.fdc_model = FDCInverseDepthMap(den_)
self.fdc_model.load_weights('/root/LKVOLearner/DEN/models/FDC/den_dbe/')
def forward(self, input):
sizes = [(128, 416), (64, 208), (32, 104), (16, 52), (8, 26)]
invdepth_pyramid = [[] for _ in range(len(sizes))]
origin_indepth_map = self.fdc_model.getInverseDepthMap(input)
# util.save_image(origin_indepth_map, "/data/log/checkpoints/origin_invdepth_map_%d.mat" % (self.index))
# self.index += 1
        for i in range(len(sizes)):  # num_pyramid: 5
for j in range(len(origin_indepth_map)):
invdepth_pyramid[i].append(transform.resize(origin_indepth_map[j].numpy(), sizes[i], mode='reflect',
anti_aliasing=True, preserve_range=True).astype('float32'))
invdepth_pyramid[i] = torch.tensor(invdepth_pyramid[i]).cuda()
invdepth_pyramid[i] = invdepth_pyramid[i]*DISP_SCALING+MIN_DISP
return invdepth_pyramid
def init_weights(self):
# already loaded in intializing.
pass
class PoseNet(nn.Module):
def __init__(self, bundle_size):
super(PoseNet, self).__init__()
self.bundle_size = bundle_size
model = [nn.Conv2d(bundle_size*3, 16, kernel_size=7, stride=2, padding=3, bias=True),
nn.ReLU(True),
nn.Conv2d(16, 32, kernel_size=5, stride=2, padding=2, bias=True),
nn.ReLU(True),
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias=True),
nn.ReLU(True),
nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=True),
nn.ReLU(True),
nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1, bias=True),
nn.ReLU(True),
nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1, bias=True),
nn.ReLU(True),
nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1, bias=True),
nn.ReLU(True),
nn.Conv2d(256, 6*(bundle_size-1), kernel_size=3, stride=2, padding=1, bias=True)
]
self.model = nn.Sequential(*model)
def forward(self, input):
assert(self.bundle_size*3 == input.size(1))
p = self.model.forward(input)
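        # Average the spatial map, then reshape to (batch, bundle_size-1, 6): one 6-DoF pose
        # (3 rotation + 3 translation) per source frame, scaled by 0.01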
p = p.view(input.size(0), 6*(self.bundle_size-1), -1).mean(2)
return p.view(input.size(0), self.bundle_size-1, 6) * 0.01
class PoseExpNet(nn.Module):
def __init__(self, bundle_size):
super(PoseExpNet, self).__init__()
self.bundle_size = bundle_size
self.convlyr1 = nn.Sequential(*[nn.Conv2d(bundle_size*3, 16, kernel_size=7, stride=2, padding=3, bias=True),
nn.ReLU(True)])
self.convlyr2 = nn.Sequential(*[nn.Conv2d(16, 32, kernel_size=5, stride=2, padding=2, bias=True),
nn.ReLU(True)])
self.convlyr3 = nn.Sequential(*[nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias=True),
nn.ReLU(True)])
self.convlyr4 = nn.Sequential(*[nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=True),
nn.ReLU(True)])
self.convlyr5 = nn.Sequential(*[nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1, bias=True),
nn.ReLU(True)])
self.poselyr = nn.Sequential(*[nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1, bias=True),
nn.ReLU(True),
nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1, bias=True),
nn.ReLU(True),
nn.Conv2d(256, 6*(bundle_size-1), kernel_size=3, stride=2, padding=1, bias=True)])
self.uplyr5 = nn.Sequential(*[nn.ConvTranspose2d(in_channels=256,
out_channels=256,
kernel_size=2,
bias=True,
stride=2,
padding=0),
nn.ReLU(True)])
self.uplyr4 = nn.Sequential(*[nn.ConvTranspose2d(in_channels=256,
out_channels=128,
kernel_size=2,
bias=True,
stride=2,
padding=0),
nn.ReLU(True)])
self.uplyr3 = nn.Sequential(*[nn.ConvTranspose2d(in_channels=128,
out_channels=64,
kernel_size=2,
bias=True,
stride=2,
padding=0),
nn.ReLU(True)])
self.uplyr2 = nn.Sequential(*[nn.ConvTranspose2d(in_channels=64,
out_channels=32,
kernel_size=2,
bias=True,
stride=2,
padding=0),
nn.ReLU(True)])
self.uplyr1 = nn.Sequential(*[nn.ConvTranspose2d(in_channels=32,
out_channels=16,
kernel_size=2,
bias=True,
stride=2,
padding=0),
nn.ReLU(True)])
self.explyr4 = nn.Sequential(*[nn.Conv2d(128, bundle_size, kernel_size=3,
stride=1, padding=1, bias=True),
nn.Sigmoid()])
self.explyr3 = nn.Sequential(*[nn.Conv2d(64, bundle_size, kernel_size=3,
stride=1, padding=1, bias=True),
nn.Sigmoid()])
self.explyr2 = nn.Sequential(*[nn.Conv2d(32, bundle_size, kernel_size=3,
stride=1, padding=1, bias=True),
nn.Sigmoid()])
self.explyr1 = nn.Sequential(*[nn.Conv2d(16, bundle_size, kernel_size=3,
stride=1, padding=1, bias=True),
nn.Sigmoid()])
def forward(self, input):
conv1 = self.convlyr1(input)
conv2 = self.convlyr2(conv1)
conv3 = self.convlyr3(conv2)
conv4 = self.convlyr4(conv3)
conv5 = self.convlyr5(conv4)
# output pose
p = self.poselyr.forward(conv5)
p = p.view(input.size(0), 6*(self.bundle_size-1), -1).mean(2)
# multiply predicted pose with a small constant
p = p.view(input.size(0), self.bundle_size-1, 6) * 0.01
# predict multi-scale explainable mask
upcnv5 = self.uplyr5(conv5)
upcnv4 = self.uplyr4(upcnv5)
upcnv3 = self.uplyr3(upcnv4)
upcnv2 = self.uplyr2(upcnv3)
upcnv1 = self.uplyr1(upcnv2)
mask4 = self.explyr4(upcnv4)
mask3 = self.explyr3(upcnv3)
mask2 = self.explyr2(upcnv2)
mask1 = self.explyr1(upcnv1)
return p, [mask1, mask2, mask3, mask4]
if __name__ == "__main__":
model = PoseExpNet(3).cuda()
x = Variable(torch.randn(1,9,128,416).cuda())
p, masks = model.forward(x)
for i in range(4):
print(masks[i].size())
dnet = VggDepthEstimator([128,416]).cuda()
I = Variable(torch.randn(1,3,128,416).cuda())
invdepth_pyramid = dnet.forward(I)
for i in range(len(invdepth_pyramid)):
print(invdepth_pyramid[i].size())
``` |
{
"source": "JinnaBalu/vibhuvi-infinite-containers",
"score": 3
} |
#### File: jupyter/python-sample/scrape-justdial.py
```python
from bs4 import BeautifulSoup
import urllib
import request
import urllib.request
import requests
import csv
def innerHTML(element):
return element.decode_contents(formatter="html")
def get_name(body):
return body.find('span', {'class':'jcn'}).a.string
def which_digit(html):
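    # JustDial renders phone-number digits as icon-font CSS classes; this lookup maps a class name back to its digit/character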
mappingDict={'icon-ji':9,
'icon-dc':'+',
'icon-fe':'(',
'icon-hg':')',
'icon-ba':'-',
'icon-lk':8,
'icon-nm':7,
'icon-po':6,
'icon-rq':5,
'icon-ts':4,
'icon-vu':3,
'icon-wx':2,
'icon-yz':1,
'icon-acb':0,
}
return mappingDict.get(html,'')
def get_phone_number(body):
i=0
phoneNo = "No Number!"
try:
for item in body.find('p',{'class':'contact-info'}):
i+=1
if(i==2):
phoneNo=''
try:
for element in item.find_all(class_=True):
classes = []
classes.extend(element["class"])
phoneNo+=str((which_digit(classes[1])))
except:
pass
except:
pass
body = body['data-href']
soup = BeautifulSoup(body, 'html.parser')
for a in soup.find_all('a', {"id":"whatsapptriggeer"} ):
# print (a)
phoneNo = str(a['href'][-10:])
return phoneNo
def get_rating(body):
rating = 0.0
text = body.find('span', {'class':'star_m'})
if text is not None:
for item in text:
rating += float(item['class'][0][1:])/10
return rating
def get_rating_count(body):
text = body.find('span', {'class':'rt_count'}).string
# Get only digits
rating_count =''.join(i for i in text if i.isdigit())
return rating_count
def get_address(body):
return body.find('span', {'class':'mrehover'}).text.strip()
def get_location(body):
text = body.find('a', {'class':'rsmap'})
if text == None:
return
text_list = text['onclick'].split(",")
latitutde = text_list[3].strip().replace("'", "")
longitude = text_list[4].strip().replace("'", "")
return latitutde + ", " + longitude
page_number = 1
service_count = 1
fields = ['Name', 'Phone', 'Rating', 'Rating Count', 'Address', 'Location']
out_file = open('hardware.csv','w')
csvwriter = csv.DictWriter(out_file, delimiter=',', fieldnames=fields)
# Write fields first
#csvwriter.writerow(dict((fn,fn) for fn in fields))
while True:
# Check if reached end of result
if page_number > 50:
break
url="https://www.justdial.com/Medak/Hardware-Shops/nct-10243514/page-%s" % (page_number)
print(url)
req = urllib.request.Request(url, headers={'User-Agent' : "Mozilla/5.0 (Windows NT 6.1; Win64; x64)"})
page = urllib.request.urlopen( req )
# page=urllib2.urlopen(url)
soup = BeautifulSoup(page.read(), "html.parser")
services = soup.find_all('li', {'class': 'cntanr'})
# Iterate through the 10 results in the page
for service_html in services:
# Parse HTML to fetch data
dict_service = {}
name = get_name(service_html)
print(name);
phone = get_phone_number(service_html)
rating = get_rating(service_html)
count = get_rating_count(service_html)
address = get_address(service_html)
location = get_location(service_html)
if name != None:
dict_service['Name'] = name
if phone != None:
print('getting phone number')
dict_service['Phone'] = phone
if rating != None:
dict_service['Rating'] = rating
if count != None:
dict_service['Rating Count'] = count
if address != None:
dict_service['Address'] = address
if location != None:
dict_service['Address'] = location
# Write row to CSV
csvwriter.writerow(dict_service)
print("#" + str(service_count) + " " , dict_service)
service_count += 1
page_number += 1
out_file.close()
``` |
{
"source": "JinneGeelen/ML-MoCap",
"score": 3
} |
#### File: ML-MoCap/camera/app.py
```python
import asyncio
import threading
import websockets
import os
import json
import logging
import signal
import time
from datetime import datetime, timedelta
from camera import CameraController
logging.basicConfig(level=logging.INFO)
camera_controller = CameraController()
logger = logging.getLogger()
class GracefulKiller:
kill_now = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self, signum, frame):
self.kill_now = True
async def main():
killer = GracefulKiller()
camera_controller.start_uv4l()
uri = 'ws://{}:3000/v1/cameras/connect/{}'.format(
os.environ['CONTROLLER_HOST'], os.environ['ID'])
async with websockets.connect(uri) as websocket:
logger.info('Connected to {}'.format(uri))
while not websocket.closed and not killer.kill_now:
data = await websocket.recv()
message = json.loads(data)
if 'error' in message:
logger.error(
"Error received from controller: {}".format(message['error']))
continue
if not 'event' in message:
logger.error(
'Received message from controller with no "event" in it...')
continue
event_method = 'on_{}'.format(message.get('event'))
logger.info("Received event {} with data {}".format(
event_method, message.get('data')))
threading.Thread(target=getattr(camera_controller, event_method),
args=[message.get('data')],
daemon=True).start()
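            # e.g. a controller message {"event": "start_recording", "data": {...}} is dispatched
            # to camera_controller.on_start_recording(data) in its own thread (handler names are
            # assumed to follow this on_<event> convention)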
logger.info('Disconnected from websocket')
await camera_controller.force_stop()
# Start the application
if __name__ == '__main__':
logger.info('Starting event loop...')
asyncio.run(main())
```
#### File: controller/controllers/participants.py
```python
import logging
import sqlalchemy
import shortuuid
import asyncio
import time
from datetime import datetime, timedelta
from rx.subject import Subject
from db import get_db, metadata
from controllers.controller import Controller
logger = logging.getLogger()
participants = sqlalchemy.Table('participants', metadata,
sqlalchemy.Column(
'id', sqlalchemy.String, primary_key=True),
sqlalchemy.Column(
'study_id', sqlalchemy.ForeignKey('studies.id')),
sqlalchemy.Column('number', sqlalchemy.String),
sqlalchemy.Column(
'dominant_hand', sqlalchemy.String),
sqlalchemy.Column('age', sqlalchemy.Integer),
sqlalchemy.Column('gender', sqlalchemy.String),
sqlalchemy.Column(
'consent_video', sqlalchemy.Boolean),
)
class ParticipantsController(Controller):
def parse_row(self, row):
return {
'id': row['id'],
'study_id': row['study_id'],
'number': row['number'],
'dominant_hand': row['dominant_hand'],
'age': row['age'],
'gender': row['gender'],
'consent_video': row['consent_video'],
}
async def get_all(self):
query = participants.select()
results = await self.db.fetch_all(query)
return [self.parse_row(result) for result in results]
async def create(self, participant):
participant_id = shortuuid.uuid()
query = participants.insert().values(
id=participant_id,
study_id=participant.get('study_id'),
number=participant.get('number'),
dominant_hand=participant.get('dominant_hand'),
age=participant.get('age'),
gender=participant.get('gender'),
consent_video=participant.get('consent_video'),
)
await self.db.execute(query)
participant = await self.get(participant_id)
await self.broadcast({
'event': 'create',
'entity': participant
})
return participant
async def get(self, participant_id):
query = participants.select().where(participants.c.id == participant_id)
participant = await self.db.fetch_one(query)
if participant:
return self.parse_row(participant)
async def update(self, participant):
query = participants.update().where(participants.c.id == participant.get('id')).values(
id=participant.get('id'),
study_id=participant.get('study_id'),
number=participant.get('number'),
dominant_hand=participant.get('dominant_hand'),
age=participant.get('age'),
gender=participant.get('gender'),
consent_video=participant.get('consent_video'),
)
await self.db.execute(query)
participant = await self.get(participant.get('id'))
await self.broadcast({
'event': 'update',
'entity': participant
})
return participant
async def delete(self, participant_id):
participant = await self.get(participant_id)
query = participants.delete().where(participants.c.id == participant_id)
await self.db.execute(query)
await self.broadcast({
'event': 'delete',
'entity': participant
})
return participant
participant_controller = ParticipantsController()
```
#### File: controller/controllers/recordings.py
```python
import logging
import sqlalchemy
import shortuuid
import json
import os
import time
import asyncio
import threading
import shutil
from datetime import datetime, timedelta, timezone
from rx.subject import Subject
from db import get_db, metadata
from config import LOCAL_STORAGE_PATH, REMOTE_STORAGE_PATH
from controllers.controller import Controller
from controllers.participants import participant_controller
from controllers.cameras import camera_controller
from controllers.studies import studies_controller
from gpiozero import LED
#trigger for TMSi Porti, pin1 = 3.3V (GND = pin6)
trigger = LED("GPIO21") #LED("BOARD40")
led_red = LED("GPIO5") #LED("BOARD29")
led_yellow = LED("GPIO6") #LED("BOARD31")
led_green = LED("GPIO13") #LED("BOARD33")
led_blue = LED("GPIO19") #LED("BOARD35")
led_white = LED("GPIO26") #LED("BOARD")
logging.basicConfig(level=logging.DEBUG,
format='%(relativeCreated)6d %(threadName)s %(message)s')
logger = logging.getLogger()
database = get_db()
recordings = sqlalchemy.Table('recordings', metadata,
sqlalchemy.Column(
'id', sqlalchemy.String, primary_key=True),
sqlalchemy.Column(
'participant_id', sqlalchemy.ForeignKey('participants.id')),
sqlalchemy.Column('name', sqlalchemy.String),
sqlalchemy.Column(
'file_path', sqlalchemy.String),
sqlalchemy.Column(
'start_time', sqlalchemy.String),
sqlalchemy.Column('end_time', sqlalchemy.String),
sqlalchemy.Column('state', sqlalchemy.String),
sqlalchemy.Column(
'cameras_recorded', sqlalchemy.String),
sqlalchemy.Column(
'cameras_processing', sqlalchemy.String),
sqlalchemy.Column(
'cameras_processed', sqlalchemy.String),
)
class RecordingController(Controller):
sessions = {}
observable = Subject()
def parse_row(self, row):
return {
'id': row['id'],
'participant_id': row['participant_id'],
'name': row['name'],
'file_path': row['file_path'],
'start_time': row['start_time'],
'end_time': row['end_time'],
'state': row['state'],
'cameras_recorded': json.loads(row['cameras_recorded']),
'cameras_processing': json.loads(row['cameras_processing']),
'cameras_processed': json.loads(row['cameras_processed']),
}
async def get_recording_metadata(self, recording):
participant = await participant_controller.get(recording.get('participant_id'))
study = await studies_controller.get(participant.get('study_id'))
return {
'recording_name': recording.get('name'),
'start_time': recording.get('start_time'),
'end_time': recording.get('end_time'),
'participant_number': participant.get('number'),
'study_name': study.get('name'),
'researcher': study.get('researcher'),
}
async def get_recording_path(self, recording):
participant = await participant_controller.get(recording.get('participant_id'))
study = await studies_controller.get(participant.get('study_id'))
path = '{}_{}_{}'.format(study.get('name'),
participant.get('number'),
recording.get('name'))
return path.replace(' ', '_').replace(':', '_').replace('-', '_')
async def get_local_storage_path(self, recording):
recording_path = await self.get_recording_path(recording)
return '{}/{}'.format(LOCAL_STORAGE_PATH, recording_path)
async def get_remote_storage_path(self, recording):
recording_path = await self.get_recording_path(recording)
return '{}/{}'.format(REMOTE_STORAGE_PATH, recording_path)
async def get_camera_file_path(self, recording, camera_id):
local_path = await self.get_local_storage_path(recording)
camera = await camera_controller.get(camera_id)
return '{}_{}.mp4'.format(local_path, camera.get('name', camera_id).replace(' ', '_'))
#new start
    def sendtrigger(self, recording):
        # runs in a plain background thread (started from start() below), so it is not a coroutine;
        # start_time was stored as an ISO-8601 string and is parsed back into an aware datetime here
        start_time = datetime.fromisoformat(recording.get('start_time'))
        current_time = datetime.now().astimezone()
        while current_time < start_time:
time.sleep(0.001)
current_time = datetime.now().astimezone()
trigger.on()
time.sleep(2)
trigger.off()
#new end
async def start(self, recording_id):
async with self.db.transaction():
recording = await self.get(recording_id)
if not recording:
return
recording['start_time'] = (
datetime.now() + timedelta(seconds=3)).astimezone().isoformat()
recording['end_time'] = None
recording['state'] = 'recording'
recording['cameras_processed'] = []
recording['cameras_processing'] = []
cameras_recorded = await camera_controller.send_command({
'event': 'start_recording',
'data': recording,
})
recording['cameras_recorded'] = cameras_recorded
recording = await self.update(recording)
#send trigger to TMSi Porti
            thread = threading.Thread(
                target=self.sendtrigger, args=(recording,), daemon=True)
thread.start()
return recording
async def stop(self, recording_id):
async with self.db.transaction():
recording = await self.get(recording_id)
if not recording:
return
await camera_controller.send_command({
'event': 'stop_recording',
'data': recording,
})
recording['end_time'] = datetime.now().astimezone().isoformat()
recording['state'] = 'unprocessed'
recording = await self.update(recording)
return recording
async def discard(self, recording_id):
async with self.db.transaction():
recording = await self.get(recording_id)
if not recording:
return
await camera_controller.send_command({
'event': 'discard_recording',
'data': recording,
})
recording['start_time'] = None
recording['end_time'] = None
recording['cameras_recorded'] = []
recording['state'] = 'empty'
recording = await self.update(recording)
return recording
async def process(self, recording_id):
async with self.db.transaction():
await self.db.execute('LOCK TABLE recordings IN SHARE ROW EXCLUSIVE MODE')
recording = await self.get(recording_id)
if not recording:
return
local_storage_path = await self.get_local_storage_path(recording)
remote_storage_path = await self.get_remote_storage_path(recording)
if not os.path.exists(remote_storage_path):
os.makedirs(remote_storage_path)
recording_metadata = await self.get_recording_metadata(recording)
metadata_path = '{}/metadata.json'.format(remote_storage_path)
with open(metadata_path, 'w') as file:
file.write(json.dumps(recording_metadata, indent=2))
cameras_processing = await camera_controller.send_command({
'event': 'process_recording',
'data': recording,
})
recording['cameras_processing'] = cameras_processing
recording['cameras_processed'] = []
recording['state'] = 'processing'
recording = await self.update(recording)
return recording
async def processed(self, recording_id, camera_id):
recording = await self.get(recording_id)
camera = await camera_controller.get(camera_id)
source = await self.get_camera_file_path(recording, camera_id)
base_path = await self.get_remote_storage_path(recording)
dest = '{}/{}.mp4'.format(base_path, camera.get('name', camera_id))
thread = threading.Thread(
target=self.upload, args=(source, dest), daemon=True)
thread.start()
async with self.db.transaction():
await self.db.execute('LOCK TABLE recordings IN SHARE ROW EXCLUSIVE MODE')
recording = await self.get(recording.get('id'))
if not recording:
return
recording['cameras_processing'].remove(camera_id)
recording['cameras_processed'].append(camera_id)
if len(recording['cameras_processing']) == 0:
recording['state'] = 'processed'
recording = await self.update(recording)
return recording
def upload(self, source, dest):
logger.info('Upload {} to {}'.format(source, dest))
shutil.move(source, dest)
logger.info('Done uploading {}'.format(dest))
async def get_all(self):
query = recordings.select()
results = await self.db.fetch_all(query)
return [self.parse_row(result) for result in results]
async def create(self, recording):
recording_id = shortuuid.uuid()
query = recordings.insert().values(
id=recording_id,
participant_id=recording.get('participant_id'),
name=recording.get('name'),
file_path=recording.get('file_path'),
start_time=recording.get('start_time'),
end_time=recording.get('end_time'),
cameras_recorded=json.dumps([]),
cameras_processing=json.dumps([]),
cameras_processed=json.dumps([]),
state='empty',
)
await self.db.execute(query)
recording = await self.get(recording_id)
await self.broadcast({
'event': 'create',
'entity': recording
})
return recording
async def get(self, recording_id):
query = recordings.select().where(recordings.c.id == recording_id)
result = await self.db.fetch_one(query)
if result:
return self.parse_row(result)
async def update(self, recording):
query = recordings.update().where(recordings.c.id == recording.get('id')).values(
id=recording.get('id'),
participant_id=recording.get('participant_id'),
name=recording.get('name'),
file_path=recording.get('file_path'),
start_time=recording.get('start_time'),
end_time=recording.get('end_time'),
cameras_recorded=json.dumps(recording.get('cameras_recorded')),
cameras_processing=json.dumps(recording.get('cameras_processing')),
cameras_processed=json.dumps(recording.get('cameras_processed')),
state=recording.get('state'),
)
await self.db.execute(query)
recording = await self.get(recording.get('id'))
await self.broadcast({
'event': 'update',
'entity': recording
})
return recording
async def delete(self, recording_id):
recording = await self.get(recording_id)
query = recordings.delete().where(recordings.c.id == recording_id)
await self.db.execute(query)
await self.broadcast({
'event': 'delete',
'entity': recording
})
return recording
recording_controller = RecordingController()
```
#### File: controller/db/database.py
```python
from databases import Database
from sqlalchemy import create_engine, MetaData
metadata = MetaData()
_db = None
def get_db(url = None):
global _db
if url:
if _db:
_db.disconnect()
_db = Database(url)
return _db
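# Usage sketch (the DSN is illustrative): call get_db("postgresql://user:pass@host/db") once at
# startup to create the shared Database instance; later calls to get_db() with no URL return
# that same instance.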
async def init_tables(url):
engine = create_engine(url)
# metadata.drop_all(bind=engine)
metadata.create_all(bind=engine, checkfirst=True)
``` |
{
"source": "jinnerbichler/FARM",
"score": 2
} |
#### File: FARM/farm/infer.py
```python
import os
import torch
import logging
import multiprocessing as mp
import numpy as np
from contextlib import ExitStack
from functools import partial
from torch.utils.data.sampler import SequentialSampler
from tqdm import tqdm
from farm.data_handler.dataloader import NamedDataLoader
from farm.modeling.adaptive_model import AdaptiveModel
from farm.utils import initialize_device_settings
from farm.data_handler.processor import Processor, InferenceProcessor
from farm.utils import set_all_seeds
from farm.utils import log_ascii_workers
from farm.data_handler.utils import grouper
logger = logging.getLogger(__name__)
class Inferencer:
"""
Loads a saved AdaptiveModel from disk and runs it in inference mode. Can be used for a model with prediction head (down-stream predictions) and without (using LM as embedder).
Example usage:
.. code-block:: python
# down-stream inference
basic_texts = [
{"text": "Schartau sagte dem Tagesspiegel, dass Fischer ein Idiot sei"},
{"text": "<NAME> spielt Handball in Berlin"},
]
model = Inferencer.load(your_model_dir)
model.inference_from_dicts(dicts=basic_texts)
# LM embeddings
model.extract_vectors(dicts=basic_texts)
"""
def __init__(
self,
model,
processor,
batch_size=4,
gpu=False,
name=None,
return_class_probs=False
):
"""
Initializes Inferencer from an AdaptiveModel and a Processor instance.
:param model: AdaptiveModel to run in inference mode
:type model: AdaptiveModel
:param processor: A dataset specific Processor object which will turn input (file or dict) into a Pytorch Dataset.
:type processor: Processor
:param batch_size: Number of samples computed once per batch
:type batch_size: int
:param gpu: If GPU shall be used
:type gpu: bool
:param name: Name for the current Inferencer model, displayed in the REST API
:type name: string
:param return_class_probs: either return probability distribution over all labels or the prob of the associated label
:type return_class_probs: bool
:return: An instance of the Inferencer.
"""
# Init device and distributed settings
device, n_gpu = initialize_device_settings(use_cuda=gpu, local_rank=-1, fp16=False)
self.processor = processor
self.model = model
self.model.eval()
self.batch_size = batch_size
self.device = device
self.language = self.model.language_model.language
# TODO adjust for multiple prediction heads
if len(self.model.prediction_heads) == 1:
self.prediction_type = self.model.prediction_heads[0].model_type
# self.label_map = self.processor.label_maps[0]
elif len(self.model.prediction_heads) == 0:
self.prediction_type = "embedder"
# else:
# raise NotImplementedError("A model with multiple prediction heads is currently not supported by the Inferencer")
        self.name = name if name is not None else f"anonymous-{self.prediction_type}"
self.return_class_probs = return_class_probs
model.connect_heads_with_processor(processor.tasks, require_labels=False)
set_all_seeds(42, n_gpu)
@classmethod
def load(
cls,
load_dir,
batch_size=4,
gpu=False,
embedder_only=False,
return_class_probs=False,
):
"""
Initializes Inferencer from directory with saved model.
:param load_dir: Directory where the saved model is located.
:type load_dir: str
:param batch_size: Number of samples computed once per batch
:type batch_size: int
:param gpu: If GPU shall be used
:type gpu: bool
:param embedder_only: If true, a faster processor (InferenceProcessor) is loaded. This should only be used for extracting embeddings (no downstream predictions).
:type embedder_only: bool
:return: An instance of the Inferencer.
"""
device, n_gpu = initialize_device_settings(use_cuda=gpu, local_rank=-1, fp16=False)
model = AdaptiveModel.load(load_dir, device)
if embedder_only:
# model.prediction_heads = []
processor = InferenceProcessor.load_from_dir(load_dir)
else:
processor = Processor.load_from_dir(load_dir)
name = os.path.basename(load_dir)
return cls(
model,
processor,
batch_size=batch_size,
gpu=gpu,
name=name,
return_class_probs=return_class_probs,
)
def inference_from_file(self, file):
dicts = self.processor.file_to_dicts(file)
preds_all = self.inference_from_dicts(dicts, rest_api_schema=False)
return preds_all
def inference_from_dicts(self, dicts, rest_api_schema=False, use_multiprocessing=True):
"""
Runs down-stream inference using the prediction head.
:param dicts: Samples to run inference on provided as a list of dicts. One dict per sample.
:type dicts: [dict]
:param rest_api_schema: whether conform to the schema used for dicts in the HTTP API for Inference.
:type rest_api_schema: bool
:return: dict of predictions
        :param use_multiprocessing: time incurred in spawning processes could outweigh performance boost for very small
            number of dicts, e.g. HTTP APIs for inference. This flag allows disabling multiprocessing for such cases.
"""
if self.prediction_type == "embedder":
raise TypeError(
"You have called inference_from_dicts for a model without any prediction head! "
"If you want to: "
"a) ... extract vectors from the language model: call `Inferencer.extract_vectors(...)`"
f"b) ... run inference on a downstream task: make sure your model path {self.name} contains a saved prediction head"
)
num_cpus = mp.cpu_count() or 1
dicts_per_cpu = np.ceil(len(dicts) / num_cpus)
# automatic adjustment of multiprocessing chunksize
        # for small files (containing few dicts) we want small chunksize to utilize all available cores but never less
# than 2, because we need it to sample another random sentence in LM finetuning
# for large files we want to minimize processor spawning without giving too much data to one process, so we
# clip it at 5k
multiprocessing_chunk_size = int(np.clip((np.ceil(dicts_per_cpu / 5)), a_min=2, a_max=5000))
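        # worked example (assumed figures): 1,000 dicts on 8 cores -> dicts_per_cpu = 125 and
        # chunksize = clip(ceil(125 / 5), 2, 5000) = 25; with only 4 dicts the clip keeps it at 2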
dict_batches_to_process = int(len(dicts) / multiprocessing_chunk_size)
num_cpus_used = min(mp.cpu_count(), dict_batches_to_process) or 1
if use_multiprocessing:
with ExitStack() as stack:
p = stack.enter_context(mp.Pool(processes=num_cpus_used))
logger.info(
f"Got ya {num_cpus_used} parallel workers to do inference on {len(dicts)}dicts (chunksize = {multiprocessing_chunk_size})..."
)
log_ascii_workers(num_cpus_used, logger)
results = p.imap(
partial(self._multiproc, processor=self.processor, rest_api_schema=rest_api_schema),
grouper(dicts, multiprocessing_chunk_size),
1,
)
preds_all = []
with tqdm(total=len(dicts), unit=" Dicts") as pbar:
for dataset, tensor_names, sample in results:
preds_all.extend(self._run_inference(dataset, tensor_names, sample))
pbar.update(multiprocessing_chunk_size)
else:
chunk = next(grouper(dicts, len(dicts)))
dataset, tensor_names, sample = self._multiproc(chunk, processor=self.processor, rest_api_schema=rest_api_schema)
preds_all = self._run_inference(dataset, tensor_names, sample)
return preds_all
@classmethod
def _multiproc(cls, chunk, processor, rest_api_schema):
dicts = [d[1] for d in chunk]
index = chunk[0][0]
dataset, tensor_names = processor.dataset_from_dicts(dicts, index, rest_api_schema)
samples = []
for d in dicts:
samples.extend(processor._dict_to_samples(d))
return dataset, tensor_names, samples
def _run_inference(self, dataset, tensor_names, samples):
data_loader = NamedDataLoader(
dataset=dataset, sampler=SequentialSampler(dataset), batch_size=self.batch_size, tensor_names=tensor_names
)
preds_all = []
for i, batch in enumerate(data_loader):
batch = {key: batch[key].to(self.device) for key in batch}
batch_samples = samples[i * self.batch_size : (i + 1) * self.batch_size]
with torch.no_grad():
logits = self.model.forward(**batch)
preds = self.model.formatted_preds(
logits=logits,
samples=batch_samples, # TODO batch_samples and logits are not aligned
tokenizer=self.processor.tokenizer,
return_class_probs=self.return_class_probs,
**batch,
)
preds_all += preds
return preds_all
def extract_vectors(self, dicts, extraction_strategy="cls_token", extraction_layer=-1):
"""
Converts a text into vector(s) using the language model only (no prediction head involved).
:param dicts: Samples to run inference on provided as a list of dicts. One dict per sample.
:type dicts: [dict]
:param extraction_strategy: Strategy to extract vectors. Choices: 'cls_token' (sentence vector), 'reduce_mean'
(sentence vector), reduce_max (sentence vector), 'per_token' (individual token vectors)
:type extraction_strategy: str
:param extraction_layer: number of layer from which the embeddings shall be extracted. Default: -1 (very last layer).
:type: int
:return: dict of predictions
"""
dataset, tensor_names = self.processor.dataset_from_dicts(dicts, rest_api_schema=True)
samples = []
for dict in dicts:
samples.extend(self.processor._dict_to_samples(dict))
data_loader = NamedDataLoader(
dataset=dataset, sampler=SequentialSampler(dataset), batch_size=self.batch_size, tensor_names=tensor_names
)
preds_all = []
for i, batch in enumerate(data_loader):
batch = {key: batch[key].to(self.device) for key in batch}
batch_samples = samples[i * self.batch_size : (i + 1) * self.batch_size]
with torch.no_grad():
preds = self.model.language_model.formatted_preds(
extraction_strategy=extraction_strategy,
samples=batch_samples,
tokenizer=self.processor.tokenizer,
extraction_layer=extraction_layer,
**batch,
)
preds_all += preds
return preds_all
class FasttextInferencer:
def __init__(self, model, name=None):
self.model = model
        self.name = name if name is not None else "anonymous-fasttext"
self.prediction_type = "embedder"
@classmethod
def load(cls, load_dir, batch_size=4, gpu=False, embedder_only=True):
import fasttext
if os.path.isfile(load_dir):
return cls(model=fasttext.load_model(load_dir))
else:
logger.error(f"Fasttext model file does not exist at: {load_dir}")
def extract_vectors(self, dicts, extraction_strategy="reduce_mean"):
"""
Converts a text into vector(s) using the language model only (no prediction head involved).
:param dicts: Samples to run inference on provided as a list of dicts. One dict per sample.
:type dicts: [dict]
:param extraction_strategy: Strategy to extract vectors. Choices: 'reduce_mean' (mean sentence vector), 'reduce_max' (max per embedding dim), 'CLS'
:type extraction_strategy: str
:return: dict of predictions
"""
preds_all = []
for d in dicts:
pred = {}
pred["context"] = d["text"]
if extraction_strategy == "reduce_mean":
pred["vec"] = self.model.get_sentence_vector(d["text"])
else:
raise NotImplementedError
preds_all.append(pred)
return preds_all
```
#### File: farm/modeling/prediction_head.py
```python
import json
import logging
import os
import numpy as np
from scipy.special import expit
import torch
from transformers.modeling_bert import BertForPreTraining, BertLayerNorm, ACT2FN
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss, BCEWithLogitsLoss
from farm.data_handler.utils import is_json
from farm.utils import convert_iob_to_simple_tags
logger = logging.getLogger(__name__)
class PredictionHead(nn.Module):
""" Takes word embeddings from a language model and generates logits for a given task. Can also convert logits
    to loss and logits to predictions. """
subclasses = {}
def __init_subclass__(cls, **kwargs):
""" This automatically keeps track of all available subclasses.
        Enables generic load() for all specific PredictionHead implementations.
"""
super().__init_subclass__(**kwargs)
cls.subclasses[cls.__name__] = cls
@classmethod
def create(cls, prediction_head_name, layer_dims, class_weights=None):
"""
Create subclass of Prediction Head.
:param prediction_head_name: Classname (exact string!) of prediction head we want to create
:type prediction_head_name: str
:param layer_dims: describing the feed forward block structure, e.g. [768,2]
:type layer_dims: List[Int]
:param class_weights: The loss weighting to be assigned to certain label classes during training.
Used to correct cases where there is a strong class imbalance.
:type class_weights: list[Float]
:return: Prediction Head of class prediction_head_name
"""
        # TODO maybe we want to make this more generic.
        # 1. Class weights are not relevant for all heads.
# 2. Layer weights impose FF structure, maybe we want sth else later
# Solution: We could again use **kwargs
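        # illustrative usage (names from this module): the registry filled by __init_subclass__
        # lets callers build heads generically, e.g.
        #   head = PredictionHead.create("TextClassificationHead", layer_dims=[768, 2])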
return cls.subclasses[prediction_head_name](
layer_dims=layer_dims, class_weights=class_weights
)
def save_config(self, save_dir, head_num=0):
"""
Saves the config as a json file.
:param save_dir: Path to save config to
:type save_dir: str
:param head_num: Which head to save
:type head_num: int
"""
output_config_file = os.path.join(
save_dir, f"prediction_head_{head_num}_config.json"
)
with open(output_config_file, "w") as file:
json.dump(self.config, file)
def save(self, save_dir, head_num=0):
"""
Saves the prediction head state dict.
:param save_dir: path to save prediction head to
:type save_dir: str
:param head_num: which head to save
:type head_num: int
"""
output_model_file = os.path.join(save_dir, f"prediction_head_{head_num}.bin")
torch.save(self.state_dict(), output_model_file)
self.save_config(save_dir, head_num)
def generate_config(self):
"""
Generates config file from Class parameters (only for sensible config parameters).
"""
config = {}
for key, value in self.__dict__.items():
if is_json(value) and key[0] != "_":
config[key] = value
config["name"] = self.__class__.__name__
self.config = config
@classmethod
def load(cls, config_file):
"""
Loads a Prediction Head. Infers the class of prediction head from config_file.
:param config_file: location where corresponding config is stored
:type config_file: str
:return: PredictionHead
:rtype: PredictionHead[T]
"""
config = json.load(open(config_file))
prediction_head = cls.subclasses[config["name"]](**config)
model_file = cls._get_model_file(config_file=config_file)
logger.info("Loading prediction head from {}".format(model_file))
prediction_head.load_state_dict(torch.load(model_file, map_location=torch.device("cpu")))
return prediction_head
def logits_to_loss(self, logits, labels):
"""
Implement this function in your special Prediction Head.
Should combine logits and labels with a loss fct to a per sample loss.
:param logits: logits, can vary in shape and type, depending on task
:type logits: object
:param labels: labels, can vary in shape and type, depending on task
:type labels: object
:return: per sample loss as a torch.tensor of shape [batch_size]
"""
raise NotImplementedError()
def logits_to_preds(self, logits):
"""
Implement this function in your special Prediction Head.
        Should turn logits into predictions.
:param logits: logits, can vary in shape and type, depending on task
:type logits: object
:return: predictions as a torch.tensor of shape [batch_size]
"""
raise NotImplementedError()
def prepare_labels(self, **kwargs):
"""
Some prediction heads need additional label conversion.
E.g. NER needs word level labels turned into subword token level labels.
:param kwargs: placeholder for passing generic parameters
:type kwargs: object
:return: labels in the right format
:rtype: object
"""
# TODO maybe just return **kwargs to not force people to implement this
raise NotImplementedError()
@classmethod
def _get_model_file(cls, config_file):
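        # e.g. a config path ending in 'prediction_head_0_config.json' resolves to the weights
        # file 'prediction_head_0.bin' in the same directory (the naming used by save() above)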
if "config.json" in config_file and "prediction_head" in config_file:
head_num = int("".join([char for char in os.path.basename(config_file) if char.isdigit()]))
model_file = os.path.join(os.path.dirname(config_file), f"prediction_head_{head_num}.bin")
else:
raise ValueError(f"This doesn't seem to be a proper prediction_head config file: '{config_file}'")
return model_file
def _set_name(self, name):
self.task_name = name
class RegressionHead(PredictionHead):
def __init__(
self,
layer_dims,
loss_ignore_index=-100,
loss_reduction="none",
task_name="regression",
**kwargs,
):
super(RegressionHead, self).__init__()
# num_labels could in most cases also be automatically retrieved from the data processor
self.layer_dims = layer_dims
# TODO is this still needed?
self.feed_forward = FeedForwardBlock(self.layer_dims)
self.num_labels = 2
self.ph_output_type = "per_sequence_continuous"
self.model_type = "regression"
self.loss_fct = MSELoss(reduction="none")
self.task_name = task_name
self.generate_config()
def forward(self, x):
logits = self.feed_forward(x)
return logits
def logits_to_loss(self, logits, **kwargs):
# Squeeze the logits to obtain a coherent output size
label_ids = kwargs.get(self.label_tensor_name)
return self.loss_fct(logits.squeeze(), label_ids.float())
def logits_to_preds(self, logits, **kwargs):
preds = logits.cpu().numpy()
#rescale predictions to actual label distribution
preds = [x * self.label_list[1] + self.label_list[0] for x in preds]
return preds
def prepare_labels(self, **kwargs):
label_ids = kwargs.get(self.label_tensor_name)
label_ids = label_ids.cpu().numpy()
label_ids = [x * self.label_list[1] + self.label_list[0] for x in label_ids]
return label_ids
def formatted_preds(self, logits, samples, **kwargs):
preds = self.logits_to_preds(logits)
contexts = [sample.clear_text["text"] for sample in samples]
assert len(preds) == len(contexts)
res = {"task": "regression", "predictions": []}
for pred, context in zip(preds, contexts):
res["predictions"].append(
{
"context": f"{context}",
"pred": pred[0]
}
)
return res
class TextClassificationHead(PredictionHead):
def __init__(
self,
layer_dims,
class_weights=None,
loss_ignore_index=-100,
loss_reduction="none",
task_name="text_classification",
**kwargs,
):
super(TextClassificationHead, self).__init__()
# num_labels could in most cases also be automatically retrieved from the data processor
self.layer_dims = layer_dims
self.feed_forward = FeedForwardBlock(self.layer_dims)
self.num_labels = self.layer_dims[-1]
self.ph_output_type = "per_sequence"
self.model_type = "text_classification"
self.task_name = task_name #used for connecting with the right output of the processor
self.class_weights = class_weights
if class_weights:
logger.info(f"Using class weights for task '{self.task_name}': {self.class_weights}")
#TODO must balanced weight really be an instance attribute?
self.balanced_weights = nn.Parameter(
torch.tensor(class_weights), requires_grad=False
)
else:
self.balanced_weights = None
self.loss_fct = CrossEntropyLoss(
weight=self.balanced_weights,
reduction=loss_reduction,
ignore_index=loss_ignore_index,
)
self.generate_config()
def forward(self, X):
logits = self.feed_forward(X)
return logits
def logits_to_loss(self, logits, **kwargs):
label_ids = kwargs.get(self.label_tensor_name)
return self.loss_fct(logits, label_ids.view(-1))
def logits_to_probs(self, logits, return_class_probs, **kwargs):
softmax = torch.nn.Softmax(dim=1)
probs = softmax(logits)
if return_class_probs:
probs = probs
else:
probs = torch.max(probs, dim=1)[0]
probs = probs.cpu().numpy()
return probs
def logits_to_preds(self, logits, **kwargs):
logits = logits.cpu().numpy()
pred_ids = logits.argmax(1)
preds = [self.label_list[int(x)] for x in pred_ids]
return preds
def prepare_labels(self, **kwargs):
label_ids = kwargs.get(self.label_tensor_name)
label_ids = label_ids.cpu().numpy()
labels = [self.label_list[int(x)] for x in label_ids]
return labels
def formatted_preds(self, logits, samples, return_class_probs=False, **kwargs):
preds = self.logits_to_preds(logits)
probs = self.logits_to_probs(logits, return_class_probs)
contexts = [sample.clear_text["text"] for sample in samples]
assert len(preds) == len(probs) == len(contexts)
res = {"task": "text_classification", "predictions": []}
for pred, prob, context in zip(preds, probs, contexts):
if not return_class_probs:
pred_dict = {
"start": None,
"end": None,
"context": f"{context}",
"label": f"{pred}",
"probability": prob,
}
else:
pred_dict = {
"start": None,
"end": None,
"context": f"{context}",
"label": "class_probabilities",
"probability": prob,
}
res["predictions"].append(pred_dict)
return res
class MultiLabelTextClassificationHead(PredictionHead):
def __init__(
self,
layer_dims,
class_weights=None,
loss_reduction="none",
task_name="text_classification",
pred_threshold=0.5,
**kwargs,
):
super(MultiLabelTextClassificationHead, self).__init__()
# num_labels could in most cases also be automatically retrieved from the data processor
self.layer_dims = layer_dims
self.feed_forward = FeedForwardBlock(self.layer_dims)
self.num_labels = self.layer_dims[-1]
self.ph_output_type = "per_sequence"
self.model_type = "multilabel_text_classification"
self.task_name = task_name #used for connecting with the right output of the processor
self.class_weights = class_weights
self.pred_threshold = pred_threshold
if class_weights:
logger.info(f"Using class weights for task '{self.task_name}': {self.class_weights}")
            #TODO must balanced weight really be an instance attribute?
self.balanced_weights = nn.Parameter(
torch.tensor(class_weights), requires_grad=False
)
else:
self.balanced_weights = None
self.loss_fct = BCEWithLogitsLoss(pos_weight=self.balanced_weights,
reduction=loss_reduction)
self.generate_config()
def forward(self, X):
logits = self.feed_forward(X)
return logits
def logits_to_loss(self, logits, **kwargs):
label_ids = kwargs.get(self.label_tensor_name).to(dtype=torch.float)
loss = self.loss_fct(logits.view(-1, self.num_labels), label_ids.view(-1, self.num_labels))
per_sample_loss = loss.mean(1)
return per_sample_loss
def logits_to_probs(self, logits, **kwargs):
sigmoid = torch.nn.Sigmoid()
probs = sigmoid(logits)
probs = probs.cpu().numpy()
return probs
def logits_to_preds(self, logits, **kwargs):
probs = self.logits_to_probs(logits)
#TODO we could potentially move this to GPU to speed it up
pred_ids = [np.where(row > self.pred_threshold)[0] for row in probs]
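        # e.g. with pred_threshold=0.5, a probability row [0.8, 0.2, 0.6] yields indices [0, 2]
        # and therefore the first and third entries of label_list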
preds = []
for row in pred_ids:
preds.append([self.label_list[int(x)] for x in row])
return preds
def prepare_labels(self, **kwargs):
label_ids = kwargs.get(self.label_tensor_name)
label_ids = label_ids.cpu().numpy()
label_ids = [np.where(row == 1)[0] for row in label_ids]
labels = []
for row in label_ids:
labels.append([self.label_list[int(x)] for x in row])
return labels
def formatted_preds(self, logits, samples, **kwargs):
preds = self.logits_to_preds(logits)
probs = self.logits_to_probs(logits)
contexts = [sample.clear_text["text"] for sample in samples]
assert len(preds) == len(probs) == len(contexts)
res = {"task": "text_classification", "predictions": []}
for pred, prob, context in zip(preds, probs, contexts):
res["predictions"].append(
{
"start": None,
"end": None,
"context": f"{context}",
"label": f"{pred}",
"probability": prob,
}
)
return res
class TokenClassificationHead(PredictionHead):
def __init__(self, layer_dims, task_name="ner", **kwargs):
super(TokenClassificationHead, self).__init__()
self.layer_dims = layer_dims
self.feed_forward = FeedForwardBlock(self.layer_dims)
self.num_labels = self.layer_dims[-1]
self.loss_fct = CrossEntropyLoss(reduction="none")
self.ph_output_type = "per_token"
self.model_type = "token_classification"
self.task_name = task_name
self.generate_config()
def forward(self, X):
logits = self.feed_forward(X)
return logits
def logits_to_loss(
self, logits, initial_mask, padding_mask=None, **kwargs
):
label_ids = kwargs.get(self.label_tensor_name)
# Todo: should we be applying initial mask here? Loss is currently calculated even on non initial tokens
active_loss = padding_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = label_ids.view(-1)[active_loss]
loss = self.loss_fct(
active_logits, active_labels
        ) # loss is a 1 dimensional (active) token loss
return loss
def logits_to_preds(self, logits, initial_mask, **kwargs):
preds_word_all = []
preds_tokens = torch.argmax(logits, dim=2)
preds_token = preds_tokens.detach().cpu().numpy()
# used to be: padding_mask = padding_mask.detach().cpu().numpy()
initial_mask = initial_mask.detach().cpu().numpy()
for idx, im in enumerate(initial_mask):
preds_t = preds_token[idx]
# Get labels and predictions for just the word initial tokens
preds_word_id = self.initial_token_only(preds_t, initial_mask=im)
preds_word = [self.label_list[pwi] for pwi in preds_word_id]
preds_word_all.append(preds_word)
return preds_word_all
def logits_to_probs(self, logits, initial_mask, return_class_probs, **kwargs):
# get per token probs
softmax = torch.nn.Softmax(dim=2)
token_probs = softmax(logits)
if return_class_probs:
token_probs = token_probs
else:
token_probs = torch.max(token_probs, dim=2)[0]
token_probs = token_probs.cpu().numpy()
# convert to per word probs
all_probs = []
initial_mask = initial_mask.detach().cpu().numpy()
for idx, im in enumerate(initial_mask):
probs_t = token_probs[idx]
probs_words = self.initial_token_only(probs_t, initial_mask=im)
all_probs.append(probs_words)
return all_probs
def prepare_labels(self, initial_mask, **kwargs):
label_ids = kwargs.get(self.label_tensor_name)
labels_all = []
label_ids = label_ids.cpu().numpy()
for label_ids_one_sample, initial_mask_one_sample in zip(
label_ids, initial_mask
):
label_ids = self.initial_token_only(
label_ids_one_sample, initial_mask_one_sample
)
labels = [self.label_list[l] for l in label_ids]
labels_all.append(labels)
return labels_all
@staticmethod
def initial_token_only(seq, initial_mask):
ret = []
for init, s in zip(initial_mask, seq):
if init:
ret.append(s)
return ret
def formatted_preds(self, logits, initial_mask, samples, return_class_probs=False, **kwargs):
preds = self.logits_to_preds(logits, initial_mask)
probs = self.logits_to_probs(logits, initial_mask,return_class_probs)
# align back with original input by getting the original word spans
spans = []
for sample, sample_preds in zip(samples, preds):
word_spans = []
span = None
for token, offset, start_of_word in zip(
sample.tokenized["tokens"],
sample.tokenized["offsets"],
sample.tokenized["start_of_word"],
):
if start_of_word:
# previous word has ended unless it's the very first word
if span is not None:
word_spans.append(span)
span = {"start": offset, "end": offset + len(token)}
else:
# expand the span to include the subword-token
span["end"] = offset + len(token.replace("##", ""))
word_spans.append(span)
spans.append(word_spans)
assert len(preds) == len(probs) == len(spans)
res = {"task": "ner", "predictions": []}
for preds_seq, probs_seq, sample, spans_seq in zip(
preds, probs, samples, spans
):
tags, spans_seq = convert_iob_to_simple_tags(preds_seq, spans_seq)
seq_res = []
for tag, prob, span in zip(tags, probs_seq, spans_seq):
context = sample.clear_text["text"][span["start"] : span["end"]]
seq_res.append(
{
"start": span["start"],
"end": span["end"],
"context": f"{context}",
"label": f"{tag}",
"probability": prob,
}
)
res["predictions"].extend(seq_res)
return res
class BertLMHead(PredictionHead):
def __init__(self, hidden_size, vocab_size, hidden_act="gelu", task_name="lm", **kwargs):
super(BertLMHead, self).__init__()
self.hidden_size = hidden_size
self.hidden_act = hidden_act
self.vocab_size = vocab_size
self.loss_fct = CrossEntropyLoss(reduction="none", ignore_index=-1)
self.num_labels = vocab_size # vocab size
# TODO Check if weight init needed!
# self.apply(self.init_bert_weights)
self.ph_output_type = "per_token"
self.model_type = "language_modelling"
self.task_name = task_name
self.generate_config()
# NN Layers
# this is the "transform" module in the pytorch-transformers repo
self.dense = nn.Linear(self.hidden_size, self.hidden_size)
self.transform_act_fn = ACT2FN[self.hidden_act]
self.LayerNorm = BertLayerNorm(self.hidden_size, eps=1e-12)
# this is the "decoder" in the pytorch-transformers repo
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(hidden_size,
vocab_size,
bias=False)
self.bias = nn.Parameter(torch.zeros(vocab_size))
@classmethod
def load(cls, pretrained_model_name_or_path):
if os.path.exists(pretrained_model_name_or_path) \
and "config.json" in pretrained_model_name_or_path \
and "prediction_head" in pretrained_model_name_or_path:
            config_file = pretrained_model_name_or_path
# a) FARM style
model_file = cls._get_model_file(config_file)
config = json.load(open(config_file))
prediction_head = cls(**config)
logger.info("Loading prediction head from {}".format(model_file))
prediction_head.load_state_dict(torch.load(model_file, map_location=torch.device("cpu")))
else:
# b) pytorch-transformers style
# load weights from bert model
# (we might change this later to load directly from a state_dict to generalize for other language models)
bert_with_lm = BertForPreTraining.from_pretrained(pretrained_model_name_or_path)
# init empty head
head = cls(hidden_size=bert_with_lm.config.hidden_size,
vocab_size=bert_with_lm.config.vocab_size,
hidden_act=bert_with_lm.config.hidden_act)
# load weights
head.dense.load_state_dict(bert_with_lm.cls.predictions.transform.dense.state_dict())
head.LayerNorm.load_state_dict(bert_with_lm.cls.predictions.transform.LayerNorm.state_dict())
head.decoder.load_state_dict(bert_with_lm.cls.predictions.decoder.state_dict())
head.bias.data.copy_(bert_with_lm.cls.predictions.bias)
del bert_with_lm
return head
def set_shared_weights(self, shared_embedding_weights):
self.decoder.weight = shared_embedding_weights
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
lm_logits = self.decoder(hidden_states) + self.bias
return lm_logits
def logits_to_loss(self, logits, **kwargs):
lm_label_ids = kwargs.get(self.label_tensor_name)
batch_size = lm_label_ids.shape[0]
masked_lm_loss = self.loss_fct(
logits.view(-1, self.num_labels), lm_label_ids.view(-1)
)
per_sample_loss = masked_lm_loss.view(-1, batch_size).mean(dim=0)
return per_sample_loss
def logits_to_preds(self, logits, **kwargs):
logits = logits.cpu().numpy()
lm_label_ids = kwargs.get(self.label_tensor_name).cpu().numpy()
lm_preds_ids = logits.argmax(2)
# apply mask to get rid of predictions for non-masked tokens
assert lm_preds_ids.shape == lm_label_ids.shape
lm_preds_ids[lm_label_ids == -1] = -1
lm_preds_ids = lm_preds_ids.tolist()
preds = []
# we have a batch of sequences here. we need to convert for each token in each sequence.
for pred_ids_for_sequence in lm_preds_ids:
preds.append(
[self.label_list[int(x)] for x in pred_ids_for_sequence if int(x) != -1]
)
return preds
def prepare_labels(self, **kwargs):
label_ids = kwargs.get(self.label_tensor_name)
label_ids = label_ids.cpu().numpy().tolist()
labels = []
# we have a batch of sequences here. we need to convert for each token in each sequence.
for ids_for_sequence in label_ids:
labels.append([self.label_list[int(x)] for x in ids_for_sequence if int(x) != -1])
return labels
class NextSentenceHead(TextClassificationHead):
"""
Almost identical to a TextClassificationHead. Only difference: we can load the weights from
a pretrained language model that was saved in the pytorch-transformers style (all in one model).
"""
@classmethod
def load(cls, pretrained_model_name_or_path):
if os.path.exists(pretrained_model_name_or_path) \
and "config.json" in pretrained_model_name_or_path \
and "prediction_head" in pretrained_model_name_or_path:
            config_file = pretrained_model_name_or_path
# a) FARM style
#TODO validate saving/loading after switching to processor.tasks
model_file = cls._get_model_file(config_file)
config = json.load(open(config_file))
prediction_head = cls(**config)
logger.info("Loading prediction head from {}".format(model_file))
prediction_head.load_state_dict(torch.load(model_file, map_location=torch.device("cpu")))
else:
# b) pytorch-transformers style
# load weights from bert model
# (we might change this later to load directly from a state_dict to generalize for other language models)
bert_with_lm = BertForPreTraining.from_pretrained(pretrained_model_name_or_path)
# init empty head
head = cls(layer_dims=[bert_with_lm.config.hidden_size, 2], loss_ignore_index=-1, task_name="nextsentence")
# load weights
head.feed_forward.feed_forward[0].load_state_dict(bert_with_lm.cls.seq_relationship.state_dict())
del bert_with_lm
return head
class FeedForwardBlock(nn.Module):
""" A feed forward neural network of variable depth and width. """
def __init__(self, layer_dims, **kwargs):
# Todo: Consider having just one input argument
super(FeedForwardBlock, self).__init__()
# If read from config the input will be string
n_layers = len(layer_dims) - 1
layers_all = []
# TODO: IS this needed?
self.output_size = layer_dims[-1]
for i in range(n_layers):
size_in = layer_dims[i]
size_out = layer_dims[i + 1]
layer = nn.Linear(size_in, size_out)
layers_all.append(layer)
self.feed_forward = nn.Sequential(*layers_all)
def forward(self, X):
logits = self.feed_forward(X)
return logits
class QuestionAnsweringHead(PredictionHead):
"""
A question answering head predicts the start and end of the answer on token level.
"""
def __init__(self, layer_dims, task_name="question_answering", **kwargs):
"""
:param layer_dims: dimensions of Feed Forward block, e.g. [768,2], for adjusting to BERT embedding. Output should be always 2
:type layer_dims: List[Int]
:param kwargs: placeholder for passing generic parameters
:type kwargs: object
"""
super(QuestionAnsweringHead, self).__init__()
self.layer_dims = layer_dims
self.feed_forward = FeedForwardBlock(self.layer_dims)
self.num_labels = self.layer_dims[-1]
self.ph_output_type = "per_token_squad"
self.model_type = (
"span_classification"
) # predicts start and end token of answer
self.task_name = task_name
self.generate_config()
def forward(self, X):
"""
One forward pass through the prediction head model, starting with language model output on token level
:param X: Output of language model, of shape [batch_size, seq_length, LM_embedding_dim]
:type X: torch.tensor
:return: (start_logits, end_logits), logits for the start and end of answer
:rtype: tuple[torch.tensor,torch.tensor]
"""
logits = self.feed_forward(X)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
return (start_logits, end_logits)
def logits_to_loss(self, logits, start_position, end_position, **kwargs):
"""
Combine predictions and labels to a per sample loss.
:param logits: (start_logits, end_logits), logits for the start and end of answer
:type logits: tuple[torch.tensor,torch.tensor]
:param start_position: tensor with indices of START positions per sample
:type start_position: torch.tensor
:param end_position: tensor with indices of END positions per sample
:type end_position: torch.tensor
:param kwargs: placeholder for passing generic parameters
:type kwargs: object
:return: per_sample_loss: Per sample loss : )
:rtype: torch.tensor
"""
(start_logits, end_logits) = logits
if len(start_position.size()) > 1:
start_position = start_position.squeeze(-1)
if len(end_position.size()) > 1:
end_position = end_position.squeeze(-1)
# sometimes the start/end positions (the labels read from file) are outside our model predictions, we ignore these terms
ignored_index = start_logits.size(1)
start_position.clamp_(0, ignored_index)
end_position.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index, reduction="none")
start_loss = loss_fct(start_logits, start_position)
end_loss = loss_fct(end_logits, end_position)
per_sample_loss = (start_loss + end_loss) / 2
return per_sample_loss
def logits_to_preds(self, logits, **kwargs):
"""
Get the predicted index of start and end token of the answer.
:param logits: (start_logits, end_logits), logits for the start and end of answer
:type logits: tuple[torch.tensor,torch.tensor]
:param kwargs: placeholder for passing generic parameters
:type kwargs: object
:return: (start_idx, end_idx), start and end indices for all samples in batch
:rtype: (torch.tensor,torch.tensor)
"""
(start_logits, end_logits) = logits
start_logits = start_logits.cpu().numpy()
end_logits = end_logits.cpu().numpy()
num_batches = start_logits.shape[0]
no_answer_sum = start_logits[:,0] + end_logits[:,0]
best_answer_sum = np.zeros(num_batches)
# check if start or end point to the context. Context starts at segment id == 1 (question comes before at segment ids == 0)
segment_ids = kwargs['segment_ids'].data.cpu().numpy()
context_start = np.argmax(segment_ids,axis=1)
start_proposals = self._get_best_indexes(start_logits, 3)
end_proposals = self._get_best_indexes(end_logits, 3)
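        # illustrative example (logit values assumed): with 3 proposals per side, e.g. starts
        # [5, 12, 40] and ends [7, 13, 41] for one sample, the meshgrid below scores all 9
        # (start, end) pairs and the loop keeps the best-scoring pair that lies inside the
        # context with start <= end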
best_indices = np.zeros((num_batches,2),dtype=int) # dimension [:,0] is start, [:,1] is end
for i_batch in range(num_batches):
# for each sample create mesh of possible start + end combinations and their score as sum of logits
mesh_idx = np.meshgrid(start_proposals[i_batch,:],end_proposals[i_batch])
start_comb = mesh_idx[0].flatten()
end_comb = mesh_idx[1].flatten()
scores = start_logits[i_batch,start_comb] + end_logits[i_batch,end_comb]
#iterate over combinations and eliminate impossible ones
for idx in np.argsort(scores)[::-1]:
start = start_comb[idx]
end = end_comb[idx]
if(start < context_start[i_batch]): #TODO check for context end as well
continue
if(end < context_start[i_batch]):
continue
if(start > end):
continue
# maybe need check: end - start > max answer len. How to set max answer len?
                # maybe need check whether start/end idx refers to start of word and not to a ##... continuation
best_indices[i_batch,0] = start
best_indices[i_batch,1] = end
best_answer_sum[i_batch] = scores[idx]
break
# TODO upweight no answers here?
idx_no_answer = no_answer_sum >= best_answer_sum
best_indices[idx_no_answer,:] = 0
probabilities = np.zeros(num_batches)
for i_batch in range(num_batches):
# huggingface takes the softmax of sum of both logits for their n best predictions.
# Since we have only one prediction for now, it makes sense to take the mean of both start + end probs
probabilities[i_batch] = (expit(start_logits[i_batch, best_indices[i_batch, 0]]) +
expit(end_logits[i_batch, best_indices[i_batch, 1]])) / 2
return (best_indices[:,0], best_indices[:,1], probabilities)
def prepare_labels(self, start_position, end_position, **kwargs):
"""
We want to pack labels into a tuple, to be compliant with later functions
:param start_position: indices of answer start positions (in token space)
:type start_position: torch.tensor
:param end_position: indices of answer end positions (in token space)
:type end_position: torch.tensor
:param kwargs: placeholder for passing generic parameters
:type kwargs: object
:return: tuplefied positions
:rtype: tuple(torch.tensor,torch.tensor)
"""
return (start_position, end_position)
def formatted_preds(self, logits, samples, segment_ids, **kwargs) -> [str]:
"""
Format predictions into actual answer strings (substrings of context). Used for Inference!
:param logits: (start_logits, end_logits), logits for the start and end of answer
:type logits: tuple[torch.tensor,torch.tensor]
:param samples: converted samples, to get a hook onto the actual text
:type samples: FARM.data_handler.samples.Sample
:param segment_ids: used to separate question and context tokens
:type segment_ids: torch.tensor
:param kwargs: placeholder for passing generic parameters
:type kwargs: object
:return: Answers to the (ultimate) questions
:rtype: list(str)
"""
all_preds = []
# TODO fix inference bug, model.forward is somehow packing logits into list
# logits = logits[0]
start_idx, end_idx, probs = self.logits_to_preds(logits=logits, segment_ids=segment_ids)
# we have char offsets for the context passage in samples.tokenized
# we have start and end idx for the selected answer, but with the question tokens in front
# lets shift this by the index of first segment ID corresponding to context
segment_ids = segment_ids.cpu().numpy()
shifts = np.argmax(segment_ids > 0, axis=1)
start_idx = start_idx - shifts
start_idx[start_idx < 0] = 0
end_idx = end_idx - shifts
end_idx[end_idx < 0] = 0
end_idx = end_idx + 1 # slicing up to and including end
result = {}
result["task"] = "qa"
# TODO features and samples might not be aligned. We still sometimes split a sample into multiple features
for i, sample in enumerate(samples):
pred = {}
pred["context"] = sample.clear_text["question_text"]
pred["probability"] = probs[i]
try: #char offsets or indices might be out of range, then we just return no answer
start = sample.tokenized["offsets"][start_idx[i]]
end = sample.tokenized["offsets"][end_idx[i]]
pred["start"] = start
pred["end"] = end
answer = " ".join(sample.clear_text["doc_tokens"])[start:end]
answer = answer.strip()
except Exception as e:
answer = ""
logger.info(e)
pred["label"] = answer
all_preds.append(pred)
result["predictions"] = all_preds
return result
def _get_best_indexes(self, logits, n_best_size):
"""Get the n-best logits from a numpy array."""
idx = np.argsort(logits,axis=1)[:,-n_best_size:]
return idx
``` |
{
"source": "jinnerbichler/home-automflashion",
"score": 2
} |
#### File: config/custom_components/coffee_state_listener.py
```python
import logging
from homeassistant.helpers.event import track_state_change
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'coffee_state_listener'
SINGLE_COFFEE_SCRIPT = 'script.coffee_single'
DOUBLE_COFFEE_SCRIPT = 'script.coffee_double'
INIT_COFFEE_SCRIPT = 'script.coffee_init'
FUND_COFFEE_SCRIPT = 'script.coffee_fund'
CLOSE_COFFEE_SCRIPT = 'script.coffee_close'
BALANCE_COFFEE_SENSOR = 'sensor.coffee_machine_balance'
ADDRESSES_COFFEE_SENSOR = 'sensor.coffee_machine_addresses'
PROVIDER_TRANSACTION = 'weblink.coffee_provider_transaction'
COFFEE_TRANSACTION = 'weblink.coffee_machine_transaction'
COFFEE_FLASH_SERVER = 'weblink.coffee_flash_server'
PROVIDER_FLASH_SERVER = 'weblink.provider_flash_server'
def setup(hass, config):
def coffee_state_changed(entity_id, old_state, new_state):
_LOGGER.info('{} changed to {}'.format(entity_id, new_state.state))
if new_state.state in ['unknown', 'INITIALISING', 'UNINITIALISED', 'ERROR']:
# hide all but init
hide_entity(hass, entity_id=INIT_COFFEE_SCRIPT, hidden=False)
for e in [SINGLE_COFFEE_SCRIPT, DOUBLE_COFFEE_SCRIPT, FUND_COFFEE_SCRIPT, ADDRESSES_COFFEE_SENSOR,
CLOSE_COFFEE_SCRIPT, BALANCE_COFFEE_SENSOR, PROVIDER_TRANSACTION, COFFEE_TRANSACTION,
COFFEE_FLASH_SERVER, PROVIDER_FLASH_SERVER]:
hide_entity(hass, entity_id=e, hidden=True)
elif new_state.state == 'INITIALISED':
hide_entity(hass, entity_id=INIT_COFFEE_SCRIPT, hidden=True)
hide_entity(hass, entity_id=FUND_COFFEE_SCRIPT, hidden=False)
hide_entity(hass, entity_id=BALANCE_COFFEE_SENSOR, hidden=True)
hide_entity(hass, entity_id=ADDRESSES_COFFEE_SENSOR, hidden=False)
elif new_state.state == 'FUNDED':
hide_entity(hass, entity_id=FUND_COFFEE_SCRIPT, hidden=True)
hide_entity(hass, entity_id=BALANCE_COFFEE_SENSOR, hidden=False)
hide_entity(hass, entity_id=CLOSE_COFFEE_SCRIPT, hidden=False)
hide_entity(hass, entity_id=SINGLE_COFFEE_SCRIPT, hidden=False)
hide_entity(hass, entity_id=DOUBLE_COFFEE_SCRIPT, hidden=False)
hide_entity(hass, entity_id=ADDRESSES_COFFEE_SENSOR, hidden=False)
elif new_state.state == 'CLOSING':
hide_entity(hass, entity_id=SINGLE_COFFEE_SCRIPT, hidden=True)
hide_entity(hass, entity_id=DOUBLE_COFFEE_SCRIPT, hidden=True)
elif new_state.state == 'CLOSED':
hide_entity(hass, entity_id=INIT_COFFEE_SCRIPT, hidden=False)
# hide all but init and transaction
for e in [SINGLE_COFFEE_SCRIPT, DOUBLE_COFFEE_SCRIPT, FUND_COFFEE_SCRIPT, ADDRESSES_COFFEE_SENSOR,
CLOSE_COFFEE_SCRIPT, BALANCE_COFFEE_SENSOR, COFFEE_TRANSACTION, PROVIDER_TRANSACTION]:
hide_entity(hass, entity_id=e, hidden=True)
elif new_state.state in ['NO_FUNDS', 'NO_ADDRESSES_LEFT']:
for e in [SINGLE_COFFEE_SCRIPT, DOUBLE_COFFEE_SCRIPT, FUND_COFFEE_SCRIPT,
INIT_COFFEE_SCRIPT, PROVIDER_TRANSACTION, COFFEE_TRANSACTION]:
hide_entity(hass, entity_id=e, hidden=True)
hide_entity(hass, entity_id=CLOSE_COFFEE_SCRIPT, hidden=False)
hide_entity(hass, entity_id=BALANCE_COFFEE_SENSOR, hidden=False)
track_state_change(hass, entity_ids=['sensor.coffee_machine_state'], action=coffee_state_changed)
return True
def hide_entity(hass, entity_id, hidden):
_LOGGER.info("Changing hidden state of {} to {}".format(entity_id, hidden))
entity = hass.states.get(entity_id)
if entity:
attributes = {k: v for k, v in entity.attributes.items()}
attributes['hidden'] = hidden
hass.states.set(entity_id, entity.state, attributes, force_update=True)
```
#### File: home-automflashion/iri-node/fabfile.py
```python
import time
from fabric.api import run, env, task, put, cd, local, sudo
env.use_ssh_config = True
env.hosts = ['iota_node']
@task(default=True)
def iri():
run('mkdir -p /srv/private-tangle/')
with cd('/srv/private-tangle'):
put('.', '.')
run('docker-compose --project-name private-tangle pull')
run('docker-compose --project-name private-tangle up -d --force-recreate iri')
@task
def tools():
with cd('/srv/private-tangle'):
put('.', '.')
run('docker-compose --project-name private-tangle pull')
run('docker-compose --project-name private-tangle up -d --no-deps --force-recreate coordinator explorer')
run('docker-compose --project-name private-tangle logs -f --tail 100 coordinator explorer')
@task
def stop():
with cd('/srv/private-tangle'):
run('docker-compose --project-name private-tangle stop')
@task
def stop_coord():
with cd('/srv/private-tangle'):
run('docker-compose --project-name private-tangle stop coordinator')
@task
def down():
with cd('/srv/private-tangle'):
run('docker-compose --project-name private-tangle down -v')
@task
def logs():
with cd('/srv/private-tangle'):
run('docker-compose --project-name private-tangle logs -f --tail 100')
@task
def logs_coord():
with cd('/srv/private-tangle'):
run('docker-compose --project-name private-tangle logs -f --tail 100 coordinator')
@task
def logs_all():
with cd('/srv/private-tangle'):
run('docker-compose logs -f')
@task
def reset():
# stop services and delete database
down()
time.sleep(1)
run('rm -rf /srv/private-tangle/testnet_db/')
# restart all services
iri()
time.sleep(5)
tools()
``` |
{
"source": "jinnerbichler/neural-politician",
"score": 2
} |
#### File: neural-politician/intelligence/speech_data.py
```python
import pickle
import time
import urllib.request
from pathlib import Path
from typing import List
import spacy
import feedparser
import re
import unicodedata
import itertools
import logging
from datetime import datetime
from time import mktime
import requests
import sys
from bs4 import BeautifulSoup
from collections import defaultdict, namedtuple, OrderedDict, Counter
import os
from spacy.tokens import Doc
import numpy as np
from keras.utils import to_categorical, Sequence
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format='%(asctime)s %(name)s %(levelname)s: %(message)s')
logger = logging.getLogger('scraper')
logger.setLevel(logging.DEBUG)
DROPBOX_TOKEN = os.getenv('DROPBOX_TOKEN', '')
DROPBOX_SESSION_PATH = Path('/neural-politician') \
.joinpath(datetime.now().strftime('%Y-%m-%d_%H:%M:%S'))
PARLAMENT_BASE_URL = 'https://www.parlament.gv.at'
POLITICIANS = ['kurz', 'kern', 'strache', 'strolz']
SPEECHES_FILE = './data/speeches.pickle'
VOCAB_VECTORS_FILE = './data/word_vectors.pickle'
DATASET_FILE = './data/dataset.pickle'
PERIOD_FEEDS = {
'XXIV': 'https://www.parlament.gv.at/PAKT/PLENAR/filter.psp?view=RSS&RSS=RSS&jsMode=RSS&xdocumentUri=%2FPAKT%2FPLENAR%2Findex.shtml&view=RSS&NRBRBV=NR&GP=XXIV&R_SISTEI=SI&LISTE=Anzeigen&listeId=1070&FBEZ=FP_007',
'XXV': 'https://www.parlament.gv.at/PAKT/PLENAR/filter.psp?view=RSS&RSS=RSS&jsMode=RSS&xdocumentUri=%2FPAKT%2FPLENAR%2Findex.shtml&view=RSS&NRBRBV=NR&GP=XXV&R_SISTEI=SI&LISTE=Anzeigen&listeId=1070&FBEZ=FP_007',
'XXVI': 'https://www.parlament.gv.at/PAKT/PLENAR/filter.psp?view=RSS&RSS=RSS&jsMode=RSS&xdocumentUri=%2FPAKT%2FPLENAR%2Findex.shtml&view=RSS&NRBRBV=NR&GP=XXVI&R_SISTEI=SI&LISTE=Anzeigen&listeId=1070&FBEZ=FP_007',
}
Sentence = namedtuple('Sentence', ['words', 'politician', 'speech_id', 'sent_id'])
WordVector = namedtuple('WordVector', ['id', 'word', 'vector'])
def collect():
"""
Fetches the RSS feed for each period and extracts protocols of executed sessions.
Collected speeches are stored in a pickle file for later usage.
"""
all_speeches = defaultdict(list)
for period, feed_url in PERIOD_FEEDS.items():
logger.info('Processing period {} ({})'.format(period, feed_url))
feed = feedparser.parse(feed_url)
fetched_sessions = [] # avoid fetching a session twice
for session_iter, session in enumerate(reversed(feed['items']), start=1):
# extract session information
title = session['title']
published = datetime.fromtimestamp(mktime(session['published_parsed']))
session_url = session['link']
# check if sessions has already been fetched
if title in fetched_sessions:
continue
fetched_sessions.append(title)
logger.info('Fetching session "{}" {}/{} ({})'.format(
title, session_iter, len(feed['items']), session_url))
# fetch description of session
response = requests.get(session_url)
soup = BeautifulSoup(response.text, 'html5lib')
lists = soup.find_all('li')
# check if protocol is available
for li in [l for l in lists if 'Stenographisches Protokoll' in str(l)]:
for a in [a for a in li.find_all('a') if 'html' in str(a).lower()]:
# parse protocol
protocol_url = PARLAMENT_BASE_URL + a.attrs['href']
logger.debug('Fetching protocol {}'.format(protocol_url))
sessions_speeches = parse_protocol(protocol_url)
# enrich extracted speeches with session information
for politician, speeches in sessions_speeches.items():
for speech in speeches:
speech.update({'session': {'period': period,
'title': title,
'published': published,
'url': session_url}})
all_speeches[politician].extend(speeches)
num_speeches = sum([len(s) for s in all_speeches.values()])
logger.info('Current speech count: {}'.format(num_speeches))
# store speeches
with open(SPEECHES_FILE, 'wb') as pickle_file:
pickle.dump(all_speeches, pickle_file)
# convert to separate text file
split()
num_speeches = sum([len(s) for s in all_speeches.values()])
logger.info('Total speech count: {}'.format(num_speeches))
def parse_protocol(url):
"""
:param url: URL of the session protocol page
:return: dictionary, which maps a politician to a list of speeches
"""
# fetch protocol
response = requests.get(url)
response_text = response.text.replace('­', '') # remove hyphens
soup = BeautifulSoup(response_text, 'html5lib')
speeches = defaultdict(list)
# the first n sections are part of the table of contents
for section_iter in itertools.count(start=3):
# extract relevant paragraphs
section = soup.find('div', class_='WordSection{}'.format(section_iter))
if not section:
break
first_paragraph = section.find('p', class_='StandardRB')
other_paragraphs = section.find_all('p', class_='MsoNormal')
# extract speech
speech = first_paragraph.get_text() if first_paragraph else ''
for paragraph in other_paragraphs:
speech = '{} {}'.format(speech, paragraph.get_text())
speech = unicodedata.normalize('NFKC', speech)
speech = speech.replace('\n', ' ')
# extract name and role
prefix = re.match(r'^(.*?): ', speech)
prefix = prefix.group() if prefix else ''
speech = speech.replace(prefix, '')
prefix = prefix.strip()
match = re.match(r'^([\w\-]+) (.*?)[(|:]', prefix)
role = name = None
if match:
role = match.group(1).strip()
name = match.group(2).strip()
party = re.search(r'\((.*?)\)', prefix)
party = party.group(1) if party else None
# remove parenthesis in speech
speech = re.sub(r'\([^)]*\)', '', speech)
speech = re.sub(' +', ' ', speech) # remove double spaces
section_iter += 1
if not role or not name or not speech:
continue
# collect speeches of targeted politicians
for politician in POLITICIANS:
# 'Kurzmann' collides with 'Kurz'
if politician in name.lower() and 'Kurzmann' not in name:
logger.debug('Found speech (name: {}, role: {}, party: {})'.format(
name, role, party))
speeches[politician].append({'name': name, 'role': role,
'party': party, 'speech': speech})
return speeches
def split():
"""
Loads pickleds speeches and splits them in to separate
textfiles (i.e. on per politician).
"""
with open(SPEECHES_FILE, 'rb') as pickle_file:
speeches = pickle.load(pickle_file)
for politician, speeches in speeches.items():
filename = './data/{}.txt'.format(politician)
with open(filename, 'wt', encoding='utf8') as speeches_file:
num_char = 0
num_words = 0
for speech in speeches:
# write header and text of speech to file
session = speech['session']
header = '# {period:} - {title:} am {published:} ({url:})\n'.format(
**session)
speeches_file.write(header)
# write speech
speech_text = speech['speech'].replace('- - ', '') # artifact left over from removing parentheses
speeches_file.write(speech_text + '\n\n')
# count metrics
num_char += len(speech['speech'])
num_words += len(speech['speech'].split())
logger.info('Metrics of {}: chars: {}, words: {}'.format(
politician, num_char, num_words))
def read_speeches(politician):
# type: (str) -> List[str]
single_speeches = []
with open('./data/{}.txt'.format(politician), 'rt', encoding='utf8') as speeches_file:
for line in speeches_file.readlines():
# ignore comments and empty lines
if line.startswith('#') or len(line) < 2:
continue
# clean speech text
speech = re.sub(r'\[[^)]*\]', '', line) # remove []
speech = speech.replace('\\', '') # replace @ sign
speech = speech.replace('@', 'at') # replace @ sign
speech = speech.replace('&', 'und') # replace and sigh
# speech = speech.replace('?', '.') # replace question mark
# speech = speech.replace('!', '.') # replace exlamation mark
speech = speech.replace('\n', '') # remove new line
speech = speech.replace('(', '').replace(')', '') # remove last parenthesis
speech = speech.replace('%', 'Prozent') # replace percentage sign
speech = speech.replace('_i', 'I') # replace gender-related underscores
speech = speech.replace('*', '') # remove invalid star
speech = speech.replace('+', '') # remove invalid plus
speech = speech.replace('’', '') # replace appostrove
speech = speech.replace('‘', '') # replace appostrove
speech = speech.replace('`', '') # replace appostrove
speech = speech.replace('“', '\'') # replace appostrove
speech = speech.replace('„', '\'') # replace appostrove
speech = speech.replace('–', '-') # replace proper hyphen
speech = speech.replace('‐', '-') # replace proper hyphen
speech = speech.replace('§', '') # remove paragrap sign
speech = speech.replace('‚', ',') # replace poper comma
speech = speech.replace(';', ',') # replace poper semi colon
speech = speech.replace('ê', 'e') # remove invalid derivative of e
speech = speech.replace('é', 'e') # remove invalid derivative of e
speech = speech.replace('à', 'a') # remove invalid derivative of a
speech = speech.replace('á', 'a') # remove invalid derivative of a
speech = speech.replace('í', 'i') # remove invalid derivative of i
speech = speech.replace('ć', 'c') # remove invalid derivative of c
speech = speech.replace('ğ', 'g') # remove invalid derivative of g
speech = speech.replace('ń', 'n') # remove invalid derivative of c
speech = speech.replace('š', 's') # remove invalid derivative of s
speech = speech.replace('ž', 'z') # remove invalid derivative of z
speech = speech.replace('!', '.')
speech = speech.replace('?', '.')
speech = re.sub(' +', ' ', speech) # remove consecutive spaces
single_speeches.append(speech)
return single_speeches
def extract_sentences(try_cached=True):
# type: (bool) -> List[Sentence]
sentences = []
sents_file = Path('./data/sentences.pickle')
if Path(sents_file).exists() and try_cached:
logger.info('Loading sentences from cache %s', sents_file)
with open(str(sents_file), 'rb') as pickle_file:
sentences = pickle.load(pickle_file)
logger.info('Loaded %d sentences from cache', len(sentences))
else:
nlp = spacy.load('de')
for politician in POLITICIANS:
logger.info('Extracting sentences of %s...', politician)
speeches = read_speeches(politician=politician)
for speech_id, speech in enumerate(speeches):
doc = nlp(speech) # type: Doc
sent_id = 0
for sent in doc.sents:
# check if valid sentence
if sent.text.startswith('-') or len(sent) < 3:
continue
sent_id += 1
words = [e.text for e in sent]
sentences.append(Sentence(words=words, politician=politician,
sent_id=sent_id, speech_id=speech_id))
logger.info('Extracted sentences. Current count %d', len(sentences))
with open(str(sents_file), 'wb') as pickle_file:
pickle.dump(sentences, pickle_file)
logger.info('Saved extracted sentences to %s', str(sents_file))
return sentences
def merge():
# extract all single speeches
all_speeches = []
for politician in POLITICIANS:
speeches = read_speeches(politician=politician)
all_speeches.extend(speeches)
# merge speeches
logger.info('Merging %d speeches', len(all_speeches))
merged_speeches = ' '.join(all_speeches)
# write to file
with open('./data/merged.txt', 'wt', encoding='utf8') as speeches_file:
speeches_file.write(merged_speeches)
return merged_speeches
def extract_word_vectors(sentences, try_cached=True):
# check if already extracted
if Path(VOCAB_VECTORS_FILE).exists() and try_cached:
with open(VOCAB_VECTORS_FILE, 'rb') as pickle_file:
return pickle.load(pickle_file)
# build the set of lower-cased words used in the speeches
words_speeches = {w.lower() for s in sentences for w in s.words}
# download word vectors if necessary
local_vec_file = Path('wiki.de.vec').absolute()
if not local_vec_file.exists():
logger.info('Downloading word vectors. This might take a while... ')
download_word_vectors(str(local_vec_file))
# extract necessary word vectors
word_vectors = OrderedDict()
vec_iter = 0
with open(str(local_vec_file), 'r') as vector_file:
for line in vector_file:
columns = line.split()
if len(columns) == 301: # word plus vector
word = columns[0]
if word in words_speeches:
if word in word_vectors: # word may appear twice
continue
vector = [float(v) for v in columns[-300:]]
word_vec = WordVector(id=len(word_vectors), word=word, vector=vector)
word_vectors[word] = word_vec
vec_iter += 1
if vec_iter % 50000 == 0:
logger.info('Checked {} words. Matches: {}'.format(
vec_iter, len(word_vectors)))
logger.info('Matches: {}. Not found: {}'.format(
len(word_vectors), len(words_speeches) - len(word_vectors)))
# store extracted vectors
with open(VOCAB_VECTORS_FILE, 'wb') as pickle_file:
pickle.dump(word_vectors, pickle_file)
logger.info(
'Wrote {} word vectors to {}'.format(len(word_vectors), VOCAB_VECTORS_FILE))
return word_vectors
def download_word_vectors(local_file):
def reporthook(count, block_size, total_size):
global start_time
if count == 0:
start_time = time.time()
return
duration = time.time() - start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
percent = int(count * block_size * 100 / total_size)
sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
(percent, progress_size / (1024 * 1024), speed, duration))
sys.stdout.flush()
logger.info('Downloading word vectors. This might take a while... ')
wordvec_url = 'https://s3-us-west-1.amazonaws.com/fasttext-vectors/wiki.de.vec'
urllib.request.urlretrieve(wordvec_url, local_file, reporthook)
class SpeechSequence(Sequence):
def __init__(self, sentences, output_size, batch_size, word_vectors, sequence_len,
oov_token=None):
self.batch_size = batch_size
self.sequence_length = sequence_len
self.oov_token = oov_token or '<UNK>'
self.raw_sentences = None
self.input_encoded = None
self.sequences = None
self.next_words = None
self.corpus_size = None
self.input_words = None
self.output_encoded = None
self.words_raw = [w.lower() for sent in sentences for w in sent.words]
# build input vocabulary
logger.debug('Building input vocabulary...')
self.word_vectors = word_vectors.copy()
self.word_vectors[self.oov_token] = WordVector(word=self.oov_token,
id=len(word_vectors),
vector=[0.0] * 300)
self.input_vocab = {w: wv.id for w, wv in self.word_vectors.items()}
self.input_word_ids = {v: k for k, v in self.input_vocab.items()}
self.input_vocab_size = len(self.word_vectors)
self.input_unk_id = len(word_vectors)
# tokenize and build OUTPUT vocabulary
logger.debug('Building output vocabulary...')
word_counts_raw = Counter(self.words_raw)
most_com = word_counts_raw.most_common(output_size - 1) # oov token will be added
output_w = sorted([tup[0] for tup in most_com])
self.output_vocab = {w: i for i, w in enumerate(output_w) if i < output_size}
self.output_vocab[self.oov_token] = len(self.output_vocab) # last element is oov
self.output_word_ids = {v: k for k, v in self.output_vocab.items()}
self.output_unk_id = self.output_vocab[self.oov_token]
self.output_vocab_size = len(self.output_vocab)
self.output_word_counts = {w: c for w, c in most_com}
output_unks = sum([v for k, v in word_counts_raw.items()
if k not in self.output_vocab])
self.output_word_counts[self.oov_token] = output_unks
logger.debug('Tokenized OUTPUT words. Vocab size: %d, Corpus size: %d, Unks %d',
self.output_vocab_size, len(self.words_raw), output_unks)
# encoding words
logger.debug('Encoding words...')
input_words = []
for word in self.words_raw:
input_word = word if word in self.word_vectors else self.oov_token
input_words.append(input_word)
# count words in vocabulary
self.input_word_counts = Counter(input_words)
self.input_vocab_size = len(self.input_word_counts)
input_corpus_size = len(input_words)
logger.debug('Tokenized INPUT words. Vocab size: %d, Corpus size: %d, Unks %d',
self.input_vocab_size, input_corpus_size,
self.input_word_counts[self.oov_token])
def save(self, path=None):
path = path or DATASET_FILE
with open(path, 'wb') as pickle_file:
pickle.dump(self, pickle_file)
@staticmethod
def load(path):
path = path or DATASET_FILE
with open(path, 'rb') as pickle_file:
return pickle.load(pickle_file)
def adapt(self, sentences):
# encode words
words = [w.lower() for sent in sentences for w in sent.words]
# filter single letter words
def map_words(word):
if len(word) == 1 and word not in ['.', ',']:
return self.oov_token
return word
words = list(map(map_words, words))
logger.info('Adapting {} words'.format(len(words)))
self.input_encoded = [self.input_vocab.get(w, self.input_unk_id)
for w in words]
self.output_encoded = [self.output_vocab.get(w, self.output_unk_id)
for w in words]
# create word sequences
input_sequences = list()
output_sequences = list()
logger.debug('Creating training sequences...')
for i in range(self.sequence_length, len(self.input_encoded)):
input_sequence = self.input_encoded[i - self.sequence_length:i + 1]
input_sequences.append(input_sequence)
output_sequence = self.output_encoded[i - self.sequence_length:i + 1]
output_sequences.append(output_sequence)
logger.debug('Created sequences. Total Sequences: %d' % len(input_sequences))
# split into x and y elements
input_sequences = np.array(input_sequences)
output_sequences = np.array(output_sequences)
self.sequences, self.next_words = input_sequences[:, :-1], output_sequences[:, -1]
def in_to_out(self, word_id):
word = self.input_word_ids.get(word_id, self.oov_token)
return self.output_vocab.get(word, self.output_unk_id)
def out_to_in(self, word_id):
word = self.output_word_ids.get(word_id, self.oov_token)
return self.input_vocab.get(word, self.input_unk_id)
def decode_input(self, encoded):
return [self.input_word_ids[e] for e in encoded]
def decode_input_string(self, encoded):
return ' '.join(self.decode_input(encoded))
def decode_output(self, encoded):
return [self.output_word_ids[e] for e in encoded]
def decode_output_string(self, encoded):
return ' '.join(self.decode_output(encoded))
def encode_output(self, words):
return [self.output_vocab.get(w.lower(), self.output_unk_id) for w in words]
def encode_input(self, words):
return [self.input_vocab.get(w.lower(), self.input_unk_id) for w in words]
def __len__(self):
return int(np.ceil(len(self.sequences) / float(self.batch_size)))
def __getitem__(self, idx):
batch_x = self.sequences[idx * self.batch_size:(idx + 1) * self.batch_size]
next_words = self.next_words[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_y = to_categorical(next_words, num_classes=self.output_vocab_size)
return batch_x, batch_y
def convert_vocab():
with open('./data/dataset.pickle', 'rb') as pickle_file:
dataset = pickle.load(pickle_file) # type: SpeechSequence
vocab = {'input': dataset.input_vocab, 'output': dataset.output_vocab}
with open('./data/raw_vocab.pickle', 'wb') as pickle_file:
pickle.dump(vocab, pickle_file)
logger.info('Converted vocabulary')
if __name__ == '__main__':
# collect data from open data portal
# collect()
# splits data into text files for each politician
# split()
# merges all speeches into one text file
# merge()
# extracts sentences and assign them to politicians
sentences = extract_sentences(try_cached=True)
word_vecs = extract_word_vectors(sentences)
#
# dataset = SpeechSequence(sentences=sentences, output_size=5000, batch_size=50,
# word_vectors=word_vecs, sequence_len=15, oov_token='<UNK>')
# dataset.adapt(sentences=sentences)
# convert_vocab()
``` |
{
"source": "jinniahn/curl_parser",
"score": 3
} |
#### File: curl_parser/curl_parser/parser.py
```python
from pprint import pprint
from argparse import ArgumentParser
from urllib.parse import parse_qsl
from http import cookies
import shlex
def parse(cmd):
# split
cmd = shlex.split(cmd)[1:]
# make argument parser to parse
parser = ArgumentParser()
parser.add_argument("-H", dest="headers", action="append")
parser.add_argument('url')
parser.add_argument('--data', dest="data")
parser.add_argument('--compressed', action='store_true')
try:
args = parser.parse_args(cmd)
ret = vars(args)
except Exception as e:
pprint(e)
return None
# strip a leading '$' left over from bash $'...'-style quoting of the --data payload
if args.data and args.data[0] == '$':
args.data = args.data[1:]
if args.data:
ret['data'] = parse_qsl(args.data)
headers = {}
for i in args.headers:
k, v = tuple(map(str.strip, i.split(':', 1)))
if k in headers:
headers[k] = list(headers[k])
headers[k].append(v)
else:
headers[k] = v
# cookie
if "Cookie" in headers:
C = cookies.SimpleCookie()
C.load(headers['Cookie'])
c = dict(( (k,C[k].value) for k in C ))
headers['Cookie'] = None
ret["cookies"] = c
ret["headers"] = headers
return ret
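# Minimal usage sketch (the curl command below is illustrative, not part of the original module):
#
# parse("curl 'https://example.com/api' -H 'Accept: application/json' -H 'Cookie: sid=abc123' --data 'q=test&page=2' --compressed")
# returns a dict with 'url', 'headers' (with 'Cookie' set to None), 'cookies' ({'sid': 'abc123'}),
# 'data' ([('q', 'test'), ('page', '2')]) and 'compressed' (True).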
``` |
{
"source": "jinniahn/jsh",
"score": 4
} |
#### File: jsh/jsh/loghandler.py
```python
class LineHandler():
'''Print Log line by line'''
def __init__(self):
self.last_line = None
self.first = True
def write(self, msg):
if self.first:
self.first = False
self.on_start()
# prepend any partial line buffered from a previous write so it is not lost
msg = (self.last_line or '') + msg
lines = msg.split('\n')
self.last_line = lines[-1]
lines = lines[:-1]
for l in lines:
self.on_line(l)
def close(self):
if self.last_line:
self.on_line(self.last_line)
self.on_end()
def on_line(self, line):
pass
def on_start(self):
pass
def on_end(self):
pass
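# Minimal sketch of a concrete handler (illustrative, not part of the original module):
#
# class PrintLineHandler(LineHandler):
#     def on_line(self, line):
#         print("LOG:", line)
#
# handler = PrintLineHandler()
# handler.write("first line\nsecond line\n")  # complete lines are emitted immediately
# handler.write("trailing line")              # no newline yet, so this stays buffered
# handler.close()                             # flushes the buffered line, then calls on_end()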
``` |
{
"source": "jinnig/airbyte",
"score": 2
} |
#### File: source-intercom/source_intercom/source.py
```python
import time
from abc import ABC
from datetime import datetime
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple
import requests
from airbyte_cdk.logger import AirbyteLogger
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http import HttpStream
from airbyte_cdk.sources.streams.http.auth import HttpAuthenticator, TokenAuthenticator
class IntercomStream(HttpStream, ABC):
url_base = "https://api.intercom.io/"
# https://developers.intercom.com/intercom-api-reference/reference#rate-limiting
queries_per_minute = 1000 # 1000 queries per minute == 16.67 req per sec
primary_key = "id"
data_fields = ["data"]
def __init__(
self,
authenticator: HttpAuthenticator,
start_date: str = None,
**kwargs,
):
self.start_date = start_date
super().__init__(authenticator=authenticator)
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
"""
Abstract method of HttpStream - should be overridden.
Returning None means there are no more pages to read in response.
"""
next_page = response.json().get("pages", {}).get("next")
if next_page:
return {"starting_after": next_page["starting_after"]}
else:
return None
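# For reference, the pagination above expects a "pages" section in the JSON response shaped like
# {"pages": {"next": {"starting_after": "<cursor>"}}} (field names taken from the code; the cursor value is opaque).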
def request_params(self, next_page_token: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]:
params = {}
if next_page_token:
params.update(**next_page_token)
return params
def request_headers(self, **kwargs) -> Mapping[str, Any]:
return {"Accept": "application/json"}
def read_records(self, *args, **kwargs) -> Iterable[Mapping[str, Any]]:
try:
yield from super().read_records(*args, **kwargs)
except requests.exceptions.HTTPError as e:
error_message = e.response.text
if error_message:
self.logger.error(f"Stream {self.name}: {e.response.status_code} " f"{e.response.reason} - {error_message}")
raise e
def get_data(self, response: requests.Response) -> List:
data = response.json()
for data_field in self.data_fields:
if data and isinstance(data, dict):
data = data.get(data_field, [])
if isinstance(data, list):
data = data
elif isinstance(data, dict):
data = [data]
return data
def parse_response(self, response: requests.Response, stream_state: Mapping[str, Any], **kwargs) -> Iterable[Mapping]:
data = self.get_data(response)
for record in data:
yield record
# This is probably overkill because the request itself likely took longer
# than the minimum interval between requests, but keep it just to be safe.
time.sleep(60.0 / self.queries_per_minute)
class IncrementalIntercomStream(IntercomStream, ABC):
cursor_field = "updated_at"
def filter_by_state(self, stream_state: Mapping[str, Any] = None, record: Mapping[str, Any] = None) -> Iterable:
"""
The endpoint does not provide query filtering params, but most records
include an updated_at field, so we use that for incremental filtering
during slicing.
"""
if not stream_state or record[self.cursor_field] >= stream_state.get(self.cursor_field):
yield record
def parse_response(self, response: requests.Response, stream_state: Mapping[str, Any], **kwargs) -> Iterable[Mapping]:
records = super().parse_response(response, stream_state, **kwargs)
for record in records:
updated_at = record.get(self.cursor_field)
if updated_at:
record[self.cursor_field] = datetime.fromtimestamp(
record[self.cursor_field]
).isoformat() # convert timestamp to datetime string
yield from self.filter_by_state(stream_state=stream_state, record=record)
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, any]:
"""
This method is called once for each record returned from the API. It
compares the cursor field value in that record with the current state
and returns an updated state object. If this is the first time we run
a sync or no state was passed, current_stream_state will be None.
"""
current_stream_state = current_stream_state or {}
current_stream_state_date = current_stream_state.get(self.cursor_field, self.start_date)
latest_record_date = latest_record.get(self.cursor_field, self.start_date)
return {self.cursor_field: max(current_stream_state_date, latest_record_date)}
class ChildStreamMixin:
parent_stream_class: Optional[IntercomStream] = None
def stream_slices(self, sync_mode, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
for item in self.parent_stream_class(authenticator=self.authenticator, start_date=self.start_date).read_records(
sync_mode=sync_mode
):
yield {"id": item["id"]}
yield from []
class Admins(IntercomStream):
"""Return list of all admins.
API Docs: https://developers.intercom.com/intercom-api-reference/reference#list-admins
Endpoint: https://api.intercom.io/admins
"""
data_fields = ["admins"]
def path(self, **kwargs) -> str:
return "admins"
class Companies(IncrementalIntercomStream):
"""Return list of all companies.
API Docs: https://developers.intercom.com/intercom-api-reference/reference#iterating-over-all-companies
Endpoint: https://api.intercom.io/companies/scroll
"""
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
"""For reset scroll needs to iterate pages untill the last.
Another way need wait 1 min for the scroll to expire to get a new list for companies segments."""
data = response.json().get("data")
if data:
return {"scroll_param": response.json()["scroll_param"]}
else:
return None
def path(self, **kwargs) -> str:
return "companies/scroll"
class CompanySegments(ChildStreamMixin, IncrementalIntercomStream):
"""Return list of all company segments.
API Docs: https://developers.intercom.com/intercom-api-reference/reference#list-attached-segments-1
Endpoint: https://api.intercom.io/companies/<id>/segments
"""
parent_stream_class = Companies
def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str:
return f"/companies/{stream_slice['id']}/segments"
class Conversations(IncrementalIntercomStream):
"""Return list of all conversations.
API Docs: https://developers.intercom.com/intercom-api-reference/reference#list-conversations
Endpoint: https://api.intercom.io/conversations
"""
data_fields = ["conversations"]
def path(self, **kwargs) -> str:
return "conversations"
class ConversationParts(ChildStreamMixin, IncrementalIntercomStream):
"""Return list of all conversation parts.
API Docs: https://developers.intercom.com/intercom-api-reference/reference#retrieve-a-conversation
Endpoint: https://api.intercom.io/conversations/<id>
"""
data_fields = ["conversation_parts", "conversation_parts"]
parent_stream_class = Conversations
def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str:
return f"/conversations/{stream_slice['id']}"
class Segments(IncrementalIntercomStream):
"""Return list of all segments.
API Docs: https://developers.intercom.com/intercom-api-reference/reference#list-segments
Endpoint: https://api.intercom.io/segments
"""
data_fields = ["segments"]
def path(self, **kwargs) -> str:
return "segments"
class Contacts(IncrementalIntercomStream):
"""Return list of all contacts.
API Docs: https://developers.intercom.com/intercom-api-reference/reference#list-contacts
Endpoint: https://api.intercom.io/contacts
"""
def path(self, **kwargs) -> str:
return "contacts"
class DataAttributes(IntercomStream):
primary_key = "name"
def path(self, **kwargs) -> str:
return "data_attributes"
class CompanyAttributes(DataAttributes):
"""Return list of all data attributes belonging to a workspace for companies.
API Docs: https://developers.intercom.com/intercom-api-reference/reference#list-data-attributes
Endpoint: https://api.intercom.io/data_attributes?model=company
"""
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
return {"model": "company"}
class ContactAttributes(DataAttributes):
"""Return list of all data attributes belonging to a workspace for contacts.
API Docs: https://developers.intercom.com/intercom-api-reference/reference#list-data-attributes
Endpoint: https://api.intercom.io/data_attributes?model=contact
"""
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
return {"model": "contact"}
class Tags(IntercomStream):
"""Return list of all tags.
API Docs: https://developers.intercom.com/intercom-api-reference/reference#list-tags-for-an-app
Endpoint: https://api.intercom.io/tags
"""
primary_key = "name"
def path(self, **kwargs) -> str:
return "tags"
class Teams(IntercomStream):
"""Return list of all teams.
API Docs: https://developers.intercom.com/intercom-api-reference/reference#list-teams
Endpoint: https://api.intercom.io/teams
"""
primary_key = "name"
data_fields = ["teams"]
def path(self, **kwargs) -> str:
return "teams"
class SourceIntercom(AbstractSource):
"""
Source Intercom fetch data from messaging platform.
"""
def check_connection(self, logger, config) -> Tuple[bool, any]:
authenticator = TokenAuthenticator(token=config["access_token"])
try:
url = f"{IntercomStream.url_base}/tags"
auth_headers = {"Accept": "application/json", **authenticator.get_auth_header()}
response = requests.get(url, headers=auth_headers)
response.raise_for_status()
return True, None
except requests.exceptions.RequestException as e:
return False, e
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
AirbyteLogger().log("INFO", f"Using start_date: {config['start_date']}")
auth = TokenAuthenticator(token=config["access_token"])
return [
Admins(authenticator=auth, **config),
Companies(authenticator=auth, **config),
CompanySegments(authenticator=auth, **config),
Conversations(authenticator=auth, **config),
ConversationParts(authenticator=auth, **config),
Contacts(authenticator=auth, **config),
CompanyAttributes(authenticator=auth, **config),
ContactAttributes(authenticator=auth, **config),
Segments(authenticator=auth, **config),
Tags(authenticator=auth, **config),
Teams(authenticator=auth, **config),
]
```
#### File: source-s3/unit_tests/test_fileformatparser.py
```python
import os
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, List, Mapping
import pyarrow as pa
import pytest
from airbyte_cdk import AirbyteLogger
from smart_open import open as smart_open
from source_s3.source_files_abstract.fileformatparser import CsvParser, FileFormatParser
LOGGER = AirbyteLogger()
SAMPLE_DIRECTORY = Path(__file__).resolve().parent.joinpath("sample_files/")
class TestFileFormatParserStatics:
@pytest.mark.parametrize( # testing all datatypes as laid out here: https://json-schema.org/understanding-json-schema/reference/type.html
"input_json_type, output_pyarrow_type",
[
("string", pa.large_string()),
("number", pa.float64()),
("integer", pa.int64()),
("object", pa.large_string()),
("array", pa.large_string()),
("boolean", pa.bool_()),
("null", pa.large_string()),
],
)
def test_json_type_to_pyarrow_type(self, input_json_type, output_pyarrow_type):
# Json -> PyArrow direction
LOGGER.info(f"asserting that JSON type '{input_json_type}' converts to PyArrow type '{output_pyarrow_type}'...")
assert FileFormatParser.json_type_to_pyarrow_type(input_json_type) == output_pyarrow_type
@pytest.mark.parametrize( # testing all datatypes as laid out here: https://arrow.apache.org/docs/python/api/datatypes.html
"input_pyarrow_types, output_json_type",
[
((pa.null(),), "string"), # null type
((pa.bool_(),), "boolean"), # boolean type
(
(pa.int8(), pa.int16(), pa.int32(), pa.int64(), pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()),
"integer",
), # integer types
((pa.float16(), pa.float32(), pa.float64(), pa.decimal128(5, 10), pa.decimal256(3, 8)), "number"), # number types
((pa.time32("s"), pa.time64("ns"), pa.timestamp("ms"), pa.date32(), pa.date64()), "string"), # temporal types
((pa.binary(), pa.large_binary()), "string"), # binary types
((pa.string(), pa.utf8(), pa.large_string(), pa.large_utf8()), "string"), # string types
((pa.list_(pa.string()), pa.large_list(pa.timestamp("us"))), "string"), # array types
((pa.map_(pa.string(), pa.float32()), pa.dictionary(pa.int16(), pa.list_(pa.string()))), "string"), # object types
],
)
def test_json_type_to_pyarrow_type_reverse(self, input_pyarrow_types, output_json_type):
# PyArrow -> Json direction (reverse=True)
for typ in input_pyarrow_types:
LOGGER.info(f"asserting that PyArrow type '{typ}' converts to JSON type '{output_json_type}'...")
assert FileFormatParser.json_type_to_pyarrow_type(typ, reverse=True) == output_json_type
@pytest.mark.parametrize( # if expecting fail, put pyarrow_schema as None
"json_schema, pyarrow_schema",
[
(
{"a": "string", "b": "number", "c": "integer", "d": "object", "e": "array", "f": "boolean", "g": "null"},
{
"a": pa.large_string(),
"b": pa.float64(),
"c": pa.int64(),
"d": pa.large_string(),
"e": pa.large_string(),
"f": pa.bool_(),
"g": pa.large_string(),
},
),
({"single_column": "object"}, {"single_column": pa.large_string()}),
({}, {}),
({"a": "NOT A REAL TYPE", "b": "another fake type"}, {"a": pa.large_string(), "b": pa.large_string()}),
(["string", "object"], None), # bad input type
],
)
def test_json_schema_to_pyarrow_schema(self, json_schema, pyarrow_schema):
# Json -> PyArrow direction
if pyarrow_schema is not None:
assert FileFormatParser.json_schema_to_pyarrow_schema(json_schema) == pyarrow_schema
else:
with pytest.raises(Exception) as e_info:
FileFormatParser.json_schema_to_pyarrow_schema(json_schema)
LOGGER.debug(str(e_info))
@pytest.mark.parametrize( # if expecting fail, put json_schema as None
"pyarrow_schema, json_schema",
[
(
{
"a": pa.utf8(),
"b": pa.float16(),
"c": pa.uint32(),
"d": pa.map_(pa.string(), pa.float32()),
"e": pa.bool_(),
"f": pa.date64(),
},
{"a": "string", "b": "number", "c": "integer", "d": "string", "e": "boolean", "f": "string"},
),
({"single_column": pa.int32()}, {"single_column": "integer"}),
({}, {}),
({"a": "NOT A REAL TYPE", "b": "another fake type"}, {"a": "string", "b": "string"}),
(["string", "object"], None), # bad input type
],
)
def test_json_schema_to_pyarrow_schema_reverse(self, pyarrow_schema, json_schema):
# PyArrow -> Json direction (reverse=True)
if json_schema is not None:
assert FileFormatParser.json_schema_to_pyarrow_schema(pyarrow_schema, reverse=True) == json_schema
else:
with pytest.raises(Exception) as e_info:
FileFormatParser.json_schema_to_pyarrow_schema(pyarrow_schema, reverse=True)
LOGGER.debug(str(e_info))
class AbstractTestFileFormatParser(ABC):
""" Prefix this class with Abstract so the tests don't run here but only in the children """
@property
@abstractmethod
def test_files(self) -> List[Mapping[str, Any]]:
"""return a list of test_file dicts in structure:
[
{"fileformatparser": CsvParser(format, master_schema), "filepath": "...", "num_records": 5, "inferred_schema": {...}, line_checks:{}, fails: []},
{"fileformatparser": CsvParser(format, master_schema), "filepath": "...", "num_records": 16, "inferred_schema": {...}, line_checks:{}, fails: []}
]
note: line_checks index is 1-based to align with row numbers
"""
def _get_readmode(self, test_name, test_file):
LOGGER.info(f"testing {test_name}() with {test_file.get('test_alias', test_file['filepath'].split('/')[-1])} ...")
return "rb" if test_file["fileformatparser"].is_binary else "r"
def test_get_inferred_schema(self):
for test_file in self.test_files:
with smart_open(test_file["filepath"], self._get_readmode("get_inferred_schema", test_file)) as f:
if "test_get_inferred_schema" in test_file["fails"]:
with pytest.raises(Exception) as e_info:
test_file["fileformatparser"].get_inferred_schema(f)
LOGGER.debug(str(e_info))
else:
assert test_file["fileformatparser"].get_inferred_schema(f) == test_file["inferred_schema"]
def test_stream_records(self):
for test_file in self.test_files:
with smart_open(test_file["filepath"], self._get_readmode("stream_records", test_file)) as f:
if "test_stream_records" in test_file["fails"]:
with pytest.raises(Exception) as e_info:
[print(r) for r in test_file["fileformatparser"].stream_records(f)]
LOGGER.debug(str(e_info))
else:
records = [r for r in test_file["fileformatparser"].stream_records(f)]
assert len(records) == test_file["num_records"]
for index, expected_record in test_file["line_checks"].items():
assert records[index - 1] == expected_record
class TestCsvParser(AbstractTestFileFormatParser):
@property
def test_files(self) -> List[Mapping[str, Any]]:
return [
{
# basic 'normal' test
"fileformatparser": CsvParser(
format={"filetype": "csv"},
master_schema={
"id": "integer",
"name": "string",
"valid": "boolean",
"code": "integer",
"degrees": "number",
"birthday": "string",
"last_seen": "string",
},
),
"filepath": os.path.join(SAMPLE_DIRECTORY, "csv/test_file_1.csv"),
"num_records": 8,
"inferred_schema": {
"id": "integer",
"name": "string",
"valid": "boolean",
"code": "integer",
"degrees": "number",
"birthday": "string",
"last_seen": "string",
},
"line_checks": {},
"fails": [],
},
{
# tests custom CSV parameters (odd delimiter, quote_char, escape_char & newlines in values in the file)
"test_alias": "custom csv parameters",
"fileformatparser": CsvParser(
format={"filetype": "csv", "delimiter": "^", "quote_char": "|", "escape_char": "!", "newlines_in_values": True},
master_schema={
"id": "integer",
"name": "string",
"valid": "boolean",
"code": "integer",
"degrees": "number",
"birthday": "string",
"last_seen": "string",
},
),
"filepath": os.path.join(SAMPLE_DIRECTORY, "csv/test_file_2_params.csv"),
"num_records": 8,
"inferred_schema": {
"id": "integer",
"name": "string",
"valid": "boolean",
"code": "integer",
"degrees": "number",
"birthday": "string",
"last_seen": "string",
},
"line_checks": {},
"fails": [],
},
{
# tests encoding: Big5
"test_alias": "encoding: Big5",
"fileformatparser": CsvParser(
format={"filetype": "csv", "encoding": "big5"}, master_schema={"id": "integer", "name": "string", "valid": "boolean"}
),
"filepath": os.path.join(SAMPLE_DIRECTORY, "csv/test_file_3_enc_Big5.csv"),
"num_records": 8,
"inferred_schema": {"id": "integer", "name": "string", "valid": "boolean"},
"line_checks": {
3: {
"id": 3,
"name": "變形金剛,偽裝的機器人",
"valid": False,
}
},
"fails": [],
},
{
# tests encoding: Arabic (Windows 1256)
"test_alias": "encoding: Arabic (Windows 1256)",
"fileformatparser": CsvParser(
format={"filetype": "csv", "encoding": "windows-1256"},
master_schema={"id": "integer", "notes": "string", "valid": "boolean"},
),
"filepath": os.path.join(SAMPLE_DIRECTORY, "csv/test_file_4_enc_Arabic.csv"),
"num_records": 2,
"inferred_schema": {"id": "integer", "notes": "string", "valid": "boolean"},
"line_checks": {
1: {
"id": 1,
"notes": "البايت الجوي هو الأفضل",
"valid": False,
}
},
"fails": [],
},
{
# tests compression: gzip
"test_alias": "compression: gzip",
"fileformatparser": CsvParser(
format={"filetype": "csv"},
master_schema={
"id": "integer",
"name": "string",
"valid": "boolean",
"code": "integer",
"degrees": "number",
"birthday": "string",
"last_seen": "string",
},
),
"filepath": os.path.join(SAMPLE_DIRECTORY, "csv/test_file_5.csv.gz"),
"num_records": 8,
"inferred_schema": {
"id": "integer",
"name": "string",
"valid": "boolean",
"code": "integer",
"degrees": "number",
"birthday": "string",
"last_seen": "string",
},
"line_checks": {
7: {
"id": 7,
"name": "xZhh1Kyl",
"valid": False,
"code": 10,
"degrees": -9.2,
"birthday": "2021-07-14",
"last_seen": "2021-07-14 15:30:09.225145",
}
},
"fails": [],
},
{
# tests compression: bz2
"test_alias": "compression: bz2",
"fileformatparser": CsvParser(
format={"filetype": "csv"},
master_schema={
"id": "integer",
"name": "string",
"valid": "boolean",
"code": "integer",
"degrees": "number",
"birthday": "string",
"last_seen": "string",
},
),
"filepath": os.path.join(SAMPLE_DIRECTORY, "csv/test_file_7_bz2.csv.bz2"),
"num_records": 8,
"inferred_schema": {
"id": "integer",
"name": "string",
"valid": "boolean",
"code": "integer",
"degrees": "number",
"birthday": "string",
"last_seen": "string",
},
"line_checks": {
7: {
"id": 7,
"name": "xZhh1Kyl",
"valid": False,
"code": 10,
"degrees": -9.2,
"birthday": "2021-07-14",
"last_seen": "2021-07-14 15:30:09.225145",
}
},
"fails": [],
},
{
# tests extra columns in master schema
"test_alias": "extra columns in master schema",
"fileformatparser": CsvParser(
format={"filetype": "csv"},
master_schema={
"EXTRA_COLUMN_1": "boolean",
"EXTRA_COLUMN_2": "number",
"id": "integer",
"name": "string",
"valid": "boolean",
"code": "integer",
"degrees": "number",
"birthday": "string",
"last_seen": "string",
},
),
"filepath": os.path.join(SAMPLE_DIRECTORY, "csv/test_file_1.csv"),
"num_records": 8,
"inferred_schema": {
"id": "integer",
"name": "string",
"valid": "boolean",
"code": "integer",
"degrees": "number",
"birthday": "string",
"last_seen": "string",
},
"line_checks": {},
"fails": [],
},
{
# tests missing columns in master schema
# TODO: maybe this should fail read_records, but it does pick up all the columns from the file even though they are missing from the master schema
"test_alias": "missing columns in master schema",
"fileformatparser": CsvParser(format={"filetype": "csv"}, master_schema={"id": "integer", "name": "string"}),
"filepath": os.path.join(SAMPLE_DIRECTORY, "csv/test_file_1.csv"),
"num_records": 8,
"inferred_schema": {
"id": "integer",
"name": "string",
"valid": "boolean",
"code": "integer",
"degrees": "number",
"birthday": "string",
"last_seen": "string",
},
"line_checks": {},
"fails": [],
},
{
# tests empty file, SHOULD FAIL INFER & STREAM RECORDS
"test_alias": "empty csv file",
"fileformatparser": CsvParser(format={"filetype": "csv"}, master_schema={}),
"filepath": os.path.join(SAMPLE_DIRECTORY, "csv/test_file_6_empty.csv"),
"num_records": 0,
"inferred_schema": {},
"line_checks": {},
"fails": ["test_get_inferred_schema", "test_stream_records"],
},
]
``` |
{
"source": "jinnig/superset",
"score": 2
} |
#### File: superset/charts/post_processing.py
```python
from typing import Any, Callable, Dict, Optional, Union
import pandas as pd
from superset.utils.core import DTTM_ALIAS, extract_dataframe_dtypes, get_metric_name
def sql_like_sum(series: pd.Series) -> pd.Series:
"""
A SUM aggregation function that mimics the behavior from SQL.
"""
return series.sum(min_count=1)
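# Illustrative difference (not part of the original module): for an all-NULL series,
# pd.Series([None, None], dtype="float64").sum() returns 0.0, while
# sql_like_sum(pd.Series([None, None], dtype="float64")) returns NaN, matching SQL's SUM over all-NULL rows.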
def pivot_table(
result: Dict[Any, Any], form_data: Optional[Dict[str, Any]] = None
) -> Dict[Any, Any]:
"""
Pivot table.
"""
for query in result["queries"]:
data = query["data"]
df = pd.DataFrame(data)
form_data = form_data or {}
if form_data.get("granularity") == "all" and DTTM_ALIAS in df:
del df[DTTM_ALIAS]
metrics = [get_metric_name(m) for m in form_data["metrics"]]
aggfuncs: Dict[str, Union[str, Callable[[Any], Any]]] = {}
for metric in metrics:
aggfunc = form_data.get("pandas_aggfunc") or "sum"
if pd.api.types.is_numeric_dtype(df[metric]):
if aggfunc == "sum":
aggfunc = sql_like_sum
elif aggfunc not in {"min", "max"}:
aggfunc = "max"
aggfuncs[metric] = aggfunc
groupby = form_data.get("groupby") or []
columns = form_data.get("columns") or []
if form_data.get("transpose_pivot"):
groupby, columns = columns, groupby
df = df.pivot_table(
index=groupby,
columns=columns,
values=metrics,
aggfunc=aggfuncs,
margins=form_data.get("pivot_margins"),
)
# Re-order the columns adhering to the metric ordering.
df = df[metrics]
# Display metrics side by side with each column
if form_data.get("combine_metric"):
df = df.stack(0).unstack().reindex(level=-1, columns=metrics)
# flatten column names
df.columns = [" ".join(column) for column in df.columns]
# re-arrange data into a list of dicts
data = []
for i in df.index:
row = {col: df[col][i] for col in df.columns}
row[df.index.name] = i
data.append(row)
query["data"] = data
query["colnames"] = list(df.columns)
query["coltypes"] = extract_dataframe_dtypes(df)
query["rowcount"] = len(df.index)
return result
def list_unique_values(series: pd.Series) -> str:
"""
List unique values in a series.
"""
return ", ".join(set(str(v) for v in pd.Series.unique(series)))
pivot_v2_aggfunc_map = {
"Count": pd.Series.count,
"Count Unique Values": pd.Series.nunique,
"List Unique Values": list_unique_values,
"Sum": pd.Series.sum,
"Average": pd.Series.mean,
"Median": pd.Series.median,
"Sample Variance": lambda series: pd.series.var(series) if len(series) > 1 else 0,
"Sample Standard Deviation": (
lambda series: pd.series.std(series) if len(series) > 1 else 0,
),
"Minimum": pd.Series.min,
"Maximum": pd.Series.max,
"First": lambda series: series[:1],
"Last": lambda series: series[-1:],
"Sum as Fraction of Total": pd.Series.sum,
"Sum as Fraction of Rows": pd.Series.sum,
"Sum as Fraction of Columns": pd.Series.sum,
"Count as Fraction of Total": pd.Series.count,
"Count as Fraction of Rows": pd.Series.count,
"Count as Fraction of Columns": pd.Series.count,
}
def pivot_table_v2( # pylint: disable=too-many-branches
result: Dict[Any, Any], form_data: Optional[Dict[str, Any]] = None,
) -> Dict[Any, Any]:
"""
Pivot table v2.
"""
for query in result["queries"]:
data = query["data"]
df = pd.DataFrame(data)
form_data = form_data or {}
if form_data.get("granularity_sqla") == "all" and DTTM_ALIAS in df:
del df[DTTM_ALIAS]
# TODO (betodealmeida): implement metricsLayout
metrics = [get_metric_name(m) for m in form_data["metrics"]]
aggregate_function = form_data.get("aggregateFunction", "Sum")
groupby = form_data.get("groupbyRows") or []
columns = form_data.get("groupbyColumns") or []
if form_data.get("transposePivot"):
groupby, columns = columns, groupby
df = df.pivot_table(
index=groupby,
columns=columns,
values=metrics,
aggfunc=pivot_v2_aggfunc_map[aggregate_function],
margins=True,
)
# The pandas `pivot_table` method either returns both row/column
# totals, or none at all. We pass `margins=True` to get both, and
# remove any dimension that was not requested.
if not form_data.get("rowTotals"):
df.drop(df.columns[len(df.columns) - 1], axis=1, inplace=True)
if not form_data.get("colTotals"):
df = df[:-1]
# Compute fractions, if needed. If `colTotals` or `rowTotals` are
# present we need to adjust for including them in the sum
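# Illustrative: with metric values [1, 2, 3] plus a kept margin (total) row of 6, df.sum().sum() is 12,
# i.e. twice the true total, so the fractions computed below are doubled to compensate for the margin.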
if aggregate_function.endswith(" as Fraction of Total"):
total = df.sum().sum()
df = df.astype(total.dtypes) / total
if form_data.get("colTotals"):
df *= 2
if form_data.get("rowTotals"):
df *= 2
elif aggregate_function.endswith(" as Fraction of Columns"):
total = df.sum(axis=0)
df = df.astype(total.dtypes).div(total, axis=1)
if form_data.get("colTotals"):
df *= 2
elif aggregate_function.endswith(" as Fraction of Rows"):
total = df.sum(axis=1)
df = df.astype(total.dtypes).div(total, axis=0)
if form_data.get("rowTotals"):
df *= 2
# Re-order the columns adhering to the metric ordering.
df = df[metrics]
# Display metrics side by side with each column
if form_data.get("combineMetric"):
df = df.stack(0).unstack().reindex(level=-1, columns=metrics)
# flatten column names
df.columns = [" ".join(column) for column in df.columns]
# re-arrange data into a list of dicts
data = []
for i in df.index:
row = {col: df[col][i] for col in df.columns}
row[df.index.name] = i
data.append(row)
query["data"] = data
query["colnames"] = list(df.columns)
query["coltypes"] = extract_dataframe_dtypes(df)
query["rowcount"] = len(df.index)
return result
post_processors = {
"pivot_table": pivot_table,
"pivot_table_v2": pivot_table_v2,
}
``` |
{
"source": "jinningz/courseraInteractivePython",
"score": 4
} |
#### File: jinningz/courseraInteractivePython/Project2_Guess_the_number.py
```python
import simplegui
import random
import math
number_range = 100
# helper function to start and restart the game
def new_game():
# initialize global variables used in your code here
global secret_number, number_range, number_guesses, max_guesses
max_guesses = math.ceil(math.log(number_range, 2))
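# e.g. a range of 100 allows at most 7 binary-search guesses (2**7 = 128 >= 100); a range of 1000 allows 10 (2**10 = 1024 >= 1000)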
number_guesses = 1
print ""
print "New game. Range is from 0 to", number_range
print "Number of remaining guesses is ", int(max_guesses)
secret_number = random.randrange(0, number_range)
# define event handlers for control panel
def range100():
# button that changes the range to [0,100) and starts a new game
global secret_number, number_range
number_range = 100
new_game()
def range1000():
# button that changes the range to [0,1000) and starts a new game
global secret_number, number_range
number_range = 1000
new_game()
def input_guess(guess):
# main game logic goes here
global secret_number, number_guesses, max_guesses
guess = int(guess)
print ""
print "Guess was ", guess
print "Number of remaining guesses is ", int(max_guesses - number_guesses)
if number_guesses < max_guesses:
if guess < secret_number:
print "Higher!"
elif guess > secret_number:
print "Lower!"
else:
print "Correct!"
new_game()
number_guesses += 1
else:
if guess == secret_number:
print "Correct!"
else:
print "You ran out of guesses. The number was", secret_number
new_game()
# create frame
f = simplegui.create_frame("Guess the number", 200, 200)
# register event handlers for control elements and start frame
f.add_button("Range is [0, 100)", range100, 200)
f.add_button("Range is [0, 1000)", range1000, 200)
f.add_button("Restart game", new_game)
inp = f.add_input('Enter a guess', input_guess, 50)
# call new_game
new_game()
# always remember to check your completed program against the grading rubric
``` |
{
"source": "jinniuai/dash-fasta",
"score": 3
} |
#### File: dash-fasta/components/table.py
```python
from dash import html
from utils.functions import formatter_2_decimals
def make_dash_table(df):
body = []
header = []
for column in df.columns:
header.append(html.Th(column))
for index, row in df.iterrows():
html_row = []
for i in range(len(row)):
html_row.append(html.Td(formatter_2_decimals(row[i])))
body.append(html.Tr(html_row))
tHead = html.Thead(html.Tr(header))
tBody = html.Tbody(body)
table = html.Table([tHead, tBody])
return table
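# Minimal usage sketch (illustrative DataFrame; relies on utils.functions.formatter_2_decimals as imported above):
#
# import pandas as pd
# df = pd.DataFrame({"open": [1.2345, 2.5], "close": [3.14159, 42.0]})
# table = make_dash_table(df)  # -> html.Table with a header row and one row per DataFrame row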
```
#### File: jinniuai/dash-fasta/dashApp.py
```python
import dash
import dash_bootstrap_components as dbc
from dash import dcc, html
import dash_admin_components as dac
import flask
from utils.external_assets import ROOT, EXTERNAL_STYLESHEETS, FONT_AWSOME
from ui.main_content import layout
import datetime
import os
def create_dash_app(requests_pathname_prefix: str = None) -> dash.Dash:
# =============================================================================
# Dash App and Flask Server
# =============================================================================
server = flask.Flask(__name__)
#server.secret_key = os.environ.get('secret_key', 'secret')
app = dash.Dash(
name=__name__,
server=server,
#routes_pathname_prefix=requests_pathname_prefix,
requests_pathname_prefix=requests_pathname_prefix,
assets_folder=ROOT+"/assets",
suppress_callback_exceptions=True,
external_stylesheets=[
dbc.themes.CYBORG,
FONT_AWSOME,
#EXTERNAL_STYLESHEETS
],
meta_tags=[
{"name": "viewport", "content": "width=device-width, initial-scale=1"}
]
)
#app.scripts.config.serve_locally = False
#dcc._js_dist[0]['external_url'] = 'https://cdn.plot.ly/plotly-basic-latest.min.js'
app.layout = layout
return app
apps = create_dash_app(requests_pathname_prefix="/dash/")
```
#### File: dashPages/basic_boxes/callbacks.py
```python
from main import apps
from dash.dependencies import Input, Output, State
from components.example_plots import plot_scatter
# Update figure on slider change
@apps.callback(
Output('box-graph', 'figure'),
[Input('controlbar-slider', 'value')] )
def update_box_graph(value):
return plot_scatter(value)
``` |
{
"source": "jinniuai/easytrader",
"score": 2
} |
#### File: easytrader/config/client.py
```python
def create(broker):
if broker == "yh":
return YH
if broker == "ht":
return HT
if broker == "gj":
return GJ
if broker == "gf":
return GF
if broker == "ths":
return CommonConfig
if broker == "wk":
return WK
if broker == "htzq":
return HTZQ
raise NotImplementedError
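# Illustrative usage: create("yh") returns the YH config class, create("ht") returns HT,
# and an unrecognised broker code raises NotImplementedError.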
class CommonConfig:
DEFAULT_EXE_PATH: str = ""
TITLE = "网上股票交易系统5.0"
# Exchange type: Shenzhen A-shares, Shanghai A-shares
TRADE_STOCK_EXCHANGE_CONTROL_ID = 1003
# "Cancel all" button on the order-cancellation screen
TRADE_CANCEL_ALL_ENTRUST_CONTROL_ID = 30001
TRADE_SECURITY_CONTROL_ID = 1032
TRADE_PRICE_CONTROL_ID = 1033
TRADE_AMOUNT_CONTROL_ID = 1034
TRADE_SUBMIT_CONTROL_ID = 1006
TRADE_MARKET_TYPE_CONTROL_ID = 1541
COMMON_GRID_CONTROL_ID = 1047
COMMON_GRID_LEFT_MARGIN = 10
COMMON_GRID_FIRST_ROW_HEIGHT = 30
COMMON_GRID_ROW_HEIGHT = 16
BALANCE_MENU_PATH = ["查询[F4]", "资金股票"]
POSITION_MENU_PATH = ["查询[F4]", "资金股票"]
TODAY_ENTRUSTS_MENU_PATH = ["查询[F4]", "当日委托"]
TODAY_TRADES_MENU_PATH = ["查询[F4]", "当日成交"]
BALANCE_CONTROL_ID = 1308
POP_DIALOD_TITLE_CONTROL_ID = 1365
GRID_DTYPE = {
"操作日期": str,
"委托编号": str,
"申请编号": str,
"合同编号": str,
"证券代码": str,
"股东代码": str,
"资金帐号": str,
"资金帐户": str,
"发生日期": str,
}
CANCEL_ENTRUST_ENTRUST_FIELD = "合同编号"
CANCEL_ENTRUST_GRID_LEFT_MARGIN = 50
CANCEL_ENTRUST_GRID_FIRST_ROW_HEIGHT = 30
CANCEL_ENTRUST_GRID_ROW_HEIGHT = 16
AUTO_IPO_SELECT_ALL_BUTTON_CONTROL_ID = 1098
AUTO_IPO_BUTTON_CONTROL_ID = 1006
AUTO_IPO_MENU_PATH = ["新股申购", "批量新股申购"]
AUTO_IPO_NUMBER = '申购数量'
class YH(CommonConfig):
DEFAULT_EXE_PATH = r"C:\双子星-中国银河证券\Binarystar.exe"
BALANCE_GRID_CONTROL_ID = 1308
GRID_DTYPE = {
"操作日期": str,
"委托编号": str,
"申请编号": str,
"合同编号": str,
"证券代码": str,
"股东代码": str,
"资金帐号": str,
"资金帐户": str,
"发生日期": str,
}
AUTO_IPO_MENU_PATH = ["新股申购", "一键打新"]
class HT(CommonConfig):
DEFAULT_EXE_PATH = r"C:\htzqzyb2\xiadan.exe"
BALANCE_CONTROL_ID_GROUP = {
"资金余额": 1012,
"冻结资金": 1013,
"可用金额": 1016,
"可取金额": 1017,
"股票市值": 1014,
"总资产": 1015,
}
GRID_DTYPE = {
"操作日期": str,
"委托编号": str,
"申请编号": str,
"合同编号": str,
"证券代码": str,
"股东代码": str,
"资金帐号": str,
"资金帐户": str,
"发生日期": str,
}
AUTO_IPO_MENU_PATH = ["新股申购", "批量新股申购"]
class GJ(CommonConfig):
DEFAULT_EXE_PATH = "C:\\全能行证券交易终端\\xiadan.exe"
GRID_DTYPE = {
"操作日期": str,
"委托编号": str,
"申请编号": str,
"合同编号": str,
"证券代码": str,
"股东代码": str,
"资金帐号": str,
"资金帐户": str,
"发生日期": str,
}
AUTO_IPO_MENU_PATH = ["新股申购", "新股批量申购"]
class GF(CommonConfig):
DEFAULT_EXE_PATH = "C:\\gfzqrzrq\\xiadan.exe"
TITLE = "核新网上交易系统"
GRID_DTYPE = {
"操作日期": str,
"委托编号": str,
"申请编号": str,
"合同编号": str,
"证券代码": str,
"股东代码": str,
"资金帐号": str,
"资金帐户": str,
"发生日期": str,
}
AUTO_IPO_MENU_PATH = ["新股申购", "批量新股申购"]
class WK(HT):
pass
class HTZQ(CommonConfig):
DEFAULT_EXE_PATH = r"c:\\海通证券委托\\xiadan.exe"
BALANCE_CONTROL_ID_GROUP = {
"资金余额": 1012,
"可用金额": 1016,
"可取金额": 1017,
"总资产": 1015,
}
AUTO_IPO_NUMBER = '可申购数量'
``` |
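The `create` factory above just maps a broker short name onto its control-ID configuration class. A minimal usage sketch; the import path is assumed from the file location `easytrader/config/client.py`:
```python
from easytrader.config.client import create  # assumed import path
# pick the GUI control-ID configuration for the Huatai ("ht") client
config = create("ht")
print(config.DEFAULT_EXE_PATH)        # r"C:\htzqzyb2\xiadan.exe"
print(config.BALANCE_MENU_PATH)       # ["查询[F4]", "资金股票"], inherited from CommonConfig
print(config.TRADE_PRICE_CONTROL_ID)  # 1033, also inherited from CommonConfig
```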
{
"source": "jinniuai/qlib",
"score": 3
} |
#### File: tests/dataset_tests/test_datalayer.py
```python
import unittest
import numpy as np
from qlib.data import D
from qlib.tests import TestAutoData
class TestDataset(TestAutoData):
def testCSI300(self):
close_p = D.features(D.instruments("csi300"), ["$close"])
size = close_p.groupby("datetime").size()
cnt = close_p.groupby("datetime").count()["$close"]
size_desc = size.describe(percentiles=np.arange(0.1, 1.0, 0.1))
cnt_desc = cnt.describe(percentiles=np.arange(0.1, 1.0, 0.1))
print(size_desc)
print(cnt_desc)
self.assertLessEqual(size_desc.loc["max"], 305, "Excessive number of CSI300 constituent stocks")
self.assertGreaterEqual(size_desc.loc["80%"], 290, "Insufficient number of CSI300 constituent stocks")
self.assertLessEqual(cnt_desc.loc["max"], 305, "Excessive number of CSI300 constituent stocks")
# FIXME: Due to the low quality of data. Hard to make sure there are enough data
# self.assertEqual(cnt_desc.loc["80%"], 300, "Insufficient number of CSI300 constituent stocks")
def testClose(self):
close_p = D.features(D.instruments("csi300"), ["Ref($close, 1)/$close - 1"])
close_desc = close_p.describe(percentiles=np.arange(0.1, 1.0, 0.1))
print(close_desc)
self.assertLessEqual(abs(close_desc.loc["90%"][0]), 0.1, "Close value is abnormal")
self.assertLessEqual(abs(close_desc.loc["10%"][0]), 0.1, "Close value is abnormal")
# FIXME: The yahoo data is not perfect. We have to
# self.assertLessEqual(abs(close_desc.loc["max"][0]), 0.2, "Close value is abnormal")
# self.assertGreaterEqual(close_desc.loc["min"][0], -0.2, "Close value is abnormal")
def testJinniuai(self):
close_p = D.features(D.instruments("csi300"), ["$close"])
size = close_p.groupby("datetime").size()
cnt = close_p.groupby("datetime").count()["$close"]
size_desc = size.describe(percentiles=np.arange(0.1, 1.0, 0.1))
cnt_desc = cnt.describe(percentiles=np.arange(0.1, 1.0, 0.1))
print(size_desc)
print(cnt_desc)
self.assertLessEqual(size_desc.loc["max"], 305, "Excessive number of CSI300 constituent stocks")
self.assertGreaterEqual(size_desc.loc["80%"], 290, "Insufficient number of CSI300 constituent stocks")
self.assertLessEqual(cnt_desc.loc["max"], 305, "Excessive number of CSI300 constituent stocks")
# FIXME: Due to the low quality of data. Hard to make sure there are enough data
# self.assertEqual(cnt_desc.loc["80%"], 300, "Insufficient number of CSI300 constituent stocks")
close_p = D.features(D.instruments("csi300"), ["Ref($close, 1)/$close - 1"])
close_desc = close_p.describe(percentiles=np.arange(0.1, 1.0, 0.1))
print(close_desc)
self.assertLessEqual(abs(close_desc.loc["90%"][0]), 0.1, "Close value is abnormal")
self.assertLessEqual(abs(close_desc.loc["10%"][0]), 0.1, "Close value is abnormal")
# FIXME: The yahoo data is not perfect. We have to
# self.assertLessEqual(abs(close_desc.loc["max"][0]), 0.2, "Close value is abnormal")
# self.assertGreaterEqual(close_desc.loc["min"][0], -0.2, "Close value is abnormal")
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "JinnLynn/alfred-workflows",
"score": 3
} |
#### File: src/chinese-tv-epg/epg.py
```python
import sys, os, re
import json
import warnings
import re
from datetime import datetime, timedelta
import alfred
alfred.setDefaultEncodingUTF8()
import bs4
from pprint import pprint
__version__ = '1.1'
_baseurl = 'http://tv.cntv.cn/epg'
_default_favs = ['cctv1', 'cctv2', 'cctv3']
def parseWebPage(url, **kwargs):
try:
res = alfred.request.get(url, **kwargs)
content = res.getContent()
# HACK: 获取节目列表的网页HTML TAG 错误可能造成beautiful soup解析死循环
# 需手动修改
content = re.sub(r'<a>\n', '</a>\n', content)
# 禁止显示BeautifulSoup警告
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return bs4.BeautifulSoup(content, fromEncoding='utf-8')
except Exception, e:
raise e
@alfred.cached('channels-list')
def fetchChannels():
soup = parseWebPage(_baseurl)
channels = {}
for item in soup.select('div.md_left_right'):
dl_tag = item.find('dl')
# 城市
if dl_tag.attrs.get('id', '') == 'cityList':
channel_tags = item.select('div.lv3 p a')
else:
channel_tags = item.select('dd a')
for c_tag in channel_tags:
chl_title = c_tag.get_text().strip()
chl_id = c_tag.attrs['rel'][0]
channels.update({chl_id:chl_title})
return channels
def fetchChannelEPG(channel, date, cache_name):
date_str = date.strftime('%Y-%m-%d')
@alfred.cached(cache_name, _get_check=lambda d: d['date']==date_str)
def _fetch():
data = {
'action' : 'epg-list',
'date' : date_str,
'channel' : channel
}
schedules = []
soup = parseWebPage(
'http://tv.cntv.cn/index.php',
data=data,
referer=_baseurl
)
epg_list = soup.select('dl dd')
schedules = []
for item in epg_list:
# 已播放的
a_tags = item.select('a')
sche_info = (a_tags[0] if a_tags else item).get_text().strip()
first_space = sche_info.find(' ')
if first_space < 0:
continue
schedules.append({
'time' : sche_info[0:first_space].strip(),
'show' : sche_info[first_space:].strip()
})
if not schedules:
return
return {'date':date_str, 'epg':schedules}
return _fetch()
def fetchChannelEPGToday(channel):
cache_name = '{}-today'.format(channel)
date = datetime.now()
return fetchChannelEPG(channel, date, cache_name)
def fetchChannelEPGTomorrow(channel):
cache_name = '{}-tomorrow'.format(channel)
date = datetime.now() + timedelta(days=1)
return fetchChannelEPG(channel, date, cache_name)
def getChannelList():
return fetchChannels()
def getChannelTitle(channel):
channels = getChannelList()
if channels.has_key(channel):
return channels[channel]
# 获取正在和下一个将要播放的节目
def getCurrentAndNextProgram(channel):
schedules = fetchChannelEPGToday(channel)
if not schedules:
return {}, {}
schedules = schedules['epg']
current = {}
next = {}
schedules = sorted(schedules, key=lambda s:s['time'])
now = datetime.now()
for item in schedules:
try:
time = item['time'].split(':')
hour = int(time[0])
minute = int(time[1])
if (hour==now.hour and minute>now.minute) or hour>now.hour:
next = item
break
current = item
except Exception, e:
raise e
return current, next
def getFavChannels():
favs = alfred.config.get('fav')
channels = getChannelList()
if isinstance(favs, list):
# 去除不在列表的频道
favs = filter(lambda c: c in channels.keys(), favs)
alfred.config.set(fav=favs)
return favs
# 还没有收藏频道
favs = _default_favs
alfred.config.set(fav=favs)
return favs
def isChannelFaved(channel):
return channel in getFavChannels()
def showLive():
favs = getFavChannels()
if not favs: alfred.exit()
feedback = alfred.Feedback()
for channel in favs:
chl_title = getChannelTitle(channel)
if not chl_title:
continue
cur, next = getCurrentAndNextProgram(channel)
title = '{}'.format(chl_title)
if cur:
title = '{} 正在播放: {}'.format(chl_title, cur['show'])
subtitle = ''
if next:
subtitle = '下一个节目: {time} {show}'.format(**next)
feedback.addItem(
title = title,
subtitle = subtitle,
autocomplete = 'epg {}'.format(channel),
valid = False
)
feedback.addItem(
title = '显示所有电视频道',
autocomplete = 'all',
valid = False
)
feedback.addItem(
title = '显示收藏的电视频道',
autocomplete = 'fav',
valid = False
)
feedback.output()
def showAllChannles():
channels = getChannelList()
title_orded = sorted(channels.values(), key=lambda t: t.lower())
def title_id(title):
for k,v in channels.iteritems():
if v==title:
return k
favs = getFavChannels()
feedback = alfred.Feedback()
for chl_title in title_orded:
chl_id = title_id(chl_title)
subtitle = '已收藏' if chl_id in favs else ''
feedback.addItem(
title = chl_title,
subtitle = subtitle,
autocomplete = 'epg {}'.format(chl_id),
valid = False
)
feedback.output()
# 显示某频道或收藏的频道列表
def showEPG():
channel = alfred.argv(2)
if channel:
return showChannleEPG(channel)
favs = getFavChannels()
if not favs: alfred.exitWithFeedback(title='你还没有收藏的频道')
feedback = alfred.Feedback()
for chl_id in favs:
current, next = getCurrentAndNextProgram(chl_id)
subtitle = '正在播放: {show}'.format(**current) if current else ''
feedback.addItem(
title = getChannelTitle(chl_id),
subtitle = subtitle,
arg = chl_id,
autocomplete = 'epg {}'.format(chl_id),
valid = False
)
feedback.output()
# 显示频道节目单
def showChannleEPG(channel):
chl_title = getChannelTitle(channel)
if not chl_title: alfred.exitWithFeedback(title='未找到频道信息')
schedules = fetchChannelEPGToday(channel)
if not schedules: alfred.exitWithFeedback(title='未找到频道的节目单')
feedback = alfred.Feedback()
date_str = datetime.now().strftime('%Y-%m-%d')
now_time = datetime.now().strftime('%H:%M')
is_faved = isChannelFaved(channel)
feedback.addItem(
title = '{} {} 节目单 现在时间: {}'.format(chl_title, date_str, now_time),
subtitle = '已收藏,选择这项可以取消收藏。' if is_faved else '未收藏,选择这项可以收藏',
arg = 'toggle-fav {}'.format(channel)
)
for item in schedules['epg']:
feedback.addItem(
title = '{time} {show}'.format(**item)
)
feedback.output()
def main():
cmd_map = {
'live' : lambda: showLive(),
'epg' : lambda: showEPG(),
'fav' : lambda: showEPG(),
'all' : lambda: showAllChannles(),
}
cmd = alfred.argv(1)
if not cmd or cmd.lower() not in cmd_map.keys():
cmd = 'live'
cmd_map[cmd.lower()]()
if __name__ == '__main__':
main()
```
#### File: download-station/rdl/__init__.py
```python
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import os, base64, urllib
from urlparse import urlparse
__version__ = '1.1.0'
def rdlTypeToDesc(t):
type_desc = {
'ed2k' : 'eMule',
'emule' : 'eMule',
'qqdl' : 'QQ旋风',
'thunder' : '迅雷',
'flashget' : '快车',
'magnet' : '磁力链'
}
for s in ['http', 'https', 'ftp', 'ftps', 'sftp']:
type_desc.update({s:s.upper()})
t = t.lower()
if t in type_desc.keys():
return type_desc[t]
return 'UNKNOWN'
class RealDownloadLink(object):
def __init__(self):
pass
def buildResult(self, url, urltype='UNKNOWN', real=None, filename=None, filesize=None):
return {
'type' : urltype,
'original' : url,
'real' : real if real else url,
'filename' : urllib.unquote(filename) if filename else '-',
'filesize' : filesize if filesize else '-'
}
def parse(self, url):
parse_map = {
'ed2k' : lambda: self.parseEd2k(url),
'thunder' : lambda: self.parseThunder(url),
'flashget' : lambda: self.parseFlashget(url),
'qqdl' : lambda: self.parseQQdl(url)
}
scheme = urlparse(url).scheme
if scheme in parse_map.keys():
return parse_map[scheme]()
elif scheme in ['http', 'https', 'ftp', 'ftps', 'sftp']:
return self.parseNormal(url)
elif scheme in ['magnet']:
return self.buildResult(url, scheme)
return self.buildResult(url)
def parseNormal(self, url):
uri = urlparse(url)
filename = os.path.basename(uri.path)
return self.buildResult(url, uri.scheme, url, filename)
def parseEd2k(self, url):
name = ''
size = ''
try:
parts = url.split('|')
name = parts[2]
size = self.humanReadable(parts[3])
return self.buildResult(url, 'emule', url, name, size)
except:
return self.buildResult(url, 'emule')
def parseThunder(self, url):
# 格式: thunder://CODEPART
# CODEPART = 'AA真实地址ZZ'的base64编码
uri = urlparse(url)
try:
real = uri.netloc
# base64解码
real = base64.b64decode(real)
#去除前后的AA ZZ
real = real[2:-2]
res = self.parse(real)
res['original'] = url
res['type'] = 'thunder'
return res
except:
return self.buildResult(url, 'thunder')
def parseFlashget(self, url):
# 格式: flashget://CODEPART&HASHCODE
# CODEPART = '[FLASHGET]真实地址[FLASHGET]'的base64编码
# HASHCODE 无用 可有可无
uri = urlparse(url)
try:
real = uri.netloc
# 去除HASHCODE
if real.rfind('&') >= 0:
real = real[0:real.rfind('&')]
# base64解码
real = base64.b64decode(real)
# 去除 [FLASHGET] [FLASHGET]
real = real[10:-10]
res = self.parse(real)
res['original'] = url
res['type'] = 'flashget'
return res
except:
return self.buildResult(url, 'flashget')
def parseQQdl(self, url):
# 格式: qqdl://CODEPART
# CODEPART = 真实地址的base64编码
uri = urlparse(url)
try:
# base64解码
real = base64.b64decode(uri.netloc)
res = self.parse(real)
res['original'] = url
res['type'] = 'qqdl'
return res
except:
return self.buildResult(url, 'qqdl')
def humanReadable(self, byte):
if isinstance(byte, (str, unicode)):
byte = int(byte) if byte.isdigit() else 0
size = byte / 1024.0
unit = 'KB'
if size > 1024:
size = size / 1024.0
unit = 'MB'
if size > 1024:
size = size / 1024.0
unit = 'GB'
return '{:.2f}{}'.format(size, unit)
def main():
import alfred
url = sys.argv[1]
if not url:
alfred.exitWithFeedback(title='UNKNOWN')
rdl = RealDownloadLink()
res = rdl.parse(url)
feedback = alfred.Feedback()
res.update({'type_desc' : rdlTypeToDesc(res['type'])})
feedback.addItem(
title = res['real'],
subtitle = '{type_desc} {filename} {filesize}'.format(**res),
arg = res['real']
)
feedback.output()
if __name__ == '__main__':
main()
```
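The Thunder/FlashGet/QQ旋风 schemes handled above are plain URLs wrapped in base64. A standalone round-trip of the thunder:// case (hypothetical URL, Python 3 stdlib only) shows exactly what `parseThunder` undoes:
```python
import base64
real = 'http://example.com/file.zip'  # hypothetical download URL
# thunder:// wraps base64('AA' + real_url + 'ZZ')
thunder = 'thunder://' + base64.b64encode(('AA' + real + 'ZZ').encode()).decode()
# decoding mirrors RealDownloadLink.parseThunder: strip the scheme, b64-decode, drop AA/ZZ
decoded = base64.b64decode(thunder[len('thunder://'):]).decode()[2:-2]
assert decoded == real
```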
#### File: src/lyric/lyric.py
```python
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import os, urllib, urllib2, subprocess, json, re, codecs
from base64 import b64encode, b64decode
from xml.dom import minidom
import alfred
from pprint import pprint
__version__ = '1.0.1'
class Lyric(object):
def __init__(self):
pass
def run(self):
cmd_map = {
'search' : lambda: self.search(),
'output-clean' : lambda: self.outputCleanContent(),
'download' : lambda: self.download(),
'save-to-itunes' : lambda: self.saveLyricToiTunes()
}
cmd = alfred.argv(1)
if not cmd:
cmd = 'search'
if cmd in cmd_map.keys():
cmd_map[cmd]()
def search(self):
title = alfred.argv(2)
artist = alfred.argv(3)
if title is None:
self.outputiTunesPlayingSong()
res, data = self.fetchLyricList(title, artist)
if not res:
alfred.exitWithFeedback(
title = data,
subtitle = "Format: lrc TITLE ARTIST e.g. lrc 'Heal The World' '<NAME>'"
)
feedback = alfred.Feedback()
for lrc in data:
feedback.addItem(
title = '{artist} - {title}'.format(**lrc),
arg = lrc['id']
)
feedback.output()
def outputiTunesPlayingSong(self):
res, data = self.fetchiTunesPlaying()
if not res or not data['title']:
return
title = '{title}' if not data['artist'] else '{artist} - {title}'
title = title.format(**data)
alfred.exitWithFeedback(
title = title,
subtitle = 'iTunes current playing track',
autocomplete = '"{title}" "{artist}"'.format(**data),
valid = False
)
def fetchiTunesPlaying(self):
try:
res = subprocess.check_output(['osascript', 'itunes.applescript'])
if not res:
return False, ''
info = map(lambda s: s.strip(), res.split(','))
code = int(info[0])
if code != 0:
return False, info[1]
return True, {'artist': info[1], 'title' : info[2]}
except Exception, e:
return False, '{}'.format(e)
def fetchLyricList(self, title, artist = ''):
if not title:
return False, 'Song title missing.'
if artist is None:
artist = ''
# 缓存结构 { 'query' : {'title': '', 'artist': ''}, 'list' : [] }
cache = alfred.cache.get('lyric-list')
if cache and cache['query']['title'] == title and cache['query']['artist'] == artist:
return True, cache['list']
try:
paras = {
'Title' : subprocess.check_output(['php', '-f', 'ttplayer.php', 'sh', title]),
'Artist' : subprocess.check_output(['php', '-f', 'ttplayer.php', 'sh', artist]),
'Flags' : 0
}
url = 'http://lrccnc.ttplayer.com/dll/lyricsvr.dll?sh?{}'.format(urllib.urlencode(paras))
res = urllib2.urlopen(url)
dom = minidom.parse(res)
lrcs = dom.documentElement.getElementsByTagName('lrc')
if len(lrcs) == 0:
return False, 'Lyric is non-existent.'
data = []
for lrc in lrcs:
data.append({
'id' : lrc.getAttribute('id'),
'artist' : lrc.getAttribute('artist'),
'title' : lrc.getAttribute('title')
})
cache = {
'query' : {'title' : title, 'artist' : artist},
'list' : data
}
alfred.cache.set('lyric-list', cache)
return True, data
except Exception, e:
return False, '{}'.format(e)
def fetchLyricContent(self, lyric_id):
if not lyric_id:
return False, 'Lyric is non-existent.'
# 缓存结构 { 'id' : '', 'content' : ''}
cache = alfred.cache.get('lyric-content')
if cache and cache['id'] == lyric_id:
return True, cache['content']
info = self.getLyricInfoFromCache(lyric_id)
if info is None:
return False, 'Lyric is non-existent.'
try:
paras = {
'id' : info['id'],
'code' : subprocess.check_output(['php', '-f', 'ttplayer.php', 'dl', info['id'], info['artist'], info['title']])
}
#! Id必须在Code之前 不能用urllib.urlencode
url = 'http://lrccnc.ttplayer.com/dll/lyricsvr.dll?dl?Id={id}&Code={code}'.format(**paras)
res = urllib2.urlopen(url)
content = res.read()
cache = {
'id' : lyric_id,
'content' : content
}
alfred.cache.set('lyric-content', cache)
return True, content
except Exception, e:
return False, '{}'.format(e)
def cleanLyricTimeline(self, lrc):
new_lrc = ''
last_line_empty = False
for line in lrc.splitlines():
while re.search(r'^\[(.+?)\]', line):
line = re.sub(r'^\[(.+?)\]', '', line)
# 去除QQ
line = re.sub(r'(.*)QQ[:: ](.*)', '', line)
line = line.strip()
if last_line_empty and not line:
continue
last_line_empty = True if not line else False
new_lrc += '{}\n'.format(line.strip())
return new_lrc.strip()
def getLyricInfoFromCache(self, lrc_id):
cache = alfred.cache.get('lyric-list')
if not cache:
return
for lrc in cache['list']:
if lrc['id'] == lrc_id:
return lrc
def getCleanLyricContent(self):
lrc_id = alfred.argv(2)
res, data = self.fetchLyricContent(lrc_id)
if not res:
return
return self.cleanLyricTimeline(data)
def outputCleanContent(self):
alfred.exit(self.getCleanLyricContent())
def download(self):
lrc_id = alfred.argv(2)
info = self.getLyricInfoFromCache(lrc_id)
if info is None:
alfred.exit('Lyric is non-existent.')
res, data = self.fetchLyricContent(lrc_id)
if not res:
alfred.exit(data)
try:
filename = '{title}.lrc' if not info['artist'] else '{artist} - {title}.lrc'
dl_path = os.path.expanduser('~/Downloads')
dl_path = os.path.join(dl_path, filename.format(**info))
with codecs.open(dl_path, 'w', 'utf-8') as f:
f.write(data)
if os.path.exists(dl_path):
subprocess.check_output(['open', os.path.dirname(dl_path)])
alfred.exit('Lyric downloaded.')
except Exception, e:
alfred.exit('Download lyric fail. {}'.format(e))
def saveLyricToiTunes(self):
lrc = self.getCleanLyricContent()
if not lrc:
alfred.exit('Fail: lyric is non-existent.')
# lrc = 'test'
res = subprocess.check_output('osascript itunes.applescript lyric "{}"'.format(lrc), shell=True)
info = map(lambda s: s.strip(), res.split(','))
code = int(info[0])
if code != 0:
alfred.exit('Fail: {}'.format(info[1]))
alfred.exit('Lyric saved to {} - {}.'.format(info[1], info[2]))
if __name__ == '__main__':
Lyric().run()
```
#### File: src/yyets/yyets.py
```python
import os, sys
import time, json, warnings
from base64 import b64encode, b64decode
from pprint import pprint
import alfred
alfred.setDefaultEncodingUTF8()
import bs4
__version__ = '2.1.0'
_base_host = 'http://www.yyets.com/'
_fb_return = lambda: getReturnLastQueryFeedbackItem()
_fb_return_top = lambda: alfred.Item(title='返回', valid=False, autocomplete=' ')
_fb_no_found = lambda: getReturnLastQueryFeedbackItem('没有找到想要的内容')
_fb_no_logined = lambda: alfred.Item(title='需要登录才能查看', subtitle='选择设置用户名和密码', valid=False, autocomplete='setting ')
# 资源模版
_res_tpl = {
'id' : 0,
'title' : '',
'img' : '',
'page' : 0,
'info' : '',
'files' : []
}
_res_file_tpl = {
'id' : 0,
'info' : '',
'type' : '',
'format' : '',
'filename' : '',
'filesize' : '',
'emule' : '',
'magnet' : '',
'baidu' : '',
'update_date' : ''
}
def recordQuery():
current = sys.argv[1:]
queries = alfred.cache.get('record-query', {})
last = queries.get('current', '')
# 不同时才记录
if current != last:
queries.update(
current = current,
last = last
)
alfred.cache.set('record-query', queries, 600)
def getReturnLastQueryFeedbackItem(title='返回', subtitle='回到上一个操作'):
last_query = alfred.cache.get('record-query', {}).get('last', [])
return alfred.Item(
title = title,
subtitle = subtitle,
valid = False,
autocomplete = ' '.join(last_query)
)
def login():
alfred.cache.delete('cookie')
usr = alfred.config.get('usr')
pwd = alfred.config.get('pwd')
if not usr or not pwd:
return False
try:
res = alfred.request.post(
os.path.join(_base_host, 'user/login/ajaxLogin'),
data = {
'type' : 'nickname',
'account' : usr,
                'password' : pwd,
'remember' : 1
}
)
ret = json.loads(res.getContent())
if ret.get('status', 0) == 1:
cookies = {}
for c in res.cookieJar:
if c.name in ['GINFO', 'GKEY']:
cookies[c.name] = c.value
alfred.cache.set('cookie', cookies, 3600)
return True
except Exception, e:
pass
def getLoginCookies():
cache = alfred.cache.get('cookie')
if cache:
return cache
login()
return alfred.cache.get('cookie')
def isLogined():
return bool(getLoginCookies())
def parseWebPage(url, **kwargs):
try:
if not kwargs.has_key('cookie') and isLogined():
kwargs['cookie'] = getLoginCookies()
res = alfred.request.get(url, **kwargs)
content = res.getContent()
# 禁止显示BeautifulSoup警告
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return bs4.BeautifulSoup(content)
except Exception, e:
raise e
# 小海报地址获取
# 文件名的第一位字母标识海报大小 s m b
def smallPosterURL(url):
if not url:
return url
dirname = os.path.dirname(url)
basename = os.path.basename(url)
if basename.startswith('s'):
return url
s_basename = '{}{}'.format('s', basename[1:])
return os.path.join(dirname, s_basename)
# 解析下载地址
# links 为下载链接bs4对象集合
def parseDownloadLink(links):
dls = {}
for link in links:
href = link.get('href', '')
if href.startswith('ed2k'):
dls['emule'] = href
elif href.startswith('magnet'):
dls['magnet'] = href
elif href.startswith('http://pan.baidu.com/'):
dls['baidu'] = href
return dls
def parseDownloadHas(data):
has = {}
for i in ['emule', 'magnet', 'baidu']:
has['has_' + i] = '有' if data[i] else '无'
return has
# 过滤
# 以`,`分割 1 文件名 2 格式
def filterItems(filter_str, items):
if filter_str:
filters= filter_str.split(',')
if len(filters) >= 1:
file_filter = filters[0].lower()
items = filter(lambda f: file_filter in f['filename'].lower(), items)
if len(filters) >= 2:
            format_filter = filters[1].lower()
items = filter(lambda f: format_filter in f['format'].lower(), items)
return items
# 获取最新更新
def fetchRecentItems(channel):
# channel: movie tv documentary openclass topic
search_channel = ''
# 如果查找的类别不为空的话,获取其完整的正确名称
if channel:
for valid_chl in ['movie', 'tv', 'documentary', 'openclass', 'topic']:
if valid_chl.startswith(channel):
search_channel = valid_chl
if not search_channel:
return []
cache_name = 'recent-{}-items'.format(search_channel)
@alfred.cached(cache_name, expire=600)
def _fetchRecentItems():
items = []
soup = parseWebPage(
os.path.join(_base_host, 'resourcelist'),
data = {
'channel' : search_channel,
'sort' : 'update'
}
)
# pprint(soup.select('ul.boxPadd li'))
for single in soup.select('ul.boxPadd li'):
try:
info = single.select('div.f_r_info dl')[0]
item = {}
item.update(**_res_tpl)
item.update(
id = int(os.path.basename(single.select('div.f_l_img')[0].a['href'])),
title = info.dt.get_text(' ', strip=True),
img = smallPosterURL( single.select('div.f_l_img')[0].img['src'] )
)
map(lambda t: t.font.clear(),info.dd.select('span'))
item['info'] = '说明: {} 人气: {}'.format(
info.dd.select('span')[0].get_text('', strip=True),
info.dd.select('span')[2].get_text('', strip=True)
)
items.append(item)
except Exception, e:
continue
if items:
return items
return _fetchRecentItems()
# 获取今日更新
@alfred.cached('today-items', expire=600)
def fetchTodayItems():
items = []
soup = parseWebPage(os.path.join(_base_host, 'today'))
day = ''
for single in soup.select('table tr.list'):
# 只显示最近日期的文件
item_day = single.get('day', '')
if not day:
day = item_day
if day != item_day:
            continue
info = single.select('td')
item = {}
item.update(**_res_file_tpl)
item.update(
type = info[0].get_text(),
format = info[1].get_text(),
filename = info[2].find('a').get_text(),
filesize = info[4].get_text(),
update_date = '{} {}'.format(single['day'], info[5].get_text())
)
res_id = os.path.basename(info[2].find('a')['href'])
# 文件ID 页面没有提供 自定义 资源ID+hash
item_id ='{}-{}'.format(res_id, alfred.util.hashDigest(res_id + item['filename']))
item['id'] = item_id
item.update(**parseDownloadLink( single.select('td.dr_ico a') ))
items.append(item)
if items:
return items
# 获取24小时热门榜
@alfred.cached('top-items', expire=600)
def fetchTopItems():
items = []
soup = parseWebPage(os.path.join(_base_host, 'resourcelist'))
for single in soup.select('ul.top_list2 li'):
try:
item = {}
item.update(**_res_tpl)
img_ele = single.select('div.f_l_img')
info = single.select('div.f_r_info div')
item.update(
id = int(os.path.basename(img_ele[0].a['href'])),
title = info[0].get_text().strip('《》'),
img = smallPosterURL( img_ele[0].a.img['src'] ),
info = '{} {} {}'.format(info[1].get_text(), info[2].get_text(), info[3].get_text())
)
items.append(item)
except Exception, e:
continue
if items:
return items
# 获取单个资源信息
def fetchSingleResource(res_id):
cache_name = 'single-resource-{}'.format(res_id)
@alfred.cached(cache_name, expire=3600)
def _fetchSingleResource():
page_url = getResourcePageURLByID(res_id)
soup = parseWebPage(page_url)
res = {}
res.update(**_res_tpl)
res.update(
id = res_id,
title = soup.select('h2 strong')[0].get_text('', strip=True),
img = smallPosterURL(soup.select('div.res_infobox div.f_l_img img')[0]['src']),
page = page_url
)
for single in soup.select('ul.resod_list li'):
item = {}
item.update(**_res_file_tpl)
item.update(
id = single['itemid'],
format = single['format'],
filename = single.select('div.lks .lks-1')[0].get_text(),
filesize = single.select('div.lks .lks-2')[0].get_text(),
)
item.update(**parseDownloadLink( single.select('div.download a') ))
res['files'].append(item)
# 缓存60分钟
if res['files']:
return res
return _fetchSingleResource()
# 获取搜索结果
def fetchSearchResult(word):
if not word:
return []
cache_name = 'search-{}'.format(word.lower())
@alfred.cached(cache_name, expire = 600)
def _fetchSearchResult():
items = []
soup = parseWebPage(
os.path.join(_base_host, 'search/index'),
data = {
'keyword' : '{}'.format(word),
'type' : 'resource',
'order' : 'uptime'
}
)
for single in soup.select('ul.allsearch li'):
try:
item = {}
item.update(**_res_tpl)
item.update(
title = single.select('div.all_search_li2')[0].get_text(),
id = int(os.path.basename(single.select('div.all_search_li2')[0].a['href']))
)
# 信息
pub_time = time.localtime(float(single.select('span.time')[0].get_text().strip()))
update_time = time.localtime(float(single.select('span.time')[1].get_text().strip()))
item['info'] = '类型:{} 发布时间:{} 更新时间:{} {}'.format(
single.select('div.all_search_li1')[0].get_text().strip(),
time.strftime('%Y-%m-%d %H:%I', pub_time),
time.strftime('%Y-%m-%d %H:%I', update_time),
single.select('div.all_search_li3')[0].get_text().strip()
)
items.append(item)
except Exception, e:
continue
return items
return _fetchSearchResult()
def getResourcePageURLByID(res_id):
return os.path.join(_base_host, 'resource', unicode(res_id))
# 获取最新更新
def recent():
try:
items = fetchRecentItems(alfred.argv(2))
if not items:
alfred.exitWithFeedback(item=_fb_no_found())
feedback = alfred.Feedback()
for item in items:
feedback.addItem(
title = item['title'],
subtitle = item['info'],
icon = alfred.storage.getLocalIfExists(item['img'], True),
valid = False,
autocomplete = 'resource {} '.format(item['id'])
)
feedback.addItem(item=_fb_return_top())
feedback.output()
except Exception, e:
alfred.exitWithFeedback(item=_fb_no_found())
# 最近今日更新
def today():
if not isLogined():
alfred.exitWithFeedback(item=_fb_no_logined())
try:
items = fetchTodayItems()
items = filterItems(alfred.argv(2), items)
if not items:
alfred.exitWithFeedback(item=_fb_no_found())
feedback = alfred.Feedback()
for item in items:
item.update(**parseDownloadHas(item))
subtitle = '类别: {type} 格式: {format} 容量: {filesize} 电驴: {has_emule} 磁力链: {has_magnet} 百度盘: {has_baidu} 日期: {update_date}'.format(**item)
feedback.addItem(
title = item['filename'],
subtitle = subtitle,
valid = False,
autocomplete = 'today-file {}'.format(item['id'])
)
feedback.addItem(item=_fb_return_top())
feedback.output()
except Exception, e:
alfred.exitWithFeedback(item=_fb_no_found())
# 24小时最热资源
def top():
try:
items = fetchTopItems()
if not items:
alfred.exitWithFeedback(item=_fb_no_found())
feedback = alfred.Feedback()
count = 1
for item in items:
feedback.addItem(
title = '{:02d}. {}'.format(count, item['title']),
subtitle = item['info'],
icon = alfred.storage.getLocalIfExists(item['img'], True),
valid = False,
autocomplete = 'resource {id} '.format(**item)
)
count = count + 1
feedback.addItem(item=_fb_return_top())
feedback.output()
except Exception, e:
alfred.exitWithFeedback(item=_fb_no_found())
def search():
try:
word = ' ' .join(sys.argv[2:])
if not word:
alfred.exitWithFeedback(title='输入搜索关键词', valid=False)
items = fetchSearchResult(word)
if not items:
alfred.exitWithFeedback(item=_fb_no_found())
feedback = alfred.Feedback()
for item in items:
feedback.addItem(
title = item['title'],
subtitle = item['info'],
valid = False,
autocomplete = 'resource {} '.format(item['id'])
)
feedback.addItem(item=_fb_return_top())
feedback.output()
except Exception, e:
        alfred.exitWithFeedback(item=_fb_no_found())
def resource():
try:
res_id = int(alfred.argv(2))
data = fetchSingleResource(res_id)
files = data.get('files', [])
files = filterItems(alfred.argv(3), files)
if not data:
            alfred.exitWithFeedback(item=_fb_no_found())
feedback = alfred.Feedback()
feedback.addItem(
title = data['title'],
subtitle = '{} 个文件,可使用`文件名,格式`过滤,如:`s09` `,mp4` `s01e08,hdtv`,选择此项打开资源页面'.format(len(files)),
arg = 'open-url {}'.format( b64encode(getResourcePageURLByID(data['id'])) ),
icon = alfred.storage.getLocalIfExists(data['img'], True)
)
files_ids = []
for f in files:
files_ids.append(f['id'])
f.update(**parseDownloadHas(f))
subtitle = '类型: {format} 容量: {filesize} 电驴: {has_emule} 磁力链: {has_magnet} 百度盘: {has_baidu}'.format(**f)
feedback.addItem(
title = f['filename'],
subtitle = subtitle,
valid = False,
autocomplete = 'file {},{}'.format(data['id'], f['id']),
icon = alfred.storage.getLocalIfExists(data['img'], True)
)
if len(files_ids) > 1:
feedback.addItem(
title = '所有文件',
subtitle = '对当前的所有文件进行批量处理',
valid = False,
autocomplete = 'file {},{}'.format(data['id'], ','.join(files_ids)),
icon = alfred.storage.getLocalIfExists(data['img'], True)
)
feedback.addItem(item=_fb_return())
feedback.output()
except Exception, e:
alfred.exitWithFeedback(item=_fb_no_found())
def fileDownloadFeedback(feedback, res_id, emule, magnet, baidu=None):
if baidu:
feedback.addItem(
title = '打开百度盘',
subtitle = baidu,
arg = 'open-url {}'.format(b64encode(baidu))
)
if emule:
feedback.addItem(
title = '拷贝eMule地址到剪切板',
subtitle = emule,
arg = 'copy-to-clipboard {}'.format(b64encode(emule))
)
if magnet:
feedback.addItem(
title = '拷贝磁力链到剪切板',
subtitle = magnet,
arg = 'copy-to-clipboard {}'.format(b64encode(magnet))
)
# 使用download station Workflow 下载 eMule优先
#? 如何判断workflow已安装
if alfred.isWorkflowWorking('net.jeeker.awf.DownloadStation'):
if emule or magnet:
feedback.addItem(
title = '使用Download Station Workflow下载',
subtitle = '优先使用电驴地址下载',
arg = 'download-with-ds {}'.format( b64encode(emule if emule else magnet))
)
if not emule and not magnet:
feedback.addItem(
title = '没有找到电驴或磁力链地址,打开资源页面',
arg = 'open-url {}'.format( b64encode(getResourcePageURLByID(res_id)) )
)
return feedback
def file():
try:
ids = alfred.argv(2).split(',')
res_id = int(ids[0])
file_ids = map(lambda i:int(i), ids[1:])
data = fetchSingleResource(res_id)
files = filter(lambda f: int(f['id']) in file_ids, data['files'])
feedback = alfred.Feedback()
if not files:
feedback.addItem(
title = '没有找到想要的内容',
subtitle = '这里可返回资源列表',
valid = False,
autocomplete = 'resource {} '.format(res_id),
icon = alfred.storage.getLocalIfExists(data['img'], True)
)
elif len(files) == 1:
subtitle = '类型: {format} 容量: {filesize} 这里可返回资源列表'.format(**files[0])
feedback.addItem(
title = files[0]['filename'],
subtitle = subtitle,
valid = False,
autocomplete = 'resource {} '.format(res_id),
icon = alfred.storage.getLocalIfExists(data['img'], True)
)
feedback = fileDownloadFeedback(feedback, data['page'], files[0]['emule'], files[0]['magnet'], files[0]['baidu'])
else:
feedback.addItem(
title = '批处理多个文件',
subtitle = '{} 个文件, 这里可返回资源列表'.format(len(files)),
valid = False,
autocomplete = 'resource {}'.format(res_id),
icon = alfred.storage.getLocalIfExists(data['img'], True)
)
emule = '\n'.join( [f['emule'] for f in files] )
magnet = '\n'.join( [f['magnet'] for f in files] )
feedback = fileDownloadFeedback(feedback, data['page'], emule, magnet)
feedback.output()
except Exception, e:
alfred.exitWithFeedback(item=_fb_no_found())
def todayFile():
if not isLogined():
alfred.exitWithFeedback(item=_fb_no_logined())
try:
item_id = alfred.argv(2)
res_id = item_id.split('-')[0]
item = {}
for item in fetchTodayItems():
if item.get('id') == item_id:
break
if not item:
alfred.exitWithFeedback(item=_fb_no_found())
feedback = alfred.Feedback()
feedback.addItem(
title = item['filename'],
subtitle = '类别: {type} 格式: {format} 容量: {filesize} 日期: {update_date} 这里可访问资源文件列表'.format(**item),
valid = False,
autocomplete = 'resource {}'.format(res_id)
)
feedback = fileDownloadFeedback(feedback, res_id, item['emule'], item['magnet'], item['baidu'])
feedback.addItem(item=_fb_return())
feedback.output()
except Exception, e:
alfred.exitWithFeedback(item=_fb_no_found())
def setting():
usr = alfred.argv(2)
pwd = alfred.argv(3)
info = ''
if usr:
info = '用户名: {} 密码: {}'.format(usr, pwd)
elif alfred.config.get('usr'):
info = '现有设置: 用户名: {} 密码: ********'.format(alfred.config.get('usr'))
feedback = alfred.Feedback()
feedback.addItem(
title = '{}用户名和密码'.format('修改' if isLogined() else '设置'),
subtitle = '格式:用户名 密码 {}'.format(info),
arg = 'account-setting {} {}'.format(usr, pwd)
)
feedback.output()
def menu():
feedback = alfred.Feedback()
feedback.addItem(
title = '人人影视 24小时热门资源',
subtitle = '最近24小时的热门排行',
autocomplete = 'top',
valid = False
)
if isLogined():
feedback.addItem(
title = '人人影视 今日更新的文件',
subtitle = '若今天尚无文件,则显示前一天,可使用`文件名,格式`过滤,如:`s09` `,mp4` `s01e08,hdtv`',
autocomplete = 'today ',
valid = False
)
feedback.addItem(
title = '人人影视 最近更新的资源',
subtitle = '可使用movie, tv, documentary, openclass, topic过滤相应的资源',
autocomplete = 'recent ',
valid = False
)
feedback.addItem(
title = '人人影视 资源搜索...',
subtitle = '',
autocomplete = 'search ',
valid = False
)
feedback.addItem(
title = '{}用户名和密码'.format('修改' if isLogined() else '设置'),
subtitle = '{}查看今日更新文件或某些有版权问题需要,仅支持用户名方式登陆'.format('已设置并成功登陆,' if isLogined() else ' '),
valid = False,
autocomplete = 'setting '
)
feedback.output()
def main():
alfred.cache.cleanExpired()
cmds = {
'menu' : menu,
'recent' : recent,
'top' : top,
'search' : search,
'resource' : resource,
'file' : file,
'today' : today,
'today-file': todayFile,
'setting' : setting
}
subcmd = alfred.argv(1) or ''
subcmd = subcmd.lower()
# 空 或 有意义的命令才记录
if not subcmd or subcmd in cmds:
recordQuery()
cmds[subcmd]() if subcmd in cmds else cmds['menu']()
if __name__ == '__main__':
main()
``` |
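The `文件名,格式` filter mentioned in the feedback strings is implemented by `filterItems` above; an illustration only, assuming the function is available from this module:
```python
files = [
    {'filename': 'Show.S01E08.HDTV.x264.mp4', 'format': 'MP4'},
    {'filename': 'Show.S01E08.720p.mkv', 'format': 'MKV'},
]
# the part before the comma matches the file name, the part after matches the format
print(filterItems('s01e08,mp4', files))  # keeps only the first entry
print(filterItems(',mkv', files))        # empty name filter: format-only match
```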
{
"source": "JinnLynn/dockerfiles",
"score": 2
} |
#### File: dockerfiles/flask/app.py
```python
import sys
import platform
from subprocess import check_output
import flask
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
kwargs = {}
if sys.version_info.major == 3:
kwargs.update(encoding='utf8')
uwsgi_ver = check_output(['uwsgi', '--version'], **kwargs)
return "it's work. Python-{} Flask-{} uWSGI-{}".format(
platform.python_version(),
flask.__version__,
uwsgi_ver.strip())
``` |
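A quick smoke test of the route above with Flask's built-in test client; the handler shells out to `uwsgi`, so the binary must be on PATH, and the import assumes the module is importable as `app`:
```python
from app import app  # the module above, assumed importable under this name
with app.test_client() as client:
    resp = client.get("/")
    print(resp.status_code, resp.get_data(as_text=True))
```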
{
"source": "JinnLynn/genpac-server",
"score": 2
} |
#### File: genpac-server/genpac_server/views.py
```python
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import os
import time
from functools import wraps
from urlparse import urlparse
from pprint import pprint
from flask import Flask, Response, render_template, request, url_for
from flask import current_app, jsonify, redirect
import genpac
from . import __version__, __project_url__
from . import main
from .utils import calc_hash, surmise_domain, replace_all, get_genpac_version
from .utils import query2replacements, replacements2query
def send_file(filename, replacements=None, mimetype=None, add_etags=True):
    # 忽略文件名以`_`开始的文件
    if filename.startswith('_'):
        return current_app.make_response(('Not Found.', 404))
    # copy to avoid mutating a shared default dict (or the caller's dict) across requests
    replacements = dict(replacements or {})
    replacements.update(query2replacements(request.values))
try:
if not os.path.isabs(filename):
filename = os.path.abspath(
os.path.join(current_app.config.options.target_path,
filename))
with open(filename) as fp:
content = fp.read()
if replacements:
content = replace_all(content, replacements)
resp = current_app.make_response(content)
resp.mimetype = mimetype or 'text/plain'
resp.last_modified = os.path.getmtime(filename)
if add_etags:
etag = '{}-{}-{}-{}'.format(filename,
os.path.getmtime(filename),
os.path.getsize(filename),
replacements2query(replacements))
etag = calc_hash(etag)
resp.set_etag(etag)
return resp
except Exception as e:
print('GenPAC Error: {}'.format(e))
return current_app.make_response(('Not Found.', 404))
def is_authorized():
if not current_app.config.options.auth_token:
return True
auth_token = request.headers.get('Token', None)
if auth_token is None:
auth_token = request.values.get('token', None)
if auth_token == current_app.config.options.auth_token:
return True
return False
def authorized(func):
@wraps(func)
def wrapper(*args, **kwargs):
if is_authorized():
return func(*args, **kwargs)
return current_app.make_response(('Unauthorized.', 401))
return wrapper
def make_res_data(data={}, code=0, msg='成功'):
return jsonify({'data': data, 'code': code, 'msg': msg})
@main.before_request
def load_domains():
if not current_app.extensions['genpac'].domains_outdate:
return
current_app.logger.info('Domains Loaded.')
with open(current_app.config.options.domains_file) as fp:
domains = {'p': [], 'd': []}
for line in fp.readlines():
t, d = line.split(',')
domains[t.strip()].append(d.strip())
current_app.extensions['genpac'].domains_proxy = domains['p']
current_app.extensions['genpac'].domains_direct = domains['d']
current_app.extensions['genpac'].domains_outdate = False
@main.app_template_global('powered_by')
def powered_by():
try:
if current_app.extensions['genpac'].last_builded <= 0:
statinfo = os.stat(current_app.config.options.domains_file)
current_app.extensions['genpac'].last_builded = statinfo.st_mtime
except Exception:
build_date = '-'
else:
build_date = time.strftime(
'%Y-%m-%d %H:%M:%S',
time.localtime(current_app.extensions['genpac'].last_builded))
pb = 'Last Builded: {} ' \
'Powered by <a href="{}">GenPAC v{}</a> ' \
'<a href="{}">GenPAC-Server v{}</a>'
return pb.format(
build_date,
genpac.__project_url__, get_genpac_version(),
__project_url__, __version__)
@main.route("/", methods=['GET', 'POST'])
def index():
return render_template('index.html',
ip_srvs=current_app.config.options.ip_srvs)
@main.route('/pac/<location>/', methods=['GET', 'POST'])
@authorized
def get_pac(location):
proxy = current_app.config.options.pacs.get(location) or location
return send_file('pac.tpl', replacements={'__PROXY__': proxy},
mimetype='application/javascript')
@main.route('/file/<path:filename>', methods=['GET', 'POST'])
@authorized
def get_file(filename):
return send_file(filename)
@main.route('/rules/', methods=['GET', 'POST'])
def rules():
if not current_app.config.options.server_rule_enabled:
return current_app.make_response(('Not Found.', 404))
content = ''
try:
with open(current_app.config.options.server_rule_file) as fp:
content = fp.read()
except Exception as e:
pass
return render_template('rules.html',
content=content,
action_url=url_for('.rules_update'),
token=request.values.get('token', ''))
@main.route('/s/<code>', methods=['POST', 'GET'])
@authorized
def shortener(code):
try:
code_cfg = current_app.config.options.shortener.get(code)
cfgs = code_cfg.split(' ')
cfgs.append('')
filename, query = cfgs[0:2]
except Exception as e:
print('shortener ERROR: {}'.format(e))
return current_app.make_response(('Not Found.', 404))
rms = query2replacements(query)
return send_file(filename, replacements=rms)
@main.route('/test/', methods=['GET', 'POST'])
def test():
url = request.values.get('url', None)
if not url:
return make_res_data(code=1, msg='地址不能为空')
data = current_app.extensions['genpac']
domain = surmise_domain(url)
return make_res_data(data={
'd': domain in data.domains_direct,
'p': domain in data.domains_proxy,
'domain': domain,
'url': url})
@main.route('/rules-update/', methods=['POST'])
def rules_update():
if not current_app.config.options.server_rule_enabled:
return make_res_data(code=404, msg='服务端用户规则未启用')
if not is_authorized():
return make_res_data(code=401, msg='未授权, token错误')
try:
content = request.form.get('rules', '')
with open(current_app.config.options.server_rule_file, 'w') as fp:
fp.write(content.strip())
return make_res_data()
except Exception as e:
return make_res_data(code=1, msg='出错了, {}'.format(e))
@main.route('/ip/')
def show_ip():
ip = request.remote_addr
return Response(
'{}\n'.format(ip), mimetype="text/plain",
headers={'X-Your-Ip': ip,
'Access-Control-Allow-Origin': '*'})
``` |
{
"source": "jinnn-dev/learning-by-annotations",
"score": 2
} |
#### File: app/persistance/custom_minio_client.py
```python
import json
import os
from typing import Any
from minio import Minio
from minio.deleteobjects import DeleteObject
def policy(bucket_name):
return {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {"AWS": ["*"]},
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::" + bucket_name + "/*",
}
],
}
class MinioClient:
def __init__(self):
self.instance = Minio(
endpoint=os.environ["MINIO_URL"]
if "MINIO_URL" in os.environ
else "minio:9000",
access_key=os.environ["MINIO_ROOT_USER"],
secret_key=os.environ["MINIO_ROOT_PASSWORD"],
secure=False,
)
def create_bucket(self, bucket_name: str) -> None:
bucket = self.instance.bucket_exists(bucket_name)
if not bucket:
bucket = self.instance.make_bucket(bucket_name, "eu")
print("✔️ Bucket created")
self.instance.set_bucket_policy(
bucket_name, json.dumps(policy(bucket_name))
)
print("✔️ Bucket policy created")
else:
print("Bucket already exists")
def create_object(
self, *, bucket_name: str, file_name: str, file_content: Any, content_type: Any
):
try:
self.instance.fput_object(
bucket_name,
file_name,
file_content,
metadata={"Content-type": content_type},
)
print(f"✔️ {file_name} has been created")
except Exception as exc:
print(f"❌ {file_name} couldn't be created")
print(exc)
raise Exception(f"{file_name} could not be created")
def delete_object(self, *, bucket_name: str, file_name: str):
print(f"🚮 {file_name} has been deleted")
self.instance.remove_object(bucket_name, file_name)
def delete_folder(self, *, bucket_name: str, folder_path: str):
delete_object_list = map(
lambda x: DeleteObject(x.object_name),
self.instance.list_objects(bucket_name, prefix=folder_path, recursive=True),
)
errors = self.instance.remove_objects(bucket_name, delete_object_list)
for error in errors:
print(error)
def delete_all_objects(self, *, bucket_name: str):
objects = self.instance.list_objects(bucket_name)
for item in objects:
self.instance.remove_object(bucket_name, item.object_name)
print("✔️ All Objects deleted")
def get_object(self, *, bucket_name: str, file_name: str):
return self.instance.get_object(bucket_name, object_name=file_name)
```
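A hypothetical use of the wrapper above (run in a context where `MinioClient` is importable); it assumes `MINIO_ROOT_USER`/`MINIO_ROOT_PASSWORD` (and optionally `MINIO_URL`) are set, and note that `create_object` forwards `file_content` to `fput_object`, i.e. it expects a local file path:
```python
client = MinioClient()
client.create_bucket("task-images")
client.create_object(
    bucket_name="task-images",
    file_name="slides/demo/0/0_0.jpeg",  # hypothetical object key
    file_content="/tmp/0_0.jpeg",        # local file path, uploaded via fput_object
    content_type="image/jpeg",
)
client.delete_object(bucket_name="task-images", file_name="slides/demo/0/0_0.jpeg")
```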
#### File: app/utils/file_utils.py
```python
import os
import aiofiles
from app.config import Config
from app.worker import convert_slide
from fastapi.datastructures import UploadFile
from fastapi.param_functions import File
async def write_file(
folder_name: str, file_name: str, file: UploadFile = File(...)
) -> None:
"""
Writes the given file asynchronously in a chunkwise manner to the disk.
File must be stored in a separate folder.
All data written will be stored in the data folder
:param folder_name: The name of the folder to store the file in
:param file_name: The name of the file
:param file: The file to save on the disk
:return: The coroutine which can be awaited
"""
async with aiofiles.open(
f"{Config.TEMP_IMAGES_FOLDER}/{folder_name}/{file_name}", "wb"
) as out_file:
while content := await file.read(1024):
await out_file.write(content)
async def write_slide_to_disk(
folder_name: str, file_name: str, file: UploadFile = File(...)
) -> None:
"""
    Creates an image pyramid from the given file.
All images are stored in the given folder name
:param folder_name: The name of the folder where to store the slide
:param file_name: The name of the file
    :param file: The file to save and create a slide from
"""
os.mkdir(f"{Config.TEMP_IMAGES_FOLDER}/{folder_name}")
await write_file(folder_name, file_name, file)
convert_slide.delay(file_name)
``` |
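A hypothetical FastAPI route driving the helpers above; only `write_slide_to_disk` comes from this module, the router path and folder name are assumptions:
```python
from fastapi import APIRouter, File, UploadFile
from app.utils.file_utils import write_slide_to_disk  # path taken from the file header
router = APIRouter()
@router.post("/slides")
async def upload_slide(file: UploadFile = File(...)):
    # one temp folder per upload; the pyramid conversion itself runs in the Celery worker
    await write_slide_to_disk("upload-demo", file.filename, file)
    return {"status": "queued", "filename": file.filename}
```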
{
"source": "jinnn-dev/patholearn",
"score": 2
} |
#### File: api/endpoints/task_groups.py
```python
from typing import Any, List
from app.crud.crud_task_statistic import crud_task_statistic
from app.schemas.task_group import TaskGroupUpdate
from starlette.responses import StreamingResponse
from app.api.deps import (
check_if_user_can_access_course,
get_current_active_superuser,
get_current_active_user,
get_db,
)
from app.core.export.task_exporter import TaskExporter
from app.crud.crud_course import crud_course
from app.crud.crud_task import crud_task
from app.crud.crud_task_group import crud_task_group
from app.crud.crud_user_solution import crud_user_solution
from app.models.user import User
from app.schemas.task_group import TaskGroup, TaskGroupCreate, TaskGroupDetail
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
router = APIRouter()
@router.get("", response_model=List[TaskGroup])
def get_task_groups_by_course(
*,
db: Session = Depends(get_db),
course_id: int,
current_user: User = Depends(get_current_active_user)
):
task_groups = crud_task_group.get_multi_by_course_id(db, course_id=course_id)
percentage_solved = 0.0
for task_group in task_groups:
percentage = crud_user_solution.get_solved_percentage_to_task_group(
db, user_id=current_user.id, task_group_id=task_group.id
)[0]
task_group_length = len(task_group.tasks)
if percentage and task_group_length:
percentage_solved += float(percentage)
task_group.percentage_solved = percentage / task_group_length
else:
task_group.percentage_solved = 0
return task_groups
@router.post("", response_model=TaskGroup)
def create_task_group(
*,
db: Session = Depends(get_db),
task_group_in: TaskGroupCreate,
current_user: User = Depends(get_current_active_superuser)
):
check_if_user_can_access_course(
db, user_id=current_user.id, course_id=task_group_in.course_id
)
task_group_duplicate = crud_task_group.get_by_name(
db, name=task_group_in.name, course_id=task_group_in.course_id
)
if task_group_duplicate:
raise HTTPException(status_code=400, detail="TaskGroup name already exists")
task_group = crud_task_group.create(db, obj_in=task_group_in)
return task_group
@router.get("/{short_name}", response_model=TaskGroupDetail)
def get_task_group(
*,
db: Session = Depends(get_db),
short_name: str,
current_user: User = Depends(get_current_active_user)
) -> Any:
task_group = crud_task_group.get_by_short_name(db, short_name=short_name)
if crud_course.is_not_member_and_owner(
db, course_id=task_group.course_id, user_id=current_user.id
):
course = crud_course.get(db, id=task_group.course_id)
raise HTTPException(
status_code=403,
detail={
"course": {
"name": course.name,
"short_name": course.short_name,
"owner": {
"firstname": course.owner.firstname,
"lastname": course.owner.lastname,
},
}
},
)
task_group_percentage = 0.0
task_count = 0
new_tasks = []
for task in task_group.tasks:
new_task_count = 0
if not task.enabled and not current_user.is_superuser:
continue
base_task_percentage = float(
crud_user_solution.get_solved_percentage_to_base_task(
db, user_id=current_user.id, base_task_id=task.id
)[0]
or 0.0
)
if crud_task.has_new_task(db, user_id=current_user.id, base_task_id=task.id):
new_task_count += 1
task_group_percentage += base_task_percentage
task_len = len(task.tasks)
task_count += task_len
task.task_count = task_len
task.new_tasks = new_task_count
if task.tasks:
task.percentage_solved = base_task_percentage / len(task.tasks)
else:
task.percentage_solved = 0
task.correct_tasks = (
crud_user_solution.get_amount_of_correct_solutions_to_base_task(
db, user_id=current_user.id, base_task_id=task.id
)
)
task.wrong_tasks = (
crud_user_solution.get_amount_of_wrong_solutions_to_base_task(
db, user_id=current_user.id, base_task_id=task.id
)
)
del task.tasks
new_tasks.append(task)
if task_count != 0:
task_group.percentage_solved = task_group_percentage / task_count
else:
task_group.percentage_solved = 0.0
task_group.task_count = task_count
task_group.tasks = new_tasks
course = crud_course.get(db=db, id=task_group.course_id)
task_group.course_short_name = course.short_name
return task_group
@router.delete("/{short_name}", response_model=TaskGroup)
def remove_task_group(
*,
db: Session = Depends(get_db),
short_name: str,
current_user: User = Depends(get_current_active_user)
):
task_group = crud_task_group.get_by_short_name(db, short_name=short_name)
check_if_user_can_access_course(
db, user_id=current_user.id, course_id=task_group.course_id
)
for task in task_group.tasks:
crud_task_statistic.remove_all_by_base_task_id(db, base_task_id=task.id)
crud_user_solution.remove_all_to_task_group(db, task_group_id=task_group.id)
deleted_task_group = crud_task_group.remove(db, model_id=task_group.id)
return deleted_task_group
@router.get(
"/{short_name}/userSolution/download",
response_model=Any,
response_description="xlsx",
)
def download_usersolutions(
*,
db: Session = Depends(get_db),
short_name: str,
current_user: User = Depends(get_current_active_superuser)
) -> Any:
task_group = crud_task_group.get_by_short_name(db, short_name=short_name)
check_if_user_can_access_course(
db, user_id=current_user.id, course_id=task_group.course_id
)
output = TaskExporter.export_point_task_group_as_xlsx(db, task_group)
headers = {
"Content-Disposition": 'attachment; filename="' + task_group.short_name + '"'
}
return StreamingResponse(output, headers=headers)
@router.put("", response_model=TaskGroup)
def update_task_group(
*,
db: Session = Depends(get_db),
current_user: User = Depends(get_current_active_superuser),
obj_in: TaskGroupUpdate
) -> TaskGroup:
task_group = crud_task_group.get(db, id=obj_in.task_group_id)
check_if_user_can_access_course(
db, user_id=current_user.id, course_id=task_group.course_id
)
task_group_duplicate = crud_task_group.get_by_name(
db, name=obj_in.name, course_id=task_group.course_id
)
if task_group_duplicate:
raise HTTPException(status_code=400, detail="TaskGroup name already exists")
task_group = crud_task_group.update(db, db_obj=task_group, obj_in=obj_in)
return task_group
```
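The aggregation in `get_task_group` sums the user's `percentage_solved` over all base tasks and divides by the total number of sub-tasks. A worked example with hypothetical numbers:
```python
# two base tasks with 3 and 2 sub-tasks; the user's summed percentage_solved per
# base task is 2.5 and 1.0 (each fully solved sub-task contributes up to 1.0)
task_group_percentage = 2.5 + 1.0
task_count = 3 + 2
print(task_group_percentage / task_count)  # 0.7 -> the task group counts as 70% solved
```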
#### File: core/solver/task_result_factory.py
```python
from app.schemas.task import TaskFeedback, TaskStatus, AnnotationFeedback
class TaskResultFactory:
@staticmethod
def correct_status(task_result: TaskFeedback) -> TaskFeedback:
"""
Returns TaskResult with correct status
:param task_result: TaskResult object
:return: The TaskResult with correct status
"""
task_result.task_status = TaskStatus.CORRECT
task_result.response_text = "Richtig gelöst!"
return task_result
@staticmethod
def partial_status(task_result: TaskFeedback) -> TaskFeedback:
"""
Returns TaskResult with partial status
:param task_result: TaskResult object
:return: The TaskResult with partial status
"""
task_result.task_status = TaskStatus.PARTIAL
task_result.response_text = "Einige Annotationen sind noch nicht richtig!"
return task_result
@staticmethod
def wrong_status(task_result: TaskFeedback) -> TaskFeedback:
"""
Return TaskResult with wrong status
:param task_result: TaskResult object
:return: The TaskResult with wrong status
"""
task_result.task_status = TaskStatus.WRONG
task_result.response_text = (
"Deine Annotationen sind nicht an der richtigen Stelle."
)
return task_result
@staticmethod
def wrong_name_status(task_result: TaskFeedback) -> TaskFeedback:
"""
Return TaskResult with wrong_name status
:param task_result: TaskResult object
:return: The TaskResult with wrong_name status
"""
task_result.task_status = TaskStatus.WRONG_NAME
task_result.response_text = "Deine Annotationen sind an der richtigen Stelle, aber bei den Klassennanmen musst du nochmal schauen"
return task_result
@staticmethod
def append_result_detail(
*,
annotation_id: str,
status: TaskStatus,
percentage: float,
task_result: TaskFeedback
) -> TaskFeedback:
"""
Appends a new TaskResultDetail to the given TaskResult
:param annotation_id: Id of the annotation
:param status: Status of the annotation
:param percentage: Percentage of the annotation
:param task_result: TaskResult where the TaskResultDetail should be added to
:return: The TaskResult with the added TaskResultDetail
"""
task_result.result_detail.append(
AnnotationFeedback(id=annotation_id, status=status, percentage=percentage)
)
return task_result
```
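The solver that assembles a `TaskFeedback` is not part of this file; a minimal sketch of the intended call pattern, assuming `task_result` is an already constructed `TaskFeedback` with an empty `result_detail` list:
```python
# per annotation: record its id, status and overlap percentage
task_result = TaskResultFactory.append_result_detail(
    annotation_id="anno-1",
    status=TaskStatus.CORRECT,
    percentage=0.92,
    task_result=task_result,
)
# then stamp the overall status based on the collected details
if all(d.status == TaskStatus.CORRECT for d in task_result.result_detail):
    task_result = TaskResultFactory.correct_status(task_result)
else:
    task_result = TaskResultFactory.partial_status(task_result)
```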
#### File: app/crud/crud_task_group.py
```python
from typing import List, Optional
from sqlalchemy.orm import Session
from app.crud.base import CRUDBase
from app.models.task_group import TaskGroup
from app.schemas.task_group import TaskGroupCreate, TaskGroupUpdate
class CRUDTaskGroup(CRUDBase[TaskGroup, TaskGroupCreate, TaskGroupUpdate]):
def get_multi_by_course_id(self, db: Session, *, course_id: int) -> List[TaskGroup]:
"""
Returns all TaskGroups to the given course.
:param db: DB-Session
:param course_id: id of the course
:return: All found TaskGroups
"""
return db.query(self.model).filter(TaskGroup.course_id == course_id).all()
def get_by_short_name(self, db: Session, *, short_name: str) -> TaskGroup:
"""
Returns the TaskGroup matching to the shortname.
:param db: DB-Session
:param short_name: short name of the TaskGroup
:return: The found TaskGroup
"""
return db.query(self.model).filter(TaskGroup.short_name == short_name).first()
def get_by_name(self, db: Session, *, name: str, course_id: int) -> Optional[str]:
"""
Returns the TaskGroup to the name in the given course.
:param db: DB-Session
:param name: Name of the TaskGroup
:param course_id: Id of the Course
:return: The found TaskGroup
"""
return (
db.query(self.model)
.filter(TaskGroup.name == name)
.filter(TaskGroup.course_id == course_id)
.first()
)
crud_task_group = CRUDTaskGroup(TaskGroup)
```
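A hypothetical call from an endpoint, with `db` being the SQLAlchemy session injected via `get_db` and a made-up short name:
```python
group = crud_task_group.get_by_short_name(db, short_name="abc12345")
if group is not None:
    siblings = crud_task_group.get_multi_by_course_id(db, course_id=group.course_id)
    duplicate = crud_task_group.get_by_name(db, name=group.name, course_id=group.course_id)
```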
#### File: app/crud/crud_task_statistic.py
```python
import json
from typing import List
from sqlalchemy import text
from sqlalchemy.orm import Session
from app.crud.base import CRUDBase
from app.models.task_statistic import TaskStatistic
from app.schemas.task_statistic import (
TaskStatisticCreate,
TaskStatisticUpdate,
TaskStatistic as TaskStatisticSchema,
)
class CRUDTaskStatistic(
CRUDBase[TaskStatistic, TaskStatisticCreate, TaskStatisticUpdate]
):
def get_oldest_task_statistics_to_base_task_id(
self, db: Session, *, base_task_id: int
    ) -> Tuple[List[TaskStatisticSchema], Set[int]]:
"""
Returns all statistics to the given base task
:param db: DB-Session
:param base_task_id: Id of the base task
:return: All found task statistics and task ids
"""
sql = text(
"""SELECT ts.* FROM taskstatistic as ts
join (SELECT user_id, task_id, min(solved_date) as smallest_date
FROM taskstatistic
where base_task_id = :base_task_id
GROUP BY task_id, user_id) as smallest
on ts.user_id = smallest.user_id and ts.task_id = smallest.task_id and
ts.solved_date = smallest.smallest_date;"""
)
result = db.execute(sql, {"base_task_id": base_task_id}).fetchall()
task_statistics = []
task_ids = set()
for row in result:
task_statistics.append(
TaskStatisticSchema(
id=row[0],
user_id=row[1],
task_id=row[2],
base_task_id=row[3],
solved_date=row[4],
percentage_solved=float(row[5]),
solution_data=json.loads(row[6]),
task_result=json.loads(row[7]),
)
)
task_ids.add(row[2])
return task_statistics, task_ids
def remove_all_by_task_id(
self, db: Session, *, task_id: int
) -> List[TaskStatistic]:
"""
Removes all task statistic to the given task
:param db: DB-Session
:param task_id: Id of the task
:return: The deleted task statistics
"""
db_objs = db.query(self.model).filter(self.model.task_id == task_id).all()
for obj in db_objs:
db.delete(obj)
db.commit()
return db_objs
def remove_all_by_base_task_id(
self, db: Session, *, base_task_id: int
) -> List[TaskStatistic]:
"""
Removes all task statistics to the given base task
:param db: DB-Session
:param base_task_id: Id of the base task
:return: The deleted task statistics
"""
db_objs = (
db.query(self.model).filter(self.model.base_task_id == base_task_id).all()
)
for obj in db_objs:
db.delete(obj)
db.commit()
return db_objs
crud_task_statistic = CRUDTaskStatistic(TaskStatistic)
```
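The raw SQL above keeps, for every (user, task) pair, only the earliest attempt belonging to the given base task. A hypothetical call, again with `db` coming from `get_db`:
```python
first_attempts, attempted_task_ids = (
    crud_task_statistic.get_oldest_task_statistics_to_base_task_id(db, base_task_id=42)
)
# first_attempts: one TaskStatistic schema per user/task pair, at its earliest solved_date
# attempted_task_ids: the set of task ids that have at least one recorded attempt
```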
#### File: app/crud/crud_user_solution.py
```python
from typing import List, Tuple
from sqlalchemy import and_, func
from sqlalchemy.orm import Session
from app.crud.base import CRUDBase
from app.models.user_solution import UserSolution
from app.schemas.user_solution import UserSolution as SchemaSolution
from app.schemas.user_solution import UserSolutionCreate, UserSolutionUpdate
class CRUDUserSolution(CRUDBase[UserSolution, UserSolutionCreate, UserSolutionUpdate]):
def get_solution_to_task_and_user(
self, db: Session, *, user_id: int, task_id: int
) -> SchemaSolution:
"""
Returns the UserSolution to the given user and task.
:param db: DB-Session
        :param user_id: id of the user
:param task_id: id of the task
:return: The found UserSolution
"""
return (
db.query(self.model)
.filter(UserSolution.user_id == user_id)
.filter(UserSolution.task_id == task_id)
.first()
)
def get_solution_to_task(
self, db: Session, *, task_id: int
) -> List[SchemaSolution]:
"""
Returns all user solutions to the given task
:param db: DB-Session
:param task_id: Id of the task
:return: All found user solutions
"""
return db.query(self.model).filter(UserSolution.task_id == task_id).all()
def remove_by_user_id_and_task_id(
self, db: Session, *, user_id: int, task_id: int
) -> SchemaSolution:
"""
Removes Solution of the given user to the given task.
:param db: DB-Session
:param user_id: id of the user
:param task_id: id of the task
:return:
"""
db_obj = (
db.query(self.model)
.filter(UserSolution.user_id == user_id)
.filter(UserSolution.task_id == task_id)
.first()
)
db.delete(db_obj)
db.commit()
return db_obj
def remove_all_by_task_id(
self, db: Session, *, task_id: int
) -> List[SchemaSolution]:
"""
Removes all UserSolutions to the task.
:param db: DB-Session
:param task_id: id of the Task
:return: The deleted UserSolutions
"""
db_objs = db.query(self.model).filter(UserSolution.task_id == task_id).all()
for obj in db_objs:
db.delete(obj)
db.commit()
return db_objs
def remove_all_by_user_to_course(
self, db: Session, user_id: int, course_id: int
) -> List[SchemaSolution]:
"""
Removes all UserSolution of the User to a Course.
:param db: DB-Session
:param user_id: Id of the User
:param course_id: Id of the Course
:return: The deleted UserSolution
"""
db_objs = (
db.query(self.model)
.filter(UserSolution.course_id == course_id)
.filter(UserSolution.user_id == user_id)
.all()
)
for obj in db_objs:
db.delete(obj)
db.commit()
return db_objs
def remove_all_to_course(self, db: Session, course_id: int) -> List[SchemaSolution]:
"""
Removes all UserSolutions to a Course
:param db: DB-Session
:param course_id: Id of the Course
:return: The deleted UserSolutions
"""
db_objs = db.query(self.model).filter(UserSolution.course_id == course_id).all()
for obj in db_objs:
db.delete(obj)
db.commit()
return db_objs
def remove_all_to_task_group(
self, db: Session, task_group_id: int
) -> List[SchemaSolution]:
"""
Removes all UserSolutions to the TaskGroup
:param db: DB-Session
:param task_group_id: Id of the TaskGroup
:return: The deleted UserSolutions
"""
db_objs = (
db.query(self.model)
.filter(UserSolution.task_group_id == task_group_id)
.all()
)
for obj in db_objs:
db.delete(obj)
db.commit()
return db_objs
def remove_all_to_base_task(
self, db: Session, base_task_id: int
) -> List[SchemaSolution]:
"""
Removes all UserSolutions to the BaseTask
:param db: DB-Session
:param base_task_id: Id of the BaseTask
:return: The deleted UserSolutions
"""
db_objs = (
db.query(self.model).filter(UserSolution.base_task_id == base_task_id).all()
)
for obj in db_objs:
db.delete(obj)
db.commit()
return db_objs
def get_solved_percentage_to_task_group(
self, db: Session, *, user_id: int, task_group_id: int
) -> Tuple:
"""
Returns the percentage of the user solved tasks to the given TaskGroup.
:param db: DB-Session
:param user_id: id of the user
:param task_group_id: id of the TaskGroup
:return: Tuple with percentage as Decimal
"""
query = (
db.query(func.sum(self.model.percentage_solved).label("percentage_solved"))
.filter(self.model.user_id == user_id)
.filter(self.model.task_group_id == task_group_id)
)
return query.first()
def get_solved_percentage_to_base_task(
self, db: Session, *, user_id: int, base_task_id: int
) -> Tuple:
"""
Returns the percentage of the user solved tasks to the given BaseTask.
:param db: DB-Session
:param user_id: id of the user
:param base_task_id: id of the BaseTask
:return: Tuple with the percentage as Decimal
"""
return (
db.query(func.sum(self.model.percentage_solved).label("percentage_solved"))
.filter(self.model.user_id == user_id)
.filter(self.model.base_task_id == base_task_id)
.first()
)
def get_solved_percentage_to_course(
self, db: Session, *, user_id: int, course_id: int
) -> int:
"""
Returns the percentage of the user solved tasks to the given Course
:param db: DB-Session
:param user_id: id of the user
:param course_id: id of the course
:return: The solved percentage
"""
return (
db.query(func.sum(self.model.percentage_solved).label("percentage_solved"))
.filter(self.model.user_id == user_id)
.filter(self.model.course_id == course_id)
.first()[0]
or 0.0
)
def get_amount_of_correct_solutions_to_course(
self, db: Session, *, user_id: int, course_id: int
) -> int:
"""
        Returns the amount of correctly solved tasks for the course
:param db: DB-Session
:param user_id: id of the user
:param course_id: id of the course
:return: The amount of correct solutions
"""
return self.__get_amount_of_correct_solution(
db, user_id=user_id, id_name="course_id", id_value=course_id
)
def get_amount_of_wrong_solutions_to_course(
self, db: Session, *, user_id: int, course_id: int
) -> int:
"""
        Returns the amount of wrongly solved tasks for the course
:param db: DB-Session
:param user_id: id of the user
:param course_id: id of the course
        :return: The amount of wrong solutions
"""
return self.__get_amount_of_wrong_solutions(
db, user_id=user_id, id_name="course_id", id_value=course_id
)
def get_amount_of_correct_solutions_to_task_group(
self, db: Session, *, user_id: int, task_group_id: int
) -> int:
"""
        Returns the amount of correctly solved tasks for the task group
:param db: DB-Session
:param user_id: id of the user
        :param task_group_id: id of the task group
:return: The amount of correct solutions
"""
return self.__get_amount_of_correct_solution(
db, user_id=user_id, id_name="task_group_id", id_value=task_group_id
)
def get_amount_of_wrong_solutions_to_task_group(
self, db: Session, *, user_id: int, task_group_id: int
) -> int:
"""
        Returns the amount of wrongly solved tasks for the task group
:param db: DB-Session
:param user_id: id of the user
:param task_group_id: id of the task group
:return: The amount of wrong solutions
"""
return self.__get_amount_of_wrong_solutions(
db, user_id=user_id, id_name="task_group_id", id_value=task_group_id
)
def get_amount_of_correct_solutions_to_base_task(
self, db: Session, *, user_id: int, base_task_id: int
) -> int:
"""
        Returns the amount of correctly solved tasks for the base task
:param db: DB-Session
:param user_id: id of the user
        :param base_task_id: id of the base task
:return: The amount of correct solutions
"""
return self.__get_amount_of_correct_solution(
db, user_id=user_id, id_name="base_task_id", id_value=base_task_id
)
def get_amount_of_wrong_solutions_to_base_task(
self, db: Session, *, user_id: int, base_task_id: int
) -> int:
"""
        Returns the amount of wrongly solved tasks for the base task
:param db: DB-Session
:param user_id: id of the user
:param base_task_id: id of the base task
:return: The amount of wrong solutions
"""
return self.__get_amount_of_wrong_solutions(
db, user_id=user_id, id_name="base_task_id", id_value=base_task_id
)
def increment_failed_attempts(self, db: Session, user_id: int, task_id: int) -> int:
model = self.get_solution_to_task_and_user(db, user_id=user_id, task_id=task_id)
new_attempts = model.failed_attempts + 1
model.failed_attempts = new_attempts
db.add(model)
db.commit()
db.refresh(model)
return new_attempts
def __get_amount_of_wrong_solutions(
self, db: Session, *, user_id: int, id_name: str, id_value: int
) -> int:
return (
db.query(func.count())
.filter(self.model.user_id == user_id)
.filter(getattr(self.model, id_name) == id_value)
.filter(
and_(
func.JSON_LENGTH(self.model.task_result) != 1,
self.model.percentage_solved != 1.00,
)
)
.first()[0]
or 0.0
)
def __get_amount_of_correct_solution(
self, db: Session, user_id: int, id_name: str, id_value: int
) -> int:
return (
db.query(func.count())
.filter(self.model.user_id == user_id)
.filter(getattr(self.model, id_name) == id_value)
.filter(self.model.percentage_solved == 1.00)
.first()[0]
or 0.0
)
crud_user_solution = CRUDUserSolution(UserSolution)
```
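The two private counting helpers above share one query body because `getattr(self.model, id_name)` resolves the filter column (course_id, task_group_id or base_task_id) by name at runtime. A self-contained sketch of that pattern with a hypothetical model and an in-memory SQLite session (none of this is repository code):
```python
# Sketch of getattr-based dynamic filtering with SQLAlchemy (1.4+ style);
# `Solution` and the in-memory engine are hypothetical stand-ins.
from sqlalchemy import Column, Integer, Numeric, create_engine, func
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Solution(Base):
    __tablename__ = "solution"
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer)
    course_id = Column(Integer)
    base_task_id = Column(Integer)
    percentage_solved = Column(Numeric)

def count_correct(db: Session, user_id: int, id_name: str, id_value: int) -> int:
    # getattr looks the column up by name, so one helper serves
    # course_id, task_group_id and base_task_id alike.
    return (
        db.query(func.count())
        .filter(Solution.user_id == user_id)
        .filter(getattr(Solution, id_name) == id_value)
        .filter(Solution.percentage_solved == 1.00)
        .scalar()
        or 0
    )

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as db:
    print(count_correct(db, user_id=1, id_name="course_id", id_value=7))  # 0
```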
#### File: app/utils/minio_client.py
```python
import json
import os
from typing import Any
from app.core.config import settings
from minio import Minio
from minio.deleteobjects import DeleteObject
def policy(bucket_name):
return {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {"AWS": ["*"]},
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::" + bucket_name + "/*",
}
],
}
class MinioClient:
hint_bucket = "hint-images"
task_bucket = "task-images"
def __init__(self):
self.instance = Minio(
endpoint=settings.MINIO_URL,
access_key=settings.MINIO_ROOT_USER,
secret_key=settings.MINIO_ROOT_PASSWORD,
secure=False,
)
self.bucket = None
self.bucket_name = None
def create_bucket(self, bucket_name: str) -> None:
self.bucket = self.instance.bucket_exists(bucket_name)
self.bucket_name = bucket_name
if not self.bucket:
self.bucket = self.instance.make_bucket(bucket_name, "eu")
print("✔️ Bucket created")
self.instance.set_bucket_policy(
self.bucket_name, json.dumps(policy(self.bucket_name))
)
print("✔️ Bucket policy created")
else:
print("Bucket already exists")
def create_object(self, file_name: str, file_content: Any, content_type: Any):
try:
print(file_name, self.bucket_name)
self.instance.fput_object(
self.bucket_name,
file_name,
file_content,
metadata={"Content-type": content_type},
)
print(f"✔️ {file_name} has been created")
except Exception as exc:
print(f"❌ {file_name} couldn't be created")
print(exc)
raise Exception()
def delete_object(self, file_name: str):
self.instance.remove_object(self.bucket_name, file_name)
print(f"❌ {file_name} has ben deleted")
def delete_slide(self, slide_id: str):
# self.instance.remove_object(self.bucket_name, slide_id + '/')
self.delete_folder(slide_id)
def delete_folder(self, folder_path: str):
# objects_to_delete = self.instance.list_objects(self.bucket_name, prefix=folder_path, recursive=True)
delete_object_list = map(
lambda x: DeleteObject(x.object_name),
self.instance.list_objects(
self.bucket_name, prefix=folder_path, recursive=True
),
)
errors = self.instance.remove_objects(self.bucket_name, delete_object_list)
for error in errors:
print(error)
# for obj in objects_to_delete:
# self.instance.remove_object(self.bucket_name, obj.object_name)
def delete_all_objects(self):
objects = self.instance.list_objects(self.bucket_name)
for item in objects:
self.instance.remove_object(self.bucket_name, item.object_name)
print("✔️ All Objects deleted")
def get_object(self, file_name: str):
return self.instance.get_object(self.bucket_name, object_name=file_name)
minio_client = MinioClient()
```
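A short usage sketch for the client above. It assumes a reachable MinIO server configured via `app.core.config.settings`; the object key and local file path are purely illustrative:
```python
# Illustrative usage only; the object key and local path are invented.
from app.utils.minio_client import MinioClient, minio_client

minio_client.create_bucket(MinioClient.task_bucket)   # no-op if the bucket already exists
minio_client.create_object(
    file_name="slide-42/thumbnail.png",               # hypothetical object key
    file_content="/tmp/thumbnail.png",                # fput_object reads a local file path
    content_type="image/png",
)
# The anonymous-GET bucket policy set in create_bucket means the object can also
# be fetched back without credentials:
response = minio_client.get_object("slide-42/thumbnail.png")
print(response.status)
```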
#### File: app/crud/crud_slide.py
```python
from typing import List
from app.crud.base import CRUDBase
from app.schemas.slide import CreateSlide, Slide, SlideStatus, UpdateSlide
from pymongo.collection import Collection
class CRUDSlide(CRUDBase[Slide, CreateSlide, UpdateSlide]):
def slide_with_name_exists(self, *, collection: Collection, name: str) -> bool:
return collection.count_documents({"name": name}, limit=1) != 0
def get_all_slides(
self,
*,
collection: Collection,
status: SlideStatus = None,
with_metadata: bool = True
) -> List[Slide]:
filter_query = {"_id": False, "metadata": with_metadata}
where_query = {}
        if status is not None:
where_query["status"] = status
return self.get_multi(
collection, where_query=where_query, filter_query=filter_query
)
def get_all_slides_by_ids(
self,
*,
collection: Collection,
slide_ids: List[str],
status: SlideStatus = None,
with_metadata: bool = True
) -> List[Slide]:
filter_query = {"_id": False, "metadata": with_metadata}
where_query = {}
        if status is not None:
where_query["status"] = status
return self.get_multi_by_ids(
collection, slide_ids, where_query=where_query, filter_query=filter_query
)
def get_slide(
self, *, collection: Collection, slide_id: str, with_metadata: bool = True
) -> Slide:
return self.get(
collection=collection,
entity_id_value=slide_id,
filter_qurey={"_id": False, "metadata": with_metadata},
)
crud_slide = CRUDSlide(Slide, "slide_id")
```
#### File: app/utils/slide_utils.py
```python
import base64
import os
from typing import Any, Dict, List, Tuple
from app.schemas.slide import Slide
def convert_binary_to_base64(binary_data: bytes):
"""
Converts bytes to base64
:param binary_data: Data to convert
:return: The data in base64
"""
return base64.b64encode(binary_data)
def is_byte_data(data: Any):
"""
Checks if the given data is of type byte
:param data: The data to check
:return: Whether the data is of type bytes or not
"""
return type(data) is bytes
def convert_slide_binary_metadata_to_base64(slide: Slide) -> Slide:
"""
Converts all binary data contained in the slide metadata to base64
"""
if slide.metadata is not None:
for metadata_key, metadata_value in slide.metadata.items():
if is_byte_data(metadata_value):
slide.metadata[metadata_key] = convert_binary_to_base64(metadata_value)
return slide
def convert_binary_metadata_to_base64(slides: List[Slide]) -> List[Slide]:
"""
Converts all binary data contained in the slide metadata to base64
:param slides: The slides to convert the metadata from
:return: The slides without binary metadata
"""
for slide in slides:
if slide.metadata is not None:
for metadata_key, metadata_value in slide.metadata.items():
if is_byte_data(metadata_value):
slide.metadata[metadata_key] = convert_binary_to_base64(
metadata_value
)
return slides
def openslide_can_load(file_extension: str) -> bool:
"""
Checks if the given file extension can be loaded by openslide.
    :param file_extension: The file extension that should be checked
:return: If the file extension can be loaded by openslide or not
"""
OPENSLIDE_FORMATS = [
"svs",
"tif",
"vms",
"vmu",
"ndpi",
"scn",
"mrxs",
"tiff",
"svslide",
"bif",
]
return file_extension.lower() in OPENSLIDE_FORMATS
def get_file_name_and_file_extension(file_name_with_extension: str) -> Tuple[str, str]:
"""
Splits the extension of the file name
    :param file_name_with_extension: file name with extension
:return: file name and file extension
"""
return os.path.splitext(file_name_with_extension)
def remove_truth_values_from_dict(
dict_to_be_filtered: Dict[Any, Any]
) -> Dict[Any, Any]:
"""
Removes all entries in the given dict which have 'True' as value
    :param dict_to_be_filtered: Dict to filter
:return: Filtered dict
"""
query = {}
if dict_to_be_filtered:
for key in dict_to_be_filtered:
if not dict_to_be_filtered[key]:
query[key] = dict_to_be_filtered[key]
return query
def delete_keys_from_dict(dict_del: Dict, keys_to_delete: List[str]) -> Dict:
"""
    Deletes the given keys from the given dict
:param dict_del: dict to delete keys from
:param keys_to_delete: All Keys that should be deleted
:return: The dict without the deleted keys
"""
for k in keys_to_delete:
try:
del dict_del[k]
except KeyError:
pass
for v in dict_del.values():
if isinstance(v, dict):
delete_keys_from_dict(v, keys_to_delete)
return dict_del
``` |
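Because `delete_keys_from_dict` recurses into nested dictionaries but not into dictionaries held inside lists, a quick illustration of what it strips (the metadata values are made up):
```python
# Quick demo of delete_keys_from_dict; metadata values are made up.
from app.utils.slide_utils import delete_keys_from_dict

metadata = {
    "name": "slide_1",
    "thumbnail": b"\x89PNG...",
    "openslide": {"vendor": "aperio", "thumbnail": b"\x89PNG..."},
    "levels": [{"thumbnail": b"..."}],   # dicts inside lists are not visited
}
print(delete_keys_from_dict(metadata, ["thumbnail"]))
# {'name': 'slide_1', 'openslide': {'vendor': 'aperio'}, 'levels': [{'thumbnail': b'...'}]}
```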
{
"source": "Jinnrry/reversi-alpha-zero",
"score": 2
} |
#### File: reversi_zero/agent/player.py
```python
from _asyncio import Future
from asyncio.queues import Queue
from collections import defaultdict, namedtuple
from logging import getLogger
import asyncio
import numpy as np
from numpy.random import random
from reversi_zero.agent.api import ReversiModelAPI
from reversi_zero.config import Config
from reversi_zero.env.reversi_env import ReversiEnv, Player, Winner, another_player
from reversi_zero.lib.bitboard import find_correct_moves, bit_to_array, flip_vertical, rotate90, dirichlet_noise_of_mask
# from reversi_zero.lib.reversi_solver import ReversiSolver
from reversi_zero.lib.alt.reversi_solver import ReversiSolver
CounterKey = namedtuple("CounterKey", "black white next_player")
QueueItem = namedtuple("QueueItem", "state future")
HistoryItem = namedtuple("HistoryItem", "action policy values visit enemy_values enemy_visit")
CallbackInMCTS = namedtuple("CallbackInMCTS", "per_sim callback")
MCTSInfo = namedtuple("MCTSInfo", "var_n var_w var_p")
ActionWithEvaluation = namedtuple("ActionWithEvaluation", "action n q")
logger = getLogger(__name__)
class ReversiPlayer:
def __init__(self, config: Config, model, play_config=None, enable_resign=True, mtcs_info=None, api=None):
"""
:param config:
:param reversi_zero.agent.model.ReversiModel|None model:
:param MCTSInfo mtcs_info:
:parameter ReversiModelAPI api:
"""
self.config = config
self.model = model
self.play_config = play_config or self.config.play
self.enable_resign = enable_resign
self.api = api or ReversiModelAPI(self.config, self.model)
# key=(own, enemy, action)
mtcs_info = mtcs_info or self.create_mtcs_info()
self.var_n, self.var_w, self.var_p = mtcs_info
self.expanded = set(self.var_p.keys())
self.now_expanding = set()
self.prediction_queue = Queue(self.play_config.prediction_queue_size)
self.sem = asyncio.Semaphore(self.play_config.parallel_search_num)
self.moves = []
self.loop = asyncio.get_event_loop()
self.running_simulation_num = 0
self.callback_in_mtcs = None
self.thinking_history = {} # for fun
self.resigned = False
self.requested_stop_thinking = False
self.solver = self.create_solver()
@staticmethod
def create_mtcs_info():
return MCTSInfo(defaultdict(lambda: np.zeros((64,))),
defaultdict(lambda: np.zeros((64,))),
defaultdict(lambda: np.zeros((64,))))
def var_q(self, key):
return self.var_w[key] / (self.var_n[key] + 1e-5)
def action(self, own, enemy, callback_in_mtcs=None):
"""
:param own: BitBoard
:param enemy: BitBoard
:param CallbackInMCTS callback_in_mtcs:
:return action=move pos=0 ~ 63 (0=top left, 7 top right, 63 bottom right)
"""
action_with_eval = self.action_with_evaluation(own, enemy, callback_in_mtcs=callback_in_mtcs)
return action_with_eval.action
def action_with_evaluation(self, own, enemy, callback_in_mtcs=None):
"""
:param own: BitBoard
:param enemy: BitBoard
:param CallbackInMCTS callback_in_mtcs:
:rtype: ActionWithEvaluation
:return ActionWithEvaluation(
action=move pos=0 ~ 63 (0=top left, 7 top right, 63 bottom right),
n=N of the action,
q=W/N of the action,
)
"""
env = ReversiEnv().update(own, enemy, Player.black)
key = self.counter_key(env)
self.callback_in_mtcs = callback_in_mtcs
pc = self.play_config
if pc.use_solver_turn and env.turn >= pc.use_solver_turn:
ret = self.action_by_searching(key)
if ret: # not save move as play data
return ret
for tl in range(self.play_config.thinking_loop):
if env.turn > 0:
self.search_moves(own, enemy)
else:
self.bypass_first_move(key)
policy = self.calc_policy(own, enemy)
action = int(np.random.choice(range(64), p=policy))
action_by_value = int(np.argmax(self.var_q(key) + (self.var_n[key] > 0)*100))
value_diff = self.var_q(key)[action] - self.var_q(key)[action_by_value]
if env.turn <= pc.start_rethinking_turn or self.requested_stop_thinking or \
(value_diff > -0.01 and self.var_n[key][action] >= pc.required_visit_to_decide_action):
break
# this is for play_gui, not necessary when training.
self.update_thinking_history(own, enemy, action, policy)
if self.play_config.resign_threshold is not None and\
np.max(self.var_q(key) - (self.var_n[key] == 0)*10) <= self.play_config.resign_threshold:
self.resigned = True
if self.enable_resign:
if env.turn >= self.config.play.allowed_resign_turn:
return ActionWithEvaluation(None, 0, 0) # means resign
else:
logger.debug(f"Want to resign but disallowed turn {env.turn} < {self.config.play.allowed_resign_turn}")
saved_policy = self.calc_policy_by_tau_1(key) if self.config.play_data.save_policy_of_tau_1 else policy
self.add_data_to_move_buffer_with_8_symmetries(own, enemy, saved_policy)
return ActionWithEvaluation(action=action, n=self.var_n[key][action], q=self.var_q(key)[action])
def update_thinking_history(self, black, white, action, policy):
key = CounterKey(black, white, Player.black.value)
next_key = self.get_next_key(black, white, action)
self.thinking_history[(black, white)] = \
HistoryItem(action, policy, list(self.var_q(key)), list(self.var_n[key]),
list(self.var_q(next_key)), list(self.var_n[next_key]))
def bypass_first_move(self, key):
legal_array = bit_to_array(find_correct_moves(key.black, key.white), 64)
action = np.argmax(legal_array)
self.var_n[key][action] = 1
self.var_w[key][action] = 0
self.var_p[key] = legal_array / np.sum(legal_array)
def action_by_searching(self, key):
action, score = self.solver.solve(key.black, key.white, Player(key.next_player), exactly=True)
if action is None:
return None
# logger.debug(f"action_by_searching: score={score}")
policy = np.zeros(64)
policy[action] = 1
self.var_n[key][action] = 999
self.var_w[key][action] = np.sign(score) * 999
self.var_p[key] = policy
self.update_thinking_history(key.black, key.white, action, policy)
return ActionWithEvaluation(action=action, n=999, q=np.sign(score))
def stop_thinking(self):
self.requested_stop_thinking = True
def add_data_to_move_buffer_with_8_symmetries(self, own, enemy, policy):
for flip in [False, True]:
for rot_right in range(4):
own_saved, enemy_saved, policy_saved = own, enemy, policy.reshape((8, 8))
if flip:
own_saved = flip_vertical(own_saved)
enemy_saved = flip_vertical(enemy_saved)
policy_saved = np.flipud(policy_saved)
if rot_right:
for _ in range(rot_right):
own_saved = rotate90(own_saved)
enemy_saved = rotate90(enemy_saved)
policy_saved = np.rot90(policy_saved, k=-rot_right)
self.moves.append([(own_saved, enemy_saved), list(policy_saved.reshape((64, )))])
def get_next_key(self, own, enemy, action):
env = ReversiEnv().update(own, enemy, Player.black)
env.step(action)
return self.counter_key(env)
def ask_thought_about(self, own, enemy) -> HistoryItem:
return self.thinking_history.get((own, enemy))
def search_moves(self, own, enemy):
loop = self.loop
self.running_simulation_num = 0
self.requested_stop_thinking = False
coroutine_list = []
for it in range(self.play_config.simulation_num_per_move):
cor = self.start_search_my_move(own, enemy)
coroutine_list.append(cor)
coroutine_list.append(self.prediction_worker())
loop.run_until_complete(asyncio.gather(*coroutine_list))
async def start_search_my_move(self, own, enemy):
self.running_simulation_num += 1
root_key = self.counter_key(ReversiEnv().update(own, enemy, Player.black))
with await self.sem: # reduce parallel search number
if self.requested_stop_thinking:
self.running_simulation_num -= 1
return None
env = ReversiEnv().update(own, enemy, Player.black)
leaf_v = await self.search_my_move(env, is_root_node=True)
self.running_simulation_num -= 1
if self.callback_in_mtcs and self.callback_in_mtcs.per_sim > 0 and \
self.running_simulation_num % self.callback_in_mtcs.per_sim == 0:
self.callback_in_mtcs.callback(list(self.var_q(root_key)), list(self.var_n[root_key]))
return leaf_v
async def search_my_move(self, env: ReversiEnv, is_root_node=False):
"""
Q, V is value for this Player(always black).
P is value for the player of next_player (black or white)
:param env:
:param is_root_node:
:return:
"""
if env.done:
if env.winner == Winner.black:
return 1
elif env.winner == Winner.white:
return -1
else:
return 0
key = self.counter_key(env)
another_side_key = self.another_side_counter_key(env)
if self.config.play.use_solver_turn_in_simulation and \
env.turn >= self.config.play.use_solver_turn_in_simulation:
action, score = self.solver.solve(key.black, key.white, Player(key.next_player), exactly=False)
if action:
score = score if env.next_player == Player.black else -score
leaf_v = np.sign(score)
leaf_p = np.zeros(64)
leaf_p[action] = 1
self.var_n[key][action] += 1
self.var_w[key][action] += leaf_v
self.var_p[key] = leaf_p
self.var_n[another_side_key][action] += 1
self.var_w[another_side_key][action] -= leaf_v
self.var_p[another_side_key] = leaf_p
return np.sign(score)
while key in self.now_expanding:
await asyncio.sleep(self.config.play.wait_for_expanding_sleep_sec)
# is leaf?
if key not in self.expanded: # reach leaf node
leaf_v = await self.expand_and_evaluate(env)
if env.next_player == Player.black:
return leaf_v # Value for black
else:
return -leaf_v # Value for white == -Value for black
virtual_loss = self.config.play.virtual_loss
virtual_loss_for_w = virtual_loss if env.next_player == Player.black else -virtual_loss
action_t = self.select_action_q_and_u(env, is_root_node)
_, _ = env.step(action_t)
self.var_n[key][action_t] += virtual_loss
self.var_w[key][action_t] -= virtual_loss_for_w
leaf_v = await self.search_my_move(env) # next move
# on returning search path
# update: N, W
self.var_n[key][action_t] += - virtual_loss + 1
self.var_w[key][action_t] += virtual_loss_for_w + leaf_v
# update another side info(flip color and player)
self.var_n[another_side_key][action_t] += 1
self.var_w[another_side_key][action_t] -= leaf_v # must flip the sign.
return leaf_v
async def expand_and_evaluate(self, env):
"""expand new leaf
update var_p, return leaf_v
:param ReversiEnv env:
:return: leaf_v
"""
key = self.counter_key(env)
another_side_key = self.another_side_counter_key(env)
self.now_expanding.add(key)
black, white = env.board.black, env.board.white
# (di(p), v) = fθ(di(sL))
# rotation and flip. flip -> rot.
is_flip_vertical = random() < 0.5
rotate_right_num = int(random() * 4)
if is_flip_vertical:
black, white = flip_vertical(black), flip_vertical(white)
for i in range(rotate_right_num):
black, white = rotate90(black), rotate90(white) # rotate90: rotate bitboard RIGHT 1 time
black_ary = bit_to_array(black, 64).reshape((8, 8))
white_ary = bit_to_array(white, 64).reshape((8, 8))
state = [black_ary, white_ary] if env.next_player == Player.black else [white_ary, black_ary]
future = await self.predict(np.array(state)) # type: Future
await future
leaf_p, leaf_v = future.result()
# reverse rotate and flip about leaf_p
if rotate_right_num > 0 or is_flip_vertical: # reverse rotation and flip. rot -> flip.
leaf_p = leaf_p.reshape((8, 8))
if rotate_right_num > 0:
leaf_p = np.rot90(leaf_p, k=rotate_right_num) # rot90: rotate matrix LEFT k times
if is_flip_vertical:
leaf_p = np.flipud(leaf_p)
leaf_p = leaf_p.reshape((64, ))
self.var_p[key] = leaf_p # P is value for next_player (black or white)
self.var_p[another_side_key] = leaf_p
self.expanded.add(key)
self.now_expanding.remove(key)
return float(leaf_v)
async def prediction_worker(self):
"""For better performance, queueing prediction requests and predict together in this worker.
speed up about 45sec -> 15sec for example.
:return:
"""
q = self.prediction_queue
margin = 10 # avoid finishing before other searches starting.
while self.running_simulation_num > 0 or margin > 0:
if q.empty():
if margin > 0:
margin -= 1
await asyncio.sleep(self.config.play.prediction_worker_sleep_sec)
continue
item_list = [q.get_nowait() for _ in range(q.qsize())] # type: list[QueueItem]
#logger.debug(f"predicting {len(item_list)} items")
data = np.array([x.state for x in item_list])
policy_ary, value_ary = self.api.predict(data) # shape=(N, 2, 8, 8)
#logger.debug(f"predicted {len(item_list)} items")
for p, v, item in zip(policy_ary, value_ary, item_list):
item.future.set_result((p, v))
async def predict(self, x):
future = self.loop.create_future()
item = QueueItem(x, future)
await self.prediction_queue.put(item)
return future
def finish_game(self, z):
"""
:param z: win=1, lose=-1, draw=0
:return:
"""
for move in self.moves: # add this game winner result to all past moves.
move += [z]
def calc_policy(self, own, enemy):
"""calc π(a|s0)
:param own:
:param enemy:
:return:
"""
pc = self.play_config
env = ReversiEnv().update(own, enemy, Player.black)
key = self.counter_key(env)
if env.turn < pc.change_tau_turn:
return self.calc_policy_by_tau_1(key)
else:
action = np.argmax(self.var_n[key]) # tau = 0
ret = np.zeros(64)
ret[action] = 1
return ret
def calc_policy_by_tau_1(self, key):
return self.var_n[key] / np.sum(self.var_n[key]) # tau = 1
@staticmethod
def counter_key(env: ReversiEnv):
return CounterKey(env.board.black, env.board.white, env.next_player.value)
@staticmethod
def another_side_counter_key(env: ReversiEnv):
return CounterKey(env.board.white, env.board.black, another_player(env.next_player).value)
def select_action_q_and_u(self, env, is_root_node):
key = self.counter_key(env)
if env.next_player == Player.black:
legal_moves = find_correct_moves(key.black, key.white)
else:
legal_moves = find_correct_moves(key.white, key.black)
# noinspection PyUnresolvedReferences
xx_ = np.sqrt(np.sum(self.var_n[key])) # SQRT of sum(N(s, b); for all b)
xx_ = max(xx_, 1) # avoid u_=0 if N is all 0
p_ = self.var_p[key]
# re-normalize in legal moves
p_ = p_ * bit_to_array(legal_moves, 64)
if np.sum(p_) > 0:
# decay policy gradually in the end phase
_pc = self.config.play
temperature = min(np.exp(1-np.power(env.turn/_pc.policy_decay_turn, _pc.policy_decay_power)), 1)
# normalize and decay policy
p_ = self.normalize(p_, temperature)
if is_root_node and self.play_config.noise_eps > 0: # Is it correct?? -> (1-e)p + e*Dir(alpha)
noise = dirichlet_noise_of_mask(legal_moves, self.play_config.dirichlet_alpha)
p_ = (1 - self.play_config.noise_eps) * p_ + self.play_config.noise_eps * noise
u_ = self.play_config.c_puct * p_ * xx_ / (1 + self.var_n[key])
if env.next_player == Player.black:
v_ = (self.var_q(key) + u_ + 1000) * bit_to_array(legal_moves, 64)
else:
# When enemy's selecting action, flip Q-Value.
v_ = (-self.var_q(key) + u_ + 1000) * bit_to_array(legal_moves, 64)
# noinspection PyTypeChecker
action_t = int(np.argmax(v_))
return action_t
@staticmethod
def normalize(p, t=1):
pp = np.power(p, t)
return pp / np.sum(pp)
def create_solver(self):
return ReversiSolver()
```
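The core of `select_action_q_and_u` is the AlphaZero-style PUCT rule `U(s,a) = c_puct * P(s,a) * sqrt(sum_b N(s,b)) / (1 + N(s,a))`; the `+1000` offset only keeps every legal move ahead of the masked-out illegal ones in the argmax. A stripped-down sketch of that selection step alone, with toy arrays and none of the solver, Dirichlet-noise or temperature-decay logic:
```python
# Stand-alone sketch of the PUCT selection used above; all arrays are toy values.
import numpy as np

def puct_select(q, n, p, legal_mask, c_puct=1.0):
    """q, n, p: per-action value, visit count and prior, shape (64,);
    legal_mask: 1.0 for legal moves, 0.0 otherwise."""
    sqrt_total = max(np.sqrt(n.sum()), 1)      # avoid u == 0 when nothing is visited yet
    u = c_puct * p * sqrt_total / (1 + n)
    score = (q + u + 1000) * legal_mask        # +1000 keeps legal moves above the masked zeros
    return int(np.argmax(score))

rng = np.random.default_rng(0)
q = rng.uniform(-1, 1, 64)                     # mean action values W/N
n = rng.integers(0, 10, 64).astype(float)      # visit counts
p = rng.dirichlet(np.ones(64))                 # network prior
legal = (rng.random(64) < 0.2).astype(float)   # toy legal-move mask
print(puct_select(q, n, p, legal))
```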
#### File: src/reversi_zero/manager.py
```python
import argparse
from logging import getLogger
import yaml
from moke_config import create_config
from .lib.logger import setup_logger
from .config import Config
logger = getLogger(__name__)
CMD_LIST = ['self', 'opt', 'eval', 'play_gui', 'nboard']
def create_parser():
parser = argparse.ArgumentParser()
parser.add_argument("cmd", help="what to do", choices=CMD_LIST)
parser.add_argument("-c", help="specify config yaml", dest="config_file")
parser.add_argument("--new", help="run from new best model", action="store_true")
parser.add_argument("--type", help="deprecated. Please use -c instead")
parser.add_argument("--total-step", help="set TrainerConfig.start_total_steps", type=int)
return parser
def setup(config: Config, args):
config.opts.new = args.new
if args.total_step is not None:
config.trainer.start_total_steps = args.total_step
config.resource.create_directories()
setup_logger(config.resource.main_log_path)
def start():
parser = create_parser()
args = parser.parse_args()
if args.type:
print("I'm very sorry. --type option was deprecated. Please use -c option instead!")
return 1
if args.config_file:
with open(args.config_file, "rt") as f:
            config = create_config(Config, yaml.safe_load(f))
else:
config = create_config(Config)
setup(config, args)
if args.cmd != "nboard":
logger.info(f"config type: {config.type}")
if args.cmd == "self":
from .worker import self_play
return self_play.start(config)
elif args.cmd == 'opt':
from .worker import optimize
return optimize.start(config)
elif args.cmd == 'eval':
from .worker import evaluate
return evaluate.start(config)
elif args.cmd == 'play_gui':
from .play_game import gui
return gui.start(config)
elif args.cmd == 'nboard':
from .play_game import nboard
return nboard.start(config)
```
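Assuming `src` is on the Python path so that `reversi_zero.manager.start` is importable (the actual run script is not shown here, and the config path below is invented), the sub-commands registered above can be exercised like this:
```python
# Hypothetical launcher; the config file path is illustrative only.
import sys
from reversi_zero.manager import start

# argparse reads sys.argv[1:], so this is equivalent to:
#   <launcher> self -c configs/normal.yml
sys.argv = ["run.py", "self", "-c", "configs/normal.yml"]
sys.exit(start())
```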
#### File: reversi_zero/play_game/common.py
```python
from reversi_zero.config import Config
from reversi_zero.lib.model_helpler import reload_newest_next_generation_model_if_changed, load_best_model_weight
def load_model(config: Config):
from reversi_zero.agent.model import ReversiModel
model = ReversiModel(config)
if config.play.use_newest_next_generation_model:
loaded = reload_newest_next_generation_model_if_changed(model) or load_best_model_weight(model)
else:
loaded = load_best_model_weight(model) or reload_newest_next_generation_model_if_changed(model)
if not loaded:
raise RuntimeError("No models found!")
return model
```
#### File: test/agent/test_player.py
```python
from nose.tools.trivial import eq_, ok_
import numpy as np
from reversi_zero.config import Config
from reversi_zero.agent.player import ReversiPlayer
from reversi_zero.lib.bitboard import bit_count
def test_add_data_to_move_buffer_with_8_symmetries():
config = Config()
player = ReversiPlayer(config, None)
"""
board: p=0.2, q=0.8, O=own, X=enemy
01234567 - x
0O q
1 O
2
3
4
5
6 X
7p X
|
y
"""
own = stone_bit(0, 0) | stone_bit(1, 1)
enemy = stone_bit(7, 6) | stone_bit(7, 7)
policy = np.zeros((64, ))
policy[idx(7, 0)] = 0.8
policy[idx(0, 7)] = 0.2
player.add_data_to_move_buffer_with_8_symmetries(own, enemy, policy)
# no transform
(o, e), p = player.moves[0] # own, enemy, policy
eq_((bit_count(o), bit_count(e)), (2, 2))
ok_(check_bit(o, 0, 0))
ok_(check_bit(o, 1, 1))
ok_(check_bit(e, 7, 6))
ok_(check_bit(e, 7, 7))
eq_(p[idx(7, 0)], 0.8)
eq_(p[idx(0, 7)], 0.2)
# rotate right
(o, e), p = player.moves[1] # own, enemy, policy
eq_((bit_count(o), bit_count(e)), (2, 2))
ok_(check_bit(o, 7, 0))
ok_(check_bit(o, 6, 1))
ok_(check_bit(e, 0, 7))
ok_(check_bit(e, 1, 7))
eq_(p[idx(7, 7)], 0.8)
eq_(p[idx(0, 0)], 0.2)
# rotate right twice
(o, e), p = player.moves[2] # own, enemy, policy
eq_((bit_count(o), bit_count(e)), (2, 2))
ok_(check_bit(o, 7, 7))
ok_(check_bit(o, 6, 6))
ok_(check_bit(e, 0, 0))
ok_(check_bit(e, 0, 1))
eq_(p[idx(0, 7)], 0.8)
eq_(p[idx(7, 0)], 0.2)
# flip vertical -> rotate right
(o, e), p = player.moves[5] # own, enemy, policy
eq_((bit_count(o), bit_count(e)), (2, 2))
ok_(check_bit(o, 0, 0))
ok_(check_bit(o, 1, 1))
ok_(check_bit(e, 6, 7))
ok_(check_bit(e, 7, 7))
eq_(p[idx(0, 7)], 0.8)
eq_(p[idx(7, 0)], 0.2)
def idx(x, y):
return y*8 + x
def stone_bit(x, y):
return 1 << idx(x, y)
def check_bit(bb, x, y):
return bb & stone_bit(x, y) != 0
```
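The move-buffer indices probed in the test follow directly from the loop order in `add_data_to_move_buffer_with_8_symmetries` (flip outer, rotation inner), i.e. `index = flip * 4 + rot_right`:
```python
# Index layout of the 8 symmetry entries appended per position
# (mirrors the nested loops: flip in [False, True], rot_right in range(4)).
for i, (flip, rot_right) in enumerate(
    (f, r) for f in (False, True) for r in range(4)
):
    print(i, flip, rot_right)
# 0 False 0  <- untransformed board (moves[0] in the test)
# 5 True  1  <- flip vertical, then one right rotation (moves[5] in the test)
```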
#### File: test/lib/test_bitboard.py
```python
import numpy as np
from nose.tools import assert_almost_equal
from nose.tools.trivial import ok_, eq_
from reversi_zero.lib.bitboard import find_correct_moves, board_to_string, bit_count, dirichlet_noise_of_mask, \
bit_to_array
from reversi_zero.lib.util import parse_to_bitboards
def test_find_correct_moves_1():
ex = '''
##########
#OO #
#XOO #
#OXOOO #
# XOX #
# XXX #
# X #
# X #
# #
##########'''
expect = '''
##########
#OO #
#XOO #
#OXOOO #
#**XOX* #
# **XXX #
# X**** #
# X #
# #
##########
'''
_flip_test(ex, expect)
def _flip_test(ex, expect, player_black=True):
b, w = parse_to_bitboards(ex)
moves = find_correct_moves(b, w) if player_black else find_correct_moves(w, b)
res = board_to_string(b, w, extra=moves)
eq_(res.strip(), expect.strip(), f"\n{res}----{expect}")
def test_find_correct_moves_2():
ex = '''
##########
#OOOOOXO #
#OOOOOXOO#
#OOOOOXOO#
#OXOXOXOO#
#OOXOXOXO#
#OOOOOOOO#
#XXXO O#
# #
##########'''
expect = '''
##########
#OOOOOXO*#
#OOOOOXOO#
#OOOOOXOO#
#OXOXOXOO#
#OOXOXOXO#
#OOOOOOOO#
#XXXO***O#
# * #
##########'''
_flip_test(ex, expect, player_black=False)
def test_find_correct_moves_3():
ex = '''
##########
#OOXXXXX #
#XOXXXXXX#
#XXXXXXXX#
#XOOXXXXX#
#OXXXOOOX#
#OXXOOOOX#
#OXXXOOOX#
# OOOOOOO#
##########'''
expect1 = '''
##########
#OOXXXXX #
#XOXXXXXX#
#XXXXXXXX#
#XOOXXXXX#
#OXXXOOOX#
#OXXOOOOX#
#OXXXOOOX#
#*OOOOOOO#
##########'''
expect2 = '''
##########
#OOXXXXX*#
#XOXXXXXX#
#XXXXXXXX#
#XOOXXXXX#
#OXXXOOOX#
#OXXOOOOX#
#OXXXOOOX#
# OOOOOOO#
##########'''
_flip_test(ex, expect1, player_black=False)
_flip_test(ex, expect2, player_black=True)
def test_dirichlet_noise_of_mask():
legal_moves = 47289423
bc = bit_count(legal_moves)
noise = dirichlet_noise_of_mask(legal_moves, 0.5)
assert_almost_equal(1, np.sum(noise))
eq_(bc, np.sum(noise > 0))
ary = bit_to_array(legal_moves, 64)
eq_(list(noise), list(noise * ary))
``` |
{
"source": "jinntechio/conan-sol2",
"score": 2
} |
#### File: jinntechio/conan-sol2/conanfile.py
```python
from conans import ConanFile, tools
from conans.tools import os_info, SystemPackageTool
import os
class Sol2Conan(ConanFile):
name = "sol2"
version = "2.20.6"
description = "sol is a C++ library binding to Lua. It currently supports all Lua versions 5.1+ (LuaJIT 2.x "\
"included). sol aims to be easy to use and easy to add to a project. The library is header-only for "\
"easy integration with projects."
topics = ("conan", "lua")
url = "https://github.com/jinncrafters/conan-sol2"
homepage = "https://github.com/ThePhD/sol2"
license = "MIT"
no_copy_source = True
_source_subfolder = "source_subfolder"
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = self.name + "-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def package(self):
include_folder = os.path.join(self._source_subfolder, "single/sol")
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
self.copy(pattern="*", dst="include", src=include_folder)
def package_id(self):
self.info.header_only()
def system_requirements(self):
pack_name = None
if os_info.linux_distro == "ubuntu":
if os_info.os_version > "12":
pack_name = "lua5.3 liblua5.3-dev"
else:
pack_name = "lua5.3 liblua5.3-dev"
# elif os_info.linux_distro == "fedora" or os_info.linux_distro == "centos":
# pack_name = "package_name_in_fedora_and_centos"
elif os_info.is_macos:
pack_name = ["lua"]
# elif os_info.is_freebsd:
# pack_name = "package_name_in_freebsd"
# elif os_info.is_solaris:
# pack_name = "package_name_in_solaris"
if pack_name:
installer = SystemPackageTool()
installer.install(pack_name)
``` |
{
"source": "jinntechio/RocketJoe",
"score": 3
} |
#### File: apps/medusa_cascade/demo.py
```python
import asyncio
import websockets
import msgpack
import uuid
async def hello():
uri = "ws://localhost:9999"
async with websockets.connect(uri) as websocket:
data = msgpack.packb([str(uuid.uuid4()),3,["1qaz",["key"],["key"]]], use_bin_type=True)
await websocket.send(data)
print(f"> {data}")
greeting = await websocket.recv()
print(f"< {greeting}")
asyncio.get_event_loop().run_until_complete(hello())
```
#### File: rocketjoe_kernel/test/test_jupyter_kernel_benchmark.py
```python
import jupyter_kernel_test
import jupyter_kernel_mgmt
from jupyter_kernel_mgmt.discovery import KernelFinder
from jupyter_kernel_mgmt.client import BlockingKernelClient
# import jupyter_kernel_mgmt
# from jupyter_kernel_mgmt.discovery import KernelFinder
# from jupyter_kernel_mgmt.client import BlockingKernelClient
def kernel_info_request(kc) -> None:
kc.kernel_info()
def execute_request_1(kc) -> None:
kc.execute_interactive("i = 0\ni + 1")
def execute_request_2(kc) -> None:
kc.execute_interactive("print(6 * 7)")
def complete_request_1(kc) -> None:
kc.complete("prin")
def complete_request_2(kc) -> None:
kc.complete("print")
def is_complete_request_1(kc) -> None:
kc.is_complete("prin")
def is_complete_request_2(kc) -> None:
kc.is_complete("print")
def test_rocketjoe_kernel_info_request(benchmark) -> None:
with jupyter_kernel_mgmt.run_kernel_blocking('spec/rocketjoe') as kc:
benchmark(kernel_info_request, kc)
def test_rocketjoe_execute_request_1(benchmark) -> None:
with jupyter_kernel_mgmt.run_kernel_blocking('spec/rocketjoe') as kc:
benchmark(execute_request_1, kc)
def test_rocketjoe_execute_request_2(benchmark) -> None:
with jupyter_kernel_mgmt.run_kernel_blocking('spec/rocketjoe') as kc:
benchmark(execute_request_2, kc)
def test_rocketjoe_complete_request_1(benchmark) -> None:
with jupyter_kernel_mgmt.run_kernel_blocking('spec/rocketjoe') as kc:
benchmark(complete_request_1, kc)
def test_rocketjoe_complete_request_2(benchmark) -> None:
with jupyter_kernel_mgmt.run_kernel_blocking('spec/rocketjoe') as kc:
benchmark(complete_request_2, kc)
def test_rocketjoe_is_complete_request_1(benchmark) -> None:
with jupyter_kernel_mgmt.run_kernel_blocking('spec/rocketjoe') as kc:
benchmark(is_complete_request_1, kc)
def test_rocketjoe_is_complete_request_2(benchmark) -> None:
with jupyter_kernel_mgmt.run_kernel_blocking('spec/rocketjoe') as kc:
benchmark(is_complete_request_2, kc)
```
#### File: network_service/demo/app.py
```python
import random
import string
from flask import Flask, make_response, jsonify
app = Flask(__name__)
@app.route('/v1/detection/jsonrpc', methods=['GET', 'POST'])
def hello_world():
response = make_response(
jsonify(
{"message": ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))}
),
200,
)
response.headers["Content-Type"] = "application/json"
return response
if __name__ == "__main__":
app.run(debug=True)
```
#### File: jinntechio/RocketJoe/setup.py
```python
import os
import pathlib
from setuptools import setup
from setuptools.dist import Distribution
from setuptools.command.install import install
def readme():
with open('README.md') as f:
return f.read()
class InstallPlatlib(install):
def finalize_options(self):
install.finalize_options(self)
if self.distribution.has_ext_modules():
self.install_lib = self.install_platlib
class BinaryDistribution(Distribution):
def has_ext_modules(foo):
return True
from setuptools import setup
setup(
name="duck_charmer",
version="1.0.0",
description='A wheel for duck_charmer',
long_description=readme(),
license='',
python_requires=">=3.9",
packages=["duck_charmer"],
include_package_data=True,
package_data={
'duck_charmer': ['duck_charmer*'],
},
cmdclass={'install': InstallPlatlib},
distclass=BinaryDistribution
)
``` |
{
"source": "JinnTech/Jinn_Functions",
"score": 3
} |
#### File: Jinn_Functions/py27/jinn_helper.py
```python
def progress(count, total, suffix=''):
"""
Description: Outputs a progress bar at bottom of terminal. Slightly
Usage: progress(iteration, total)
Credits: http://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
Example:
import time
total = 1000
i = 0
while i < total:
progress(i, total)
time.sleep(0.5) # emulating long-playing job
i += 1
"""
import sys
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', suffix))
sys.stdout.flush()
``` |
{
"source": "jinnykoo/christmas",
"score": 2
} |
#### File: apps/designer/views.py
```python
from django.shortcuts import render
from django.conf import settings
from django.core.files import File
from oscar.core.loading import get_class, get_classes, get_model
ProductClass, Product, Category, ProductCategory = get_classes(
'catalogue.models', ('ProductClass', 'Product', 'Category',
'ProductCategory'))
ProductImage = get_model('catalogue', 'productimage')
def create(request):
context_dict = {}
if request.method == 'POST':
img_data = request.POST.get('imagesrc').decode("base64")
img_file = open("./public/photo.jpg", "wb")
img_file.write(img_data)
img_file.close()
#create a new product
product_class = ProductClass.objects.get(pk=2)
product = Product()
product.product_class = product_class
product.structure = Product.STANDALONE
product.title = 'the first product'
product.description = 'this is the first product'
product.save()
new_file = File(open('./public/photo.jpg', 'rb'))
im = ProductImage(product=product, display_order=0)
im.original.save('newtee.jpg', new_file, save=False)
im.save()
#save the image
return render(request, 'designer/success.html')
else:
print 'else'
return render(request, 'designer/create.html', context_dict)
``` |
{
"source": "jinny-sun/CookieCutter",
"score": 4
} |
#### File: CookieCutter/scripts/web_app.py
```python
import streamlit as st
import numpy as np
import pandas as pd
import re
# Functions
def string_replace(x):
new_string = re.sub(' {2,}', ' ', x).replace(" ", ';').replace("\n", ";").replace("; ;", ";")
# new_string = new_string.split(';')
return(new_string)
def get_ingredients (x):
ing_regex = ('(\d+/*\d*\s*\d*/*\d*)\s(\w+\s*.*?);')
all_ing = re.findall(ing_regex, x)
return(all_ing)
def get_quantity(x):
quantity = [y[0] for y in x] # use for df
units_with_ingredient = [y[1] for y in x]
df_of_units = pd.DataFrame({'quantity':quantity, 'ingredient':units_with_ingredient})
return (df_of_units)
def text_process(mess):
"""
Takes in a string of text, then performs the following:
1. Remove all punctuation
2. Remove all stopwords
3. Returns a list of the cleaned text
"""
import string
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
wnl = WordNetLemmatizer()
def lemmatize(string):
for word in re.findall(r"[a-z]+", string):
string = string.replace(word, wnl.lemmatize(word, 'n') if 's' in word[-3:] else word)
return string
unit_stopwords = ['dash','pinch','teaspoon','tablespoon','fluid','cup','pint','quart','ounce','oz','pound','rack',
'small','medium','large','crushed','grated','skinless','boneless','melted','fresh',
                      'diced','minced','thinly','dry','dried','halved','taste','frying','lean','drained','jars','grated',
'clove','slice','eaches','whole','cube','thick','unit','freshly','finely','splash',
'semisweet','chip','extract','spread','powder','room','temperature','brown','cooking','yolk','ground',
'package','mix','cake','plain','goody','light','wheat','piece','substitute','mini','kosher','crispy',
'minature','chunk','dark','bit','square','boiling','bag','crumb','popsicle','stick','zest','cereal',
'bar','tart','nib','tennessee','turbinado','baking','pack','spice','moist','miniarature','crunchy',
'morsel','nugget','candy','crisp','super','fine','decoration','sucralose','puree','pureed','rainbow',
'cut','frozen','broken','round','concentrate','miniature','cooky','virgin','dusting','half','baby',
'food','jar','seedless','container','box','granule','filling','cold','super','ripe','moisture',
'packet','instant','mint','ripe','sea','coarse','fun','size','funsize','bulk','chopped','torn']
# Remove anything in parenthesis
mess = re.sub(r"\([^\)]+\)", '', mess)
# Make everything lowercase
mess = mess.lower()
# Remove non-word punctuation
mess =' '.join(re.findall(r"[-,''\w]+", mess)) # This leaves some commas as a character #
mess = re.sub(r"\,", ' ', mess)
    # Remove hyphenated words
    mess = re.sub(r"(?=\S*['-])([a-zA-Z'-]+)",'',mess) # remove hyphenated words
# Remove punctuation and numbers
mess = ''.join([i for i in mess if not i.isdigit()])
# Remove plurals
mess = lemmatize(mess)
#clean excess whitespace
mess = re.sub(r"\s+", ' ', mess).strip()
# Remove stopwords
mess = [word for word in mess.split() if word.lower() not in stopwords.words('english')]
mess = [word for word in mess if word.lower() not in unit_stopwords]
mess = ' '.join(mess)
return(mess.split())
def test_noun(tokens):
import nltk
tagged = nltk.pos_tag(tokens)
return([token[0] for token in tagged if token[1] in ['NN',]])
def convert_fractions (quantity):
from fractions import Fraction
return float(sum(Fraction(s) for s in quantity.split()))
# Banner
st.image('banner.png', use_column_width=True)
st.title("Cut the calories from your cookie recipe!")
st.subheader('What would you like to make?')
# Load training data
X_train = pd.read_csv('X_train.csv')
y_train = pd.read_csv('y_train.csv')
ingredient_string = st.text_input('Input the ingredient list here:', '1 cup packed brown sugar; 1 cup white sugar; 1 cup butter; 2 eggs; 1 teaspoon baking soda; 1 teaspoon salt; 1 teaspoon vanilla extract; 2 1/2 cups sifted all-purpose flour; 1/2 cup chopped walnuts; 2 cups semisweet chocolate chips')
if ingredient_string:
st.write('Ingredients',ingredient_string)
serving_size = st.number_input('How many cookies will be made using this recipe?', 24)
if ingredient_string:
st.write('This recipe will make',serving_size,'cookies')
desiredcal = st.number_input('What is the maximum number of calories per cookie you desire?', 200)
if ingredient_string:
st.write('Each cookie should have less than',desiredcal,'calories.')
button = st.button('Get this recipe!')
if button:
# Process ingredient_string
serving_size = serving_size
ingredient_string = ingredient_string + ';' # add semicolon to end of ingredient list for regex
ingredient_string = string_replace(ingredient_string) # remove white space
ingredient_string_tuple = get_ingredients(ingredient_string) # separate ingredients into list of tuples
testdf = get_quantity(ingredient_string_tuple) # separate quantity from words
testdf['quantity'] = [convert_fractions(x) for x in testdf['quantity']]
testdf['unit'] = np.where(testdf.ingredient.str.contains("dash"), .3,
np.where(testdf.ingredient.str.contains("pinch"), .6,
np.where(testdf.ingredient.str.contains("teaspoon"), 5,
np.where(testdf.ingredient.str.contains("tablespoon"), 3,
np.where(testdf.ingredient.str.contains("fluid"), 30,
np.where(testdf.ingredient.str.contains("cup"), 240,
np.where(testdf.ingredient.str.contains("pint"), 473,
np.where(testdf.ingredient.str.contains("quart"), 980,
np.where(testdf.ingredient.str.contains("ounce"), 28,
np.where(testdf.ingredient.str.contains("oz"), 28,
np.where(testdf.ingredient.str.contains("pound"), 454,
np.where(testdf.ingredient.str.contains("rack"), 908,
np.where(testdf.ingredient.str.contains("small"), 50,
np.where(testdf.ingredient.str.contains("medium"), 60,
np.where(testdf.ingredient.str.contains("large"), 70,
1)))))))))))))))
# Total quantity of each ingredient needed for recipe (grams* quantity) and condense into a list.
testdf['norm_quant'] = round(testdf['unit']*testdf['quantity'])
testdf['norm_quant'] = testdf['norm_quant'].astype(int)
st.subheader('Ingredients')
testdf[['quantity','ingredient']]
# Tokenization = convert text string into list of tokens, or words, we want (i.e., cleaned version of words).
import string
from nltk.corpus import stopwords
testdf['ingredient']=[text_process(x) for x in testdf['ingredient']]
# One word per ingredient - keep only nouns, join multiple words as one string
testdf['ingredient'] = [test_noun(tokens) for tokens in testdf['ingredient']]
testdf['ingredient'] = [''.join(tokens) for tokens in testdf['ingredient']]
# Repeat word by normalized quantity
testdf['ingredient'] = testdf['ingredient'].astype(str) + ' '
zipped = list(zip(testdf['ingredient'], testdf['norm_quant']))
inglist = [t[0]*t[1] for t in zipped]
inglist = ''.join(inglist)
inglist = [inglist]
# Calorie Prediction
import pickle
bow_transformer = pickle.load(open('bow_transformer.sav','rb'))
ingredient_bow_train = pickle.load(open('ingredient_bow_train.sav','rb'))
inglist_bow_test = bow_transformer.transform(inglist)
# Gradient Boosting Regressor
from sklearn.ensemble import GradientBoostingRegressor
gboost = GradientBoostingRegressor(loss="ls", learning_rate=0.03, n_estimators=1500, max_depth=7, min_samples_split=950, min_samples_leaf=6, subsample=0.8, max_features=21, random_state=10)
gboost.fit(ingredient_bow_train, y_train['totalCal'])
predictions = gboost.predict(inglist_bow_test)
# Output
st.subheader('Calorie Predictor')
calPerServing = round(predictions[0]/serving_size,1)
st.write()
if calPerServing < desiredcal:
'If you make ', serving_size, 'cookies with this recipe, each cookie is', calPerServing, "calories. That's less than", desiredcal,'calories per cookie! :grin:'
else:
'If you make ', serving_size, 'cookies with this recipe, each cookie is', calPerServing, "calories. That's more than", desiredcal,'calories per cookie. :cry:'
import math
new_servings = math.ceil(predictions[0]/desiredcal)
new_calories = round(predictions[0]/new_servings,1)
'If you make', new_servings, "cookies instead using the same recipe, each cookie is only", new_calories, "calories. That's less than", desiredcal,'calories per cookie! :grin:'
``` |
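Two pieces of the quantity handling above are easy to check in isolation: `convert_fractions` turns strings such as `'2 1/2'` into floats, and the `np.where` ladder maps a unit keyword to an approximate gram weight before multiplying. A small worked example using a subset of that mapping (illustrative values only):
```python
# Worked example of the recipe-quantity normalization; values are illustrative.
from fractions import Fraction

def convert_fractions(quantity: str) -> float:
    return float(sum(Fraction(s) for s in quantity.split()))

print(convert_fractions("2 1/2"))   # 2.5, as in "2 1/2 cups sifted all-purpose flour"
print(convert_fractions("1/4"))     # 0.25

grams_per_unit = {"teaspoon": 5, "cup": 240, "ounce": 28}   # subset of the np.where ladder
norm_quant = round(grams_per_unit["cup"] * convert_fractions("2 1/2"))
print(norm_quant)                   # 600 -> the weight fed into the bag-of-words features
```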
{
"source": "jinoan/augwrap",
"score": 2
} |
#### File: src/augwrap/data.py
```python
from .utils.chart import stacked_bar
from copy import copy
import numpy as np
import cv2
from sklearn.model_selection import KFold as KF, StratifiedKFold as SKF
from torch.utils.data import Dataset
from tensorflow.keras.utils import Sequence
import xml.etree.ElementTree as ET
class BaseDataset:
def __init__(self, images, labels, classes, **kwargs):
self.__dict__ = kwargs
self.images = images
self.labels = labels
self.classes = classes
def __len__(self):
return len(self.images)
def __getitem__(self, index):
return {'image': self.images[index], 'label': self.labels[index]}
class TorchBaseDataset(Dataset):
def __init__(
self,
images=None,
labels=None,
classes=None,
**kwargs
):
super(TorchBaseDataset, self).__init__()
self.__dict__ = kwargs
self.images = images
self.labels = labels
self.classes = classes
def __len__(self):
return len(self.images)
def __getitem__(self, index):
return {"image": self.images[index], "label": self.labels[index]}
class TFBaseDataset(Sequence):
def __init__(
self,
images=None,
labels=None,
classes=None,
**kwargs
):
super(TFBaseDataset, self).__init__()
self.__dict__ = kwargs
self.images = images
self.labels = labels
self.classes = classes
def __len__(self):
return len(self.images)
def __getitem__(self, index):
return {"image": self.images[index], "label": self.labels[index]}
def base_inheritance(cls):
def inherit_base(dataset, *args, **kwargs):
return type(cls.__name__, (cls, dataset.__class__.mro()[-2],), {})(dataset, *args, **kwargs)
return inherit_base
@base_inheritance
class LoadImages:
def __init__(self, dataset, color_mode=1):
# color_mode 1: "color", 0: "grey", -1: "unchanged"
self.__dict__ = dataset.__dict__.copy()
self.dataset = dataset
self.color_mode = {1: "color", 0: "grey", -1: "unchanged"}.get(color_mode, color_mode)
def __getitem__(self, index):
sample = self.dataset[index]
sample["image"] = cv2.imread(
sample["image"],
flags={
"color": cv2.IMREAD_COLOR,
"grey": cv2.IMREAD_GRAYSCALE,
"unchanged": cv2.IMREAD_UNCHANGED
}[self.color_mode]
)
if sample["image"].ndim == 2:
sample["image"] = sample["image"][..., np.newaxis]
return sample
@base_inheritance
class ResizeImages:
def __init__(self, dataset, image_size=(200, 200), interpolation=cv2.INTER_LINEAR):
self.__dict__ = dataset.__dict__.copy()
self.dataset = dataset
self.image_size = image_size
self.interpolation = interpolation
def __getitem__(self, index):
sample = self.dataset[index]
sample["image"] = cv2.resize(sample["image"], dsize=self.image_size, interpolation=self.interpolation)
return sample
@base_inheritance
class OneHotLabels:
def __init__(self, dataset):
self.__dict__ = dataset.__dict__.copy()
self.dataset = dataset
def __getitem__(self, index):
sample = self.dataset[index]
sample["label"] = np.array(list(map(lambda x: int(x==sample["label"]), self.classes)))
return sample
@base_inheritance
class SparseLabels:
def __init__(self, dataset):
self.__dict__ = dataset.__dict__.copy()
self.dataset = dataset
def __getitem__(self, index):
sample = self.dataset[index]
sample["label"] = self.classes.index(sample["label"])
return sample
@base_inheritance
class Augmentations:
def __init__(self, dataset, augmentations):
self.__dict__ = dataset.__dict__.copy()
self.dataset = dataset
self.augmentations = augmentations
def __getitem__(self, index):
sample = self.dataset[index]
sample = self.augmentations(**sample)
return sample
@base_inheritance
class Transforms:
def __init__(self, dataset, transforms):
self.__dict__ = dataset.__dict__.copy()
self.dataset = dataset
self.transforms = transforms
def __getitem__(self, index):
sample = self.dataset[index]
sample = self.transforms(sample)
return sample
@base_inheritance
class TFDataGenerator:
def __init__(self, dataset, batch_size=16, shuffle=False):
self.__dict__ = dataset.__dict__.copy()
self.dataset = dataset
self.batch_size = batch_size
self.shuffle = shuffle
self.image_shape = dataset[0]["image"].shape
self.label_size = dataset[0]["label"].size
self.on_epoch_end()
def on_epoch_end(self):
self.indexes = np.arange(len(self.images))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __len__(self):
return -(-len(self.images) // self.batch_size)
def __getitem__(self, batch_index):
indexes = self.indexes[batch_index*self.batch_size:(batch_index+1)*self.batch_size]
images, labels = self.__data_generation(indexes)
# return batch size items
return images, labels
def __data_generation(self, indexes):
images = np.empty((indexes.size, *self.image_shape))
labels = np.empty((indexes.size, self.label_size))
for i, index in enumerate(indexes):
sample = self.dataset[index]
images[i,] = sample["image"]
labels[i,] = sample["label"]
return images, labels
class KFold:
def __init__(self, dataset):
self.__dict__ = dataset.__dict__.copy()
self.dataset = dataset
def split(self, n_splits=5, info=False):
kf = KF(n_splits=n_splits, shuffle=True).split(self.images, self.labels)
folds = []
for train_idx, test_idx in kf:
train_dataset = copy(self.dataset)
train_dataset.__dict__["images"] = list(map(lambda idx: self.images[idx], train_idx))
train_dataset.__dict__["labels"] = list(map(lambda idx: self.labels[idx], train_idx))
test_dataset = copy(self.dataset)
test_dataset.__dict__["images"] = list(map(lambda idx: self.images[idx], test_idx))
test_dataset.__dict__["labels"] = list(map(lambda idx: self.labels[idx], test_idx))
folds.append((train_dataset, test_dataset))
if info:
folds_info(folds)
return folds
class StratifiedKFold:
def __init__(self, dataset):
self.__dict__ = dataset.__dict__.copy()
self.dataset = dataset
def split(self, n_splits=5, info=False):
skf = SKF(n_splits=n_splits, shuffle=True).split(self.images, self.labels)
folds = []
for train_idx, test_idx in skf:
train_dataset = copy(self.dataset)
train_dataset.__dict__["images"] = list(map(lambda idx: self.images[idx], train_idx))
train_dataset.__dict__["labels"] = list(map(lambda idx: self.labels[idx], train_idx))
test_dataset = copy(self.dataset)
test_dataset.__dict__["images"] = list(map(lambda idx: self.images[idx], test_idx))
test_dataset.__dict__["labels"] = list(map(lambda idx: self.labels[idx], test_idx))
folds.append((train_dataset, test_dataset))
if info:
folds_info(folds)
return folds
def folds_info(folds, **kwargs):
if folds:
kwargs = {"x": [], "y": [[], []]}
for i, (train_dataset, test_dataset) in enumerate(folds):
kwargs["x"].append(i+1)
train_data_size = 0
test_data_size = 0
for _ in train_dataset.images: train_data_size += 1
for _ in test_dataset.images: test_data_size += 1
kwargs["y"][0].append(train_data_size)
kwargs["y"][1].append(test_data_size)
for fold, train_data_size, test_data_size in zip(kwargs["x"], *kwargs["y"]):
print(f"[fold {fold}] train_data_size: {train_data_size}, test_data_size: {test_data_size}")
stacked_bar(**kwargs)
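# Hedged usage sketch (how `dataset` was built is assumed): both splitters
# return a list of (train_dataset, test_dataset) pairs that keep the wrapper
# pipeline of the dataset they were given:
#   folds = StratifiedKFold(dataset).split(n_splits=5, info=True)
#   train_ds, test_ds = folds[0]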
@base_inheritance
class LoadPascalVOCLabels:
def __init__(self, dataset):
self.__dict__ = dataset.__dict__.copy()
self.dataset = dataset
self.bbox_format = "albumentations"
def __getitem__(self, index):
sample = self.dataset[index]
sample['labels'], sample['bboxes'] = self.__decode_xml(sample.pop('label'))
return sample
def __decode_xml(self, label):
tree = ET.parse(label)
root = tree.getroot()
width = int(root.find('size').find('width').text)
height = int(root.find('size').find('height').text)
objects = root.findall('object')
labels, bboxes = [], []
for obj in objects:
labels.append(obj.find('name').text)
box = obj.find('bndbox')
xmin = int(box.find('xmin').text) / width
ymin = int(box.find('ymin').text) / height
xmax = int(box.find('xmax').text) / width
ymax = int(box.find('ymax').text) / height
bboxes.append([xmin, ymin, xmax, ymax])
return labels, bboxes
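# Note: the coordinates above are normalized to [0, 1] by the image width and
# height, matching the "albumentations" bbox_format set in __init__; each
# sample carries parallel 'labels' and 'bboxes' lists.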
``` |
{
"source": "jino-cod/rest_meets_djongo",
"score": 2
} |
#### File: tests/fields/test_embedded.py
```python
from rest_meets_djongo.fields import EmbeddedModelField
from rest_meets_djongo.meta_manager import get_model_meta
from tests.models import ContainerModel, EmbedModel
from pytest import fixture, mark
@mark.embed
@mark.field
class TestDataParsing(object):
obj_data = {
'int_field': 123,
'char_field': "Hello"
}
instance = EmbedModel(**obj_data)
djm_embed = get_model_meta(ContainerModel).get_field('embed_field')
rmd_embed = EmbeddedModelField(model_field=djm_embed)
@fixture
def errors(self, build_tuple):
from rest_framework.exceptions import ValidationError
err_dict = {
'ValidationError': ValidationError,
'TypeError': TypeError
}
return build_tuple('Errors', err_dict)
def test_to_internal_val(self):
new_instance = self.rmd_embed.to_internal_value(self.obj_data)
assert str(self.instance) == str(new_instance)
def test_to_representation(self):
new_data = self.rmd_embed.to_representation(self.instance)
assert self.obj_data == new_data
def test_conversion_equivalence(self):
data = self.rmd_embed.to_representation(self.instance)
new_instance = self.rmd_embed.to_internal_value(data)
assert str(self.instance) == str(new_instance)
@mark.error
def test_invalid_rejection(self, error_raised):
# Non-dictionary values are rejected
not_a_dict = 1234
with error_raised:
self.rmd_embed.run_validation(not_a_dict)
# Dictionaries denoting fields which do not exist are rejected
wrong_dict = {
'bool_field': True,
'char_field': 'error'
}
with error_raised:
self.rmd_embed.run_validation(wrong_dict)
```
#### File: rest_meets_djongo/tests/models.py
```python
from djongo import models
# --- Basic Models --- #
# Generic, DRF compliant model, with all DRF fields
class GenericModel(models.Model):
big_int = models.BigIntegerField()
bool = models.BooleanField()
char = models.CharField(max_length=20)
comma_int = models.CommaSeparatedIntegerField()
date = models.DateField()
date_time = models.DateTimeField()
decimal = models.DecimalField(max_digits=10, decimal_places=5)
email = models.EmailField()
float = models.FloatField()
integer = models.IntegerField()
null_bool = models.NullBooleanField()
pos_int = models.PositiveIntegerField()
pos_small_int = models.PositiveSmallIntegerField()
slug = models.SlugField()
small_int = models.SmallIntegerField()
text = models.TextField()
time = models.TimeField()
url = models.URLField()
ip = models.GenericIPAddressField()
uuid = models.UUIDField()
# TODO: add these
# basic_file = models.FileField()
# image = models.ImageField()
objects = models.DjongoManager()
# Model with its primary key set as its ObjectID
class ObjIDModel(models.Model):
_id = models.ObjectIdField()
int_field = models.IntegerField()
char_field = models.CharField(max_length=5)
objects = models.DjongoManager()
# Model with variants for DRF standard field arguments
class OptionsModel(models.Model):
db_column_id = models.ObjectIdField(db_column='_id')
null_char = models.CharField(null=True)
blank_char = models.TextField(blank=True)
choice_char = models.CharField(choices=['Foo', 'Bar', 'Baz'])
default_email = models.EmailField(default='<EMAIL>')
read_only_int = models.IntegerField(editable=False)
# NOTE: By default, custom error messages are not preserved. This is
# just here to make sure it does not crash the serializer
custom_error = models.IntegerField(error_messages={
'blank': 'You tried to submit a blank integer, you dingus'
})
help_char = models.CharField(help_text='Super helpful text')
unique_int = models.IntegerField(unique=True)
objects = models.DjongoManager()
# --- Embedded Model Containing Models --- #
# Model for use w/ testing embedded models
class EmbedModel(models.Model):
int_field = models.IntegerField()
char_field = models.CharField(max_length=5)
objects = models.DjongoManager()
def __eq__(self, other):
return (isinstance(other, EmbedModel) and
self.char_field == other.char_field and
self.int_field == other.int_field)
def __str__(self):
return str(self.int_field) + "-" + str(self.char_field)
class Meta:
abstract = True
# Model for use w/ testing nested embedded models,
class ContainerModel(models.Model):
_id = models.ObjectIdField()
control_val = models.CharField(default='CONTROL', max_length=7)
embed_field = models.EmbeddedField(model_container=EmbedModel,
blank=True)
objects = models.DjongoManager()
def __eq__(self, other):
# Only compare _id if both have one (neither are embedded)
_id_match = True
if self._id and other._id:
_id_match = (str(self._id) == str(other._id))
# Compare the other values to confirm they are identical
return(
_id_match and
self.control_val == other.control_val and
self.embed_field.__eq__(other.embed_field)
)
def __str__(self):
vals = [self.control_val, str(self.embed_field)]
return f"{str(self._id)}: {'|'.join(vals)}"
# Model for testing w/ embedded models which contain embedded models
class DeepContainerModel(models.Model):
str_id = models.CharField(primary_key=True, max_length=10)
control_val = models.CharField(default='CONTROL', max_length=7)
deep_embed = models.EmbeddedField(model_container=ContainerModel)
objects = models.DjongoManager()
# Model for use w/ testing nested arrays of embedded models,
class ArrayContainerModel(models.Model):
_id = models.ObjectIdField()
embed_list = models.ArrayField(model_container=EmbedModel)
objects = models.DjongoManager()
class NullArrayContainerModel(models.Model):
_id = models.ObjectIdField()
nullable_list = models.ArrayField(model_container=EmbedModel, blank=True, null=True)
objects = models.DjongoManager()
# --- Relation Containing Models --- #
# Model related to by RelationContainerModel
class ManyToManyRelatedModel(models.Model):
_id = models.ObjectIdField()
boolean = models.BooleanField(default=True)
smol_int = models.SmallIntegerField()
objects = models.DjongoManager()
class ForeignKeyRelatedModel(models.Model):
_id = models.ObjectIdField()
null_bool = models.NullBooleanField()
description = models.TextField()
objects = models.DjongoManager()
# Model with representative types of relations
class RelationContainerModel(models.Model):
_id = models.ObjectIdField()
control_val = models.CharField(default='CONTROL', max_length=10)
fk_field = models.ForeignKey(to=ForeignKeyRelatedModel,
on_delete=models.CASCADE)
mtm_field = models.ManyToManyField(to=ManyToManyRelatedModel,
blank=True,
related_name='container_field')
objects = models.DjongoManager()
# Model related to by ArrayRelationModel
class ArrayRelatedModel(models.Model):
_id = models.ObjectIdField()
email = models.EmailField()
objects = models.DjongoManager()
class ArrayRelationModel(models.Model):
_id = models.ObjectIdField()
int_val = models.IntegerField(default=-1234)
arr_relation = models.ArrayReferenceField(
to=ArrayRelatedModel,
blank=True,
on_delete=models.CASCADE
)
objects = models.DjongoManager()
``` |
{
"source": "jinoh808/blender",
"score": 2
} |
#### File: scripts/templates_py/operator_modal_draw.py
```python
import bpy
import bgl
import blf
import gpu
from gpu_extras.batch import batch_for_shader
def draw_callback_px(self, context):
print("mouse points", len(self.mouse_path))
font_id = 0 # XXX, need to find out how best to get this.
# draw some text
blf.position(font_id, 15, 30, 0)
blf.size(font_id, 20, 72)
blf.draw(font_id, "Hello World " + str(len(self.mouse_path)))
# 50% alpha, 2 pixel width line
shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR')
bgl.glEnable(bgl.GL_BLEND)
bgl.glLineWidth(2)
batch = batch_for_shader(shader, 'LINE_STRIP', {"pos": self.mouse_path})
shader.bind()
shader.uniform_float("color", (1.0, 1.0, 1.0, 0.5))
batch.draw(shader)
# restore opengl defaults
bgl.glLineWidth(1)
bgl.glDisable(bgl.GL_BLEND)
class ModalDrawOperator(bpy.types.Operator):
"""Draw a line with the mouse"""
bl_idname = "template.modal_operator"
bl_label = "Simple Modal Draw Operator"
def modal(self, context, event):
context.area.tag_redraw()
if event.type == 'MOUSEMOVE':
self.mouse_path.append((event.mouse_region_x, event.mouse_region_y))
elif event.type == 'LEFTMOUSE':
bpy.types.SpaceTextEditor.draw_handler_remove(self._handle, 'WINDOW')
return {'FINISHED'}
elif event.type in {'RIGHTMOUSE', 'ESC'}:
bpy.types.SpaceTextEditor.draw_handler_remove(self._handle, 'WINDOW')
return {'CANCELLED'}
return {'RUNNING_MODAL'}
def invoke(self, context, event):
if context.area.type == 'TEXT_EDITOR':
# the arguments we pass to the callback
args = (self, context)
# Add the region OpenGL drawing callback
# draw in TEXT_EDITOR space with 'POST_PIXEL'
self._handle = bpy.types.SpaceTextEditor.draw_handler_add(draw_callback_px, args, 'WINDOW', 'POST_PIXEL')
self.mouse_path = []
context.window_manager.modal_handler_add(self)
return {'RUNNING_MODAL'}
else:
self.report({'WARNING'}, "Text Editor not found, cannot run operator")
return {'CANCELLED'}
def register():
bpy.utils.register_class(ModalDrawOperator)
def unregister():
bpy.utils.unregister_class(ModalDrawOperator)
def menu_func(self, context):
self.layout.operator(ModalDrawOperator.bl_idname)
if __name__ == "__main__":
register()
bpy.types.TOPBAR_MT_app_system.append(menu_func)
``` |
{
"source": "Jinoh-Cho/Visual-Genome-Image-Inpainting",
"score": 2
} |
#### File: src/metrics/ins.py
```python
import math
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from sklearn.metrics import top_k_accuracy_score
from tqdm import tqdm
import torch
import utils.sample as sample
import utils.misc as misc
def inception_softmax(eval_model, images):
with torch.no_grad():
embeddings, logits = eval_model.get_outputs(images)
ps = torch.nn.functional.softmax(logits, dim=1)
return ps
def calculate_kl_div(ps, splits):
scores = []
num_samples = ps.shape[0]
with torch.no_grad():
for j in range(splits):
part = ps[(j * num_samples // splits):((j + 1) * num_samples // splits), :]
kl = part * (torch.log(part) - torch.log(torch.unsqueeze(torch.mean(part, 0), 0)))
kl = torch.mean(torch.sum(kl, 1))
kl = torch.exp(kl)
scores.append(kl.unsqueeze(0))
scores = torch.cat(scores, 0)
m_scores = torch.mean(scores).detach().cpu().numpy()
m_std = torch.std(scores).detach().cpu().numpy()
return m_scores, m_std
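# Note: this is the standard Inception Score, exp(E_x[KL(p(y|x) || p(y))]),
# computed over `splits` disjoint chunks of the softmax outputs; the mean and
# standard deviation across chunks are returned.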
def eval_generator(data_loader, generator, discriminator, eval_model, num_generate, y_sampler, split, batch_size,
z_prior, truncation_factor, z_dim, num_classes, LOSS, RUN, is_stylegan, generator_mapping,
generator_synthesis, is_acc, device, logger, disable_tqdm):
eval_model.eval()
ps_holder = []
if is_acc:
ImageNet_folder_label_dict = misc.load_ImageNet_label_dict()
loader_label_folder_dict = {v: k for k, v, in data_loader.dataset.data.class_to_idx.items()}
loader_label_holder = []
else:
top1, top5 = "N/A", "N/A"
if device == 0 and not disable_tqdm:
logger.info("Calculate Inception score of generated images ({} images).".format(num_generate))
num_batches = int(math.ceil(float(num_generate) / float(batch_size)))
for i in tqdm(range(num_batches), disable=disable_tqdm):
fake_images, fake_labels, _, _, _ = sample.generate_images(z_prior=z_prior,
truncation_factor=truncation_factor,
batch_size=batch_size,
z_dim=z_dim,
num_classes=num_classes,
y_sampler=y_sampler,
radius="N/A",
generator=generator,
discriminator=discriminator,
is_train=False,
LOSS=LOSS,
RUN=RUN,
is_stylegan=is_stylegan,
generator_mapping=generator_mapping,
generator_synthesis=generator_synthesis,
style_mixing_p=0.0,
device=device,
cal_trsp_cost=False)
ps = inception_softmax(eval_model, fake_images)
ps_holder.append(ps)
if is_acc:
loader_label_holder += list(fake_labels.detach().cpu().numpy())
with torch.no_grad():
ps_holder = torch.cat(ps_holder, 0)
m_scores, m_std = calculate_kl_div(ps_holder[:num_generate], splits=split)
if is_acc:
converted_labels = []
for loader_label in loader_label_holder:
converted_labels.append(ImageNet_folder_label_dict[loader_label_folder_dict[loader_label]])
pred = torch.argmax(ps_holder, 1).detach().cpu().numpy() - 1
top1 = top_k_accuracy_score([i + 1 for i in converted_labels], ps_holder[:, 1:1001].detach().cpu().numpy(), k=1)
top5 = top_k_accuracy_score([i + 1 for i in converted_labels], ps_holder[:, 1:1001].detach().cpu().numpy(), k=5)
return m_scores, m_std, top1, top5
def eval_dataset(data_loader, eval_model, splits, batch_size, device, disable_tqdm=False):
eval_model.eval()
num_samples = len(data_loader.dataset)
num_batches = int(math.ceil(float(num_samples) / float(batch_size)))
dataset_iter = iter(data_loader)
ps_holder = []
for i in tqdm(range(num_batches), disable=disable_tqdm):
real_images, real_labels = next(dataset_iter)
real_images = real_images.to(device)
ps = inception_softmax(eval_model, real_images)
ps_holder.append(ps)
with torch.no_grad():
ps_holder = torch.cat(ps_holder, 0)
m_scores, m_std = calculate_kl_div(ps_holder, splits=splits)
return m_scores, m_std
```
#### File: src/utils/ema.py
```python
import random
import torch
class Ema(object):
def __init__(self, source, target, decay=0.9999, start_iter=0):
self.source = source
self.target = target
self.decay = decay
self.start_iter = start_iter
self.source_dict = self.source.state_dict()
self.target_dict = self.target.state_dict()
print("Initialize the copied generator's parameters to be source parameters.")
with torch.no_grad():
for p_ema, p in zip(self.target.parameters(), self.source.parameters()):
p_ema.copy_(p)
for b_ema, b in zip(self.target.buffers(), self.source.buffers()):
b_ema.copy_(b)
def update(self, iter=None):
if iter >= 0 and iter < self.start_iter:
decay = 0.0
else:
decay = self.decay
with torch.no_grad():
for p_ema, p in zip(self.target.parameters(), self.source.parameters()):
p_ema.copy_(p.lerp(p_ema, decay))
for (b_ema_name, b_ema), (b_name, b) in zip(self.target.named_buffers(), self.source.named_buffers()):
if "num_batches_tracked" in b_ema_name:
b_ema.copy_(b)
else:
b_ema.copy_(b.lerp(b_ema, decay))
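# Note on the update above: p.lerp(p_ema, decay) evaluates to
# p + decay * (p_ema - p), i.e. the usual EMA rule
# p_ema_new = decay * p_ema + (1 - decay) * p.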
class EmaStylegan2(object):
def __init__(self, source, target, ema_kimg, ema_rampup, effective_batch_size):
self.source = source
self.target = target
self.ema_nimg = ema_kimg * 1000
self.ema_rampup = ema_rampup
self.batch_size = effective_batch_size
self.source_dict = self.source.state_dict()
self.target_dict = self.target.state_dict()
print("Initialize the copied generator's parameters to be source parameters.")
with torch.no_grad():
for p_ema, p in zip(self.target.parameters(), self.source.parameters()):
p_ema.copy_(p)
for b_ema, b in zip(self.target.buffers(), self.source.buffers()):
b_ema.copy_(b)
def update(self, iter=None):
ema_nimg = self.ema_nimg
if self.ema_rampup != "N/A":
cur_nimg = self.batch_size * iter
ema_nimg = min(self.ema_nimg, cur_nimg * self.ema_rampup)
ema_beta = 0.5 ** (self.batch_size / max(ema_nimg, 1e-8))
with torch.no_grad():
for p_ema, p in zip(self.target.parameters(), self.source.parameters()):
p_ema.copy_(p.lerp(p_ema, ema_beta))
for b_ema, b in zip(self.target.buffers(), self.source.buffers()):
b_ema.copy_(b)
``` |
{
"source": "jinok2im/ColossalAI",
"score": 2
} |
#### File: ColossalAI/colossalai/initialize.py
```python
import argparse
import os
import pprint
from pathlib import Path
from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from torch.nn.modules.loss import _Loss
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from colossalai.amp import AMP_TYPE, convert_to_amp
from colossalai.amp.naive_amp import NaiveAMPModel
from colossalai.builder.builder import build_gradient_handler
from colossalai.context import Config, ConfigException, ParallelMode
from colossalai.core import global_context as gpc
from colossalai.context.moe_context import MOE_CONTEXT
from colossalai.engine import Engine
from colossalai.engine.ophooks import BaseOpHook
from colossalai.logging import get_dist_logger
from colossalai.nn.optimizer.colossalai_optimizer import ColossalaiOptimizer
from colossalai.utils import (accumulate_gradient, get_current_device, is_using_ddp, is_using_pp, is_using_sequence,
sync_model_param)
from colossalai.utils.moe import sync_moe_model_param
from colossalai.zero import convert_to_zero_v2
from colossalai.zero.sharded_optim.sharded_optim_v2 import ShardedOptimizerV2
def get_default_parser():
"""Reads user command line and uses an argument parser to parse the input arguments.
Input arguments include configuration, host, port, world size, local rank, backend for torch.distributed.
:return: Returns the parser with the default arguments, the user may add customized arguments into this parser
:rtype: Namespace
"""
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, help='path to the config file')
parser.add_argument('--host', type=str, help='the master address for distributed training')
parser.add_argument('--port', type=int, help='the master port for distributed training')
parser.add_argument('--world_size', type=int, help='world size for distributed training')
parser.add_argument('--rank', type=int, help='rank for the default process group')
parser.add_argument('--local_rank', type=int, help='local rank on the node')
parser.add_argument('--backend', type=str, default='nccl', help='backend for distributed communication')
return parser
def launch(config: Union[str, Path, Config, Dict],
rank: int,
world_size: int,
host: str,
port: int,
backend: str = 'nccl',
local_rank: int = None,
seed: int = 1024,
verbose: bool = True):
"""This function first parses the configuration arguments, using :func:`parse_args()` in case one of the input
arguments are not given. Then initialize and set distributed environment by calling global_context's functions.
:param config: Config file or config file path are both acceptable
:type config: Union[str, dict, Config]
:param rank: Rank for the default process group
:type rank: int
:param world_size: World size of the default process group
:type world_size: int
:param host: The master address for distributed training
:type host: str
:param port: The master port for distributed training
:type port: int
:param backend: Backend for torch.distributed
:type backend: str, optional
:param local_rank: Rank for the process on the node and is used to set the default CUDA device, defaults to None.
If local_rank = None, the default device ordinal will be calculated automatically
:type local_rank: int, optional
:param seed: Specified random seed for every processes
:type seed: int, optional
:param verbose: Whether to print logs
:type verbose: bool, optional
:raises Exception: Raise exception when config type is wrong
"""
gpc.verbose = verbose
# set config
assert isinstance(config, (Config, str, Path, dict)), \
f'expected argument config to be Config, str or Path, but got {type(config)}'
if not isinstance(config, Config) and isinstance(config, dict):
config = Config(config)
if isinstance(config, (str, Path)):
config = Config.from_file(config)
gpc.load_config(config)
# init default process group
gpc.init_global_dist(rank, world_size, backend, host, port)
# init process groups for different parallel modes from config
gpc.init_parallel_groups()
# set cuda device
if torch.cuda.is_available():
# if local rank is not given, calculate automatically
gpc.set_device(local_rank)
gpc.set_seed(seed)
if verbose:
logger = get_dist_logger()
logger.info(
f'Distributed environment is initialized, '
f'data parallel size: {gpc.data_parallel_size}, pipeline parallel size: {gpc.pipeline_parallel_size}, '
f'tensor parallel size: {gpc.tensor_parallel_size}',
ranks=[0])
def launch_from_slurm(config: Union[str, Path, Config, Dict],
host: str,
port: int,
backend: str = 'nccl',
seed: int = 1024,
verbose: bool = True):
"""A wrapper for colossalai.launch for SLURM launcher by reading rank and world size from the environment variables
set by SLURM
:param config: Config file or config file path are both acceptable
:type config: Union[str, dict, Config]
:param host: The master address for distributed training
:type host: str
:param port: The master port for distributed training
:type port: int
:param backend: Backend for torch.distributed
:type backend: str, optional
:param seed: Specified random seed for every processes
:type seed: int, optional
:param verbose: Whether to print logs
:type verbose: bool, optional
"""
rank = int(os.environ['SLURM_PROCID'])
world_size = int(os.environ['SLURM_NPROCS'])
launch(config=config,
rank=rank,
world_size=world_size,
host=host,
port=port,
backend=backend,
seed=seed,
verbose=verbose)
def launch_from_openmpi(config: Union[str, Path, Config, Dict],
host: str,
port: int,
backend: str = 'nccl',
seed: int = 1024,
verbose: bool = True):
"""A wrapper for colossalai.launch for OpenMPI launcher by reading rank and world size from the environment variables
set by OpenMPI
:param config: Config file or config file path are both acceptable
:type config: Union[str, dict, Config]
:param host: The master address for distributed training
:type host: str
:param port: The master port for distributed training
:type port: str
:param backend: Backend for torch.distributed
:type backend: str, optional
:param seed: Specified random seed for every processes
:type seed: int, optional
:param verbose: Whether to print logs
:type verbose: bool, optional
"""
rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
local_rank = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
launch(config=config,
local_rank=local_rank,
rank=rank,
world_size=world_size,
host=host,
port=port,
backend=backend,
seed=seed,
verbose=verbose)
def launch_from_torch(config: Union[str, Path, Config, Dict],
backend: str = 'nccl',
seed: int = 1024,
verbose: bool = True):
"""A wrapper for colossalai.launch for torchrun or torch.distributed.launch by reading rank and world size
from the environment variables set by PyTorch
:param config: Config file or config file path are both acceptable
:type config: Union[str, dict, Config]
:param backend: Backend for torch.distributed
:type backend: str, optional
:param seed: Specified random seed for every processes
:type seed: int, optional
:param verbose: Whether to print logs
:type verbose: bool, optional
"""
rank = int(os.environ['RANK'])
local_rank = int(os.environ['LOCAL_RANK'])
world_size = int(os.environ['WORLD_SIZE'])
host = os.environ['MASTER_ADDR']
port = int(os.environ['MASTER_PORT'])
launch(config=config,
local_rank=local_rank,
rank=rank,
world_size=world_size,
host=host,
port=port,
backend=backend,
seed=seed,
verbose=verbose)
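# Hedged usage sketch (the config path is an assumption): when a script is
# started with `torchrun --nproc_per_node=N train.py`, this wrapper reads
# RANK/LOCAL_RANK/WORLD_SIZE/MASTER_ADDR/MASTER_PORT from the environment, so
# a typical entry point is simply:
#   colossalai.launch_from_torch(config='./config.py')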
def initialize(model: nn.Module,
optimizer: Optimizer,
criterion: Optional[_Loss] = None,
train_dataloader: Optional[Iterable] = None,
test_dataloader: Optional[Iterable] = None,
lr_scheduler: Optional[_LRScheduler] = None,
ophooks: Optional[List[BaseOpHook]] = None,
verbose: bool = True) -> Tuple[Engine, DataLoader, DataLoader, _LRScheduler]:
"""Core function to wrap the essential training components with our functionality based on the config which is
loaded into gpc.config.
:param model: Your model instance or a function to build the model
:type model: :class:`torch.nn.Module` or Callable
:param optimizer: Your optimizer instance
:type optimizer: :class:`torch.optim.optimizer.Optimizer` or :class:`Type[torch.optim.optimizer]`
:param criterion: Your criterion instance
:type criterion: :class:`torch.nn.modules.loss._Loss`, optional
:param train_dataloader: Dataloader for training
:type train_dataloader: :class:`torch.utils.data.DataLoader`, optional
:param test_dataloader: Dataloader for testing
:type test_dataloader: :class:`torch.utils.data.DataLoader`, optional
:param lr_scheduler: Your lr scheduler instance, optional
:type lr_scheduler: :class:`torch.optim.lr_scheduler._LRScheduler`, optional
:param verbose: Whether to print logs
:type verbose: bool, optional
:return: (engine, train_dataloader, test_dataloader, lr_scheduler)
:rtype: Tuple
"""
# get logger
logger = get_dist_logger()
gpc.verbose = verbose
# get config from gpc
config = gpc.config
# print config
if verbose:
logger.info(
f"\n========== Your Config ========\n"
f"{pprint.pformat(gpc.config)}\n"
f"================================\n",
ranks=[0])
# cudnn
cudnn_benchmark = config.get('cudnn_benchmark', True)
cudnn_deterministic = config.get('cudnn_deterministic', False)
torch.backends.cudnn.benchmark = cudnn_benchmark
torch.backends.cudnn.deterministic = cudnn_deterministic
if verbose:
logger.info(f"cuDNN benchmark = {cudnn_benchmark}, deterministic = {cudnn_deterministic}", ranks=[0])
# zero
use_zero = hasattr(gpc.config, 'zero')
if use_zero:
zero_cfg = gpc.config.get('zero', None)
if zero_cfg is not None:
cfg_ = zero_cfg.copy()
else:
cfg_ = {}
optimizer_config = zero_cfg.get('optimizer_config', None)
model_config = zero_cfg.get('model_config', None)
model, optimizer = convert_to_zero_v2(model,
optimizer,
model_config=model_config,
optimizer_config=optimizer_config)
logger.info("Initializing ZeRO model and optimizer finished!", ranks=[0])
# FIXME() throw a warning if using zero with MP
if gpc.get_world_size(ParallelMode.MODEL) > 1:
logger.warning("ZeRO currently has not been tested with model parallelism.", ranks=[0])
else:
if isinstance(model, nn.Module):
# first sync model across dp ranks
model.to(get_current_device())
elif isinstance(model, Callable):
model = model().to(get_current_device())
# optimizer may be an optimizer class rather than an instance
logger.warning("Initializing a non-ZeRO model with an optimizer class")
if isinstance(optimizer, Callable):
optimizer = optimizer(model.parameters())
if not use_zero:
if is_using_sequence():
sync_model_param(model, ParallelMode.SEQUENCE_DP)
elif MOE_CONTEXT.is_initialized:
sync_moe_model_param(model)
elif is_using_ddp():
sync_model_param(model, ParallelMode.DATA)
else:
logger.warning(
"The parameters of models is not automatically synchronized.\n"
"Please make sure that all parameters are the same in data parallel group.",
ranks=[0])
# check amp and zero
fp16_cfg = gpc.config.get('fp16', None)
if fp16_cfg is not None and fp16_cfg.mode is not None and use_zero:
raise ConfigException(
"It is not allowed to set fp16 and zero configuration in your config file at the same time")
# clip grad norm
clip_grad_norm = gpc.config.get('clip_grad_norm', 0.0)
if clip_grad_norm > 0:
if use_zero and zero_cfg is not None:
raise ConfigException(
"clip_grad_norm should be specified with zero, you should specify clip_grad in zero configuration")
# initialize amp
amp_mode = None
if fp16_cfg is not None and fp16_cfg.mode is not None:
cfg_ = fp16_cfg.copy()
amp_mode = cfg_.pop('mode')
if is_using_pp():
assert amp_mode == AMP_TYPE.NAIVE, 'Pipeline only support NaiveAMP currently'
if amp_mode == AMP_TYPE.NAIVE:
cfg_['clip_grad_norm'] = clip_grad_norm
model, optimizer, criterion = convert_to_amp(model=model,
optimizer=optimizer,
criterion=criterion,
mode=amp_mode,
amp_config=cfg_)
# gradient handler
gradient_handler_cfg = gpc.config.get('gradient_handler', None)
if gradient_handler_cfg is None:
# if gradient handler is not specified in the configuration file,
# check in the following order
# 1. if optimizer is ZERO, then use zero grad handler
# 2. if dp size is larger than 1 and pipeline is not used, use pytorch ddp
# 3. if using pipeline and dp size larger than 1, use data parallel grad handler
if isinstance(optimizer, ShardedOptimizerV2):
gradient_handler_cfg = [dict(type='ZeROGradientHandler')]
if verbose:
logger.info(
"Training with zero is detected, ZeROGradientHandler is automatically "
"added even though not specified in the configuration",
ranks=[0])
elif is_using_ddp() and MOE_CONTEXT.is_initialized:
gradient_handler_cfg = [dict(type='MoeGradientHandler')]
if verbose:
logger.info(
"Data parallel training is detected with moe parallel, MoeGradientHandler is automatically "
"added even though not specified in the configuration",
ranks=[0])
elif is_using_sequence():
model = DDP(model,
process_group=gpc.get_group(ParallelMode.SEQUENCE_DP),
device_ids=[torch.cuda.current_device()])
if verbose:
logger.info('Model is using torch.nn.parallel.DistributedDataParallel for Sequence Parallelism',
ranks=[0])
elif is_using_ddp() and not is_using_pp() and amp_mode != AMP_TYPE.NAIVE:
model = DDP(model, process_group=gpc.get_group(ParallelMode.DATA), device_ids=[torch.cuda.current_device()])
if verbose:
logger.info('Model is using torch.nn.parallel.DistributedDataParallel for Data Parallelism', ranks=[0])
elif is_using_ddp():
gradient_handler_cfg = [dict(type='DataParallelGradientHandler')]
if verbose:
logger.info(
"Data parallel training is detected when using pipeline parallel, "
"DataParallelGradientHandler is automatically "
"added even though not specified in the configuration",
ranks=[0])
# add pipeline parallel gradient handler, if pipeline shared module is detected
for param in model.parameters():
if getattr(param, 'pipeline_shared_module_pg', None) is not None:
if gradient_handler_cfg is None:
gradient_handler_cfg = [dict(type='PipelineSharedModuleGradientHandler')]
else:
gradient_handler_cfg.append(dict(type='PipelineSharedModuleGradientHandler'))
if verbose:
logger.info(
"pipeline_shared_module is detected, PipelineSharedModuleGradientHandler is automatically "
"added even though not specified in the configuration",
ranks=[0])
break
else:
if not isinstance(gradient_handler_cfg, list):
raise ConfigException(
f"expected gradient_handler in the configuration file to be a list but got {type(gradient_handler_cfg)}"
)
# turn off sync buffer for NaiveAMPModel if using torch DDP and NaiveAMPModel at the same time
# to avoid duplicated buffer synchronization
if isinstance(model, DDP) and isinstance(model.module, NaiveAMPModel):
model.module.sync_buffer = False
if gradient_handler_cfg is None:
gradient_handlers = None
if verbose and not isinstance(model, DDP):
logger.warning(
"No PyTorch DDP or gradient handler is set up, please make sure you do not need "
"to all-reduce the gradients after a training step.",
ranks=[0])
else:
gradient_handlers = [build_gradient_handler(cfg, model, optimizer) for cfg in gradient_handler_cfg]
# check if optimizer is ColossalaiOptimizer
if not isinstance(optimizer, (ColossalaiOptimizer, ShardedOptimizerV2)):
optimizer = ColossalaiOptimizer(optim=optimizer)
# gradient accumulation
grad_accum_size = gpc.config.get('gradient_accumulation', None)
if grad_accum_size is not None:
optimizer, train_dataloader, gradient_handlers, lr_scheduler = accumulate_gradient(
model=model,
optimizer=optimizer,
dataloader=train_dataloader,
accumulate_size=grad_accum_size,
gradient_handlers=gradient_handlers,
lr_scheduler=lr_scheduler)
engine = Engine(model=model,
optimizer=optimizer,
criterion=criterion,
gradient_handlers=gradient_handlers,
clip_grad_norm=clip_grad_norm,
ophook_list=ophooks)
return engine, train_dataloader, test_dataloader, lr_scheduler
```
#### File: colossalai/logging/__init__.py
```python
import logging
from typing import List, Optional
from .logger import DistributedLogger
__all__ = ['get_dist_logger', 'DistributedLogger', 'disable_existing_loggers']
def get_dist_logger(name='colossalai'):
"""Get logger instance based on name. The DistributedLogger will create singleton instances,
which means that only one logger instance is created per name.
:param name: name of the logger, name must be unique
:type name: str
:return: a distributed logger instance
:rtype: :class:`colossalai.logging.DistributedLogger`
"""
return DistributedLogger.get_instance(name=name)
def disable_existing_loggers(include: Optional[List[str]] = None, exclude: List[str] = ['colossalai']):
"""Set the level of existing loggers to `WARNING`. By default, it will "disable" all existing loggers except the logger named "colossalai".
Args:
include (Optional[List[str]], optional): Loggers whose name in this list will be disabled.
If set to `None`, `exclude` argument will be used. Defaults to None.
exclude (List[str], optional): Loggers whose name not in this list will be disabled.
This argument will be used only when `include` is None. Defaults to ['colossalai'].
"""
if include is None:
filter_func = lambda name: name not in exclude
else:
filter_func = lambda name: name in include
for log_name in logging.Logger.manager.loggerDict.keys():
if filter_func(log_name):
logging.getLogger(log_name).setLevel(logging.WARNING)
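# Usage sketch ('my_app' is a hypothetical logger name): silence everything
# except colossalai's own logger, or keep an extra logger alive:
#   disable_existing_loggers()
#   disable_existing_loggers(exclude=['colossalai', 'my_app'])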
```
#### File: zero/shard_utils/tensor_shard_strategy.py
```python
from typing import List, Optional
import torch
import torch.distributed as dist
from colossalai.utils import get_current_device
from colossalai.utils.memory_utils.utils import colo_model_data_tensor_move, colo_model_data_tensor_move_inline
from colossalai.zero.shard_utils import BaseShardStrategy
from colossalai.zero.shard_utils.commons import get_shard
from colossalai.zero.sharded_param.sharded_tensor import ShardedTensor
class TensorShardStrategy(BaseShardStrategy):
"""
A naive implementation that shards each tensor evenly over all ranks
"""
def shard(self, tensor_list: List[ShardedTensor], process_group: Optional[dist.ProcessGroup] = None):
for t in tensor_list:
self._shard_tensor(t, process_group)
def gather(self, tensor_list: List[ShardedTensor], process_group: Optional[dist.ProcessGroup] = None):
for t in tensor_list:
self._gather_tensor(t, process_group)
def _shard_tensor(self, t: ShardedTensor, process_group: Optional[dist.ProcessGroup] = None):
""" Shard tensor among processes.
Args:
t (ShardedTensor): a tensor to be sharded.
process_group (Optional[dist.ProcessGroup], optional): the process group among which tensor shards.
Defaults to None.
"""
if t.is_sharded:
return
if t.payload.device.type == 'cuda':
assert t.payload.device.index == get_current_device(), f"shard tensor on cuda device index {t.payload.device.index},"\
f" but current cuda device is {get_current_device()}"
sharded_payload, _ = get_shard(t.payload, dist.get_rank(process_group), dist.get_world_size(process_group))
t.reset_payload(sharded_payload)
t.is_sharded = True
def _gather_tensor(self, t: ShardedTensor, process_group: Optional[dist.ProcessGroup] = None):
if not t.is_sharded:
return
target_device = t.device
buffer_list = []
payload_numel = t.payload.numel()
world_size = dist.get_world_size(process_group)
rank = dist.get_rank(process_group)
for i in range(world_size):
if i == rank:
buffer_list.append(t.payload.cuda(get_current_device()))
else:
buffer_list.append(torch.zeros(payload_numel, dtype=t.dtype, device=get_current_device()))
dist.all_gather(buffer_list, buffer_list[rank], group=process_group, async_op=False)
gathered_payload = torch.narrow(torch.cat(buffer_list), 0, 0, t.origin_numel).reshape(t.origin_shape)
t.reset_payload(gathered_payload)
colo_model_data_tensor_move_inline(t, target_device, use_tracer=False)
t.is_sharded = False
``` |
{
"source": "jinong-devteam/pyjns",
"score": 4
} |
#### File: pyjns/pyjns/enum.py
```python
def enum(*sequential, **named):
"""
A function to generate an enum-like type from a sequence of names and/or keyword values.
"""
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = dict((value, key) for key, value in enums.items())
enums['reverse_mapping'] = reverse
return type('Enum', (), enums)
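# Usage sketch for the function above:
#   Color = enum('RED', 'GREEN', 'BLUE')
#   Color.RED                  # -> 0
#   Color.reverse_mapping[2]   # -> 'BLUE'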
``` |
{
"source": "jinoobaek-qz/mesh",
"score": 2
} |
#### File: mesh_tensorflow/auto_mtf/memory_estimator.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from mesh_tensorflow.auto_mtf import graph_interface
from mesh_tensorflow.auto_mtf import valid_layouts
class MemoryEstimator(object):
"""Estimates memory cost of a MTF graph based on the size of MTF tensors.
Usage Example:
estimator = memory_estimator.MemoryEstimator(mtf_graph, mesh_shape)
layout_validator = estimator.get_layout_validator()
graph = estimator.get_graph_interface()
Attributes:
mtf_graph: an mtf.Graph, see argument in __init__.
mesh_shape: an mtf.Shape, see argument in __init__.
mtf_outputs: an iterable of mtf.Tensor, see argument in __init__.
"""
def __init__(self, mtf_graph, mesh_shape, mtf_outputs=()):
"""Initializer.
Args:
mtf_graph: a mtf.Graph.
mesh_shape: an mtf.Shape.
mtf_outputs: an optional iterable of mtf.Tensor, representing the outputs
of the computation.
"""
self.mtf_graph = mtf_graph
self.mesh_shape = mesh_shape
self.mtf_outputs = mtf_outputs
self._layout_validator = None # valid_layouts.LayoutValidator
self._graph_interface = None # graph_interface.GraphInterface
def get_layout_validator(self):
"""LayoutValidator for the model and mesh_shape.
Returns:
a valid_layouts.LayoutValidator
"""
if self._layout_validator is None:
self._compute_layout_validator()
return self._layout_validator
def get_graph_interface(self):
"""GraphInterface representation of the model's computation graph.
Returns:
a graph_interface.GraphInterface
"""
if self._graph_interface is None:
self._compute_graph_interface()
return self._graph_interface
def _compute_layout_validator(self):
"""Computes self._layout_validator."""
self._layout_validator = valid_layouts.LayoutValidator(self.mtf_graph,
self.mesh_shape)
def _compute_graph_interface(self):
"""Computes self._graph_interface."""
self._graph_interface = graph_interface.GraphInterface(self.mtf_graph)
for mtf_output in self.mtf_outputs:
self._graph_interface.set_tensor_final(mtf_output.name)
```
#### File: mesh_tensorflow/auto_mtf/valid_layouts_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
from mesh_tensorflow.auto_mtf import valid_layouts
import tensorflow.compat.v1 as tf
class LayoutValidatorTest(tf.test.TestCase):
def setUp(self):
super(LayoutValidatorTest, self).setUp()
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
a_dim = mtf.Dimension("a", 5)
b_dim = mtf.Dimension("b", 10)
concat_dim1 = mtf.Dimension("concat", 15)
concat_dim2 = mtf.Dimension("concat", 20)
x1 = mtf.zeros(mesh, mtf.Shape([a_dim, b_dim, concat_dim1]))
x2 = mtf.zeros(mesh, mtf.Shape([a_dim, b_dim, concat_dim2]))
mtf.ConcatOperation([x1, x2], "concat")
# We add a tensor with anonymous shape, which is supposed to be
# unsplittable (i.e. none of its dimensions show up during
# test_SplittableMtfDimensionNames).
_ = mtf.zeros(mesh, mtf.anonymous_shape(mtf.Shape([a_dim, b_dim])))
mesh_shape = mtf.Shape([("m1", 4), ("m2", 2)])
self.valid_layouts = valid_layouts.LayoutValidator(graph, mesh_shape)
def test_SplittableMtfDimensionNames(self):
self.assertEqual(self.valid_layouts.splittable_mtf_dimension_names,
set(["a", "b"]))
def test_MeshDimensionNameToSize(self):
self.assertEqual(self.valid_layouts.mesh_dimension_name_to_size,
{"m1": 4, "m2": 2})
def test_is_valid_assignment(self):
# Due to divisibility, the a dimension cannot be assigned to m1 or m2.
self.assertFalse(self.valid_layouts.is_valid_assignment("a", "m1"))
self.assertFalse(self.valid_layouts.is_valid_assignment("a", "m2"))
# The b dimension can only be assigned to m2.
self.assertFalse(self.valid_layouts.is_valid_assignment("b", "m1"))
self.assertTrue(self.valid_layouts.is_valid_assignment("b", "m2"))
# Due to ConcatOperation, the concat dimension may not be assigned.
self.assertFalse(self.valid_layouts.is_valid_assignment("concat", "m1"))
self.assertFalse(self.valid_layouts.is_valid_assignment("concat", "m2"))
if __name__ == "__main__":
tf.disable_v2_behavior()
tf.test.main()
```
#### File: mesh_tensorflow/transformer/transformer_layers.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import math
import gin
import mesh_tensorflow as mtf
from mesh_tensorflow import layers
from mesh_tensorflow.transformer import attention
from mesh_tensorflow.transformer import transformer
import tensorflow.compat.v1 as tf
@gin.configurable
class DenseReluDense(transformer.TransformerLayer):
"""Two fully-connected layers with feed-forward activation."""
def __init__(self, hidden_size=4096, dropout_rate=0.0):
"""Create a DenseReluDense.
Args:
hidden_size: an integer - size of the hidden layer
dropout_rate: a floating-point number
"""
self.hidden_size = hidden_size
self.dropout_rate = dropout_rate
def call(self, context, x, losses=None):
"""Call the layer."""
io_channels = x.shape.dims[-1]
hidden_channels = mtf.Dimension("d_ff", self.hidden_size)
if context.model.ensemble_dim:
expert_dims = [context.model.ensemble_dim]
else:
expert_dims = None
h = mtf.layers.dense(x, hidden_channels,
use_bias=False, activation=mtf.relu,
variable_dtype=context.variable_dtype,
reduced_dims=x.shape.dims[-1:],
name="wi", expert_dims=expert_dims)
if context.train and self.dropout_rate != 0.0:
h = mtf.dropout(h, 1.0 - self.dropout_rate,
noise_shape=h.shape - context.length_dim)
return mtf.layers.dense(h, io_channels, use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
reduced_dims=h.shape.dims[-1:],
name="wo", expert_dims=expert_dims)
def attention_params(context,
kv_dim,
num_heads,
num_memory_heads=0,
shared_kv=False):
"""Attention Parameters for Transformer Layers.
The num_heads argument indicates the number of read-heads.
For the familiar behavior described in "Attention Is All You Need", set
num_memory_heads=0.
If num_memory_heads==1, then there is only a single write-head, and multiple
read-heads. This leads to faster incremental decoding, since the
recurrent state is smaller
If num_memory_heads > 1, then num_memory_heads indicates the number of
write-heads. A fraction of the read-heads read each write-head.
num_memory_heads must divide num_heads. This behavior has not yet been tested.
Args:
context: a transformer.Context
kv_dim: a dimension (for key and value channels)
num_heads: an integer
num_memory_heads: an optional integer
shared_kv: a boolean
Returns:
an attention.AttentionParams object
"""
if num_heads == 1:
query_heads_dims = None
memory_heads_dims = None
elif num_memory_heads == 0:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = query_heads_dims
elif num_memory_heads == 1:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = None
else:
if num_heads % num_memory_heads != 0:
raise ValueError("num_memory_heads must divide num_heads")
memory_heads_dims = [mtf.Dimension("heads", num_memory_heads)]
query_heads_dims = memory_heads_dims + [
mtf.Dimension("query_heads", num_heads // num_memory_heads)]
return attention.AttentionParams(
context.mesh,
query_input_dim=context.model.model_dim,
memory_input_dim=context.model.model_dim,
output_dim=context.model.model_dim,
key_dim=kv_dim,
value_dim=kv_dim,
query_heads_dims=query_heads_dims,
memory_heads_dims=memory_heads_dims,
variable_dtype=context.variable_dtype,
shared_kv=shared_kv,
ensemble_dim=context.model.ensemble_dim)
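# Illustrative note (numbers are an example, not from the original source):
# with num_heads=8 and num_memory_heads=1, this builds eight query read-heads
# that share a single key/value write-head, so the recurrent key/value state
# kept during incremental decoding is roughly 8x smaller than with
# num_memory_heads=0.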
@gin.configurable
class SelfAttention(transformer.TransformerLayer):
"""Multi-head self-attention layer."""
def __init__(self,
num_heads=8,
num_memory_heads=0,
key_value_size=128,
shared_kv=False,
dropout_rate=0.0,
attention_kwargs=None,
relative_attention_type=None,
relative_attention_num_buckets=32):
"""Create a SelfAttention Layer.
Args:
num_heads: an integer
num_memory_heads: an optional integer
key_value_size: an integer
shared_kv: a boolean
dropout_rate: a float
attention_kwargs: a dictionary of kwargs for attention.attention
relative_attention_type: an optional string - one of
(None, "bias", "bias_shared", "contextual")
relative_attention_num_buckets: an integer
"""
self.num_heads = num_heads
self.num_memory_heads = num_memory_heads
self.key_value_size = key_value_size
self.shared_kv = shared_kv
self.dropout_rate = dropout_rate
self.attention_kwargs = attention_kwargs or {}
self.relative_attention_type = relative_attention_type
self.relative_attention_num_buckets = relative_attention_num_buckets
def attention_kwargs_from_context(self, context):
kwargs = copy.copy(self.attention_kwargs)
kwargs["dropout_rate"] = self.dropout_rate if context.train else 0.0
if "dropout_broadcast_dims" not in kwargs:
kwargs["dropout_broadcast_dims"] = [context.length_dim]
return kwargs
def make_params(self, context):
return attention_params(context=context,
kv_dim=self.kv_dim,
num_heads=self.num_heads,
num_memory_heads=self.num_memory_heads,
shared_kv=self.shared_kv)
def call(self, context, x, losses=None):
"""Call the layer."""
params = self.make_params(context)
q = params.compute_q(x)
memory_length = self.memory_length(context)
if context.mode == "incremental":
m = x
else:
m = mtf.replace_dimensions(x, context.length_dim, memory_length)
if self.shared_kv:
kv = params.compute_kv(m)
else:
k = params.compute_k(m)
v = params.compute_v(m)
if context.mode == "incremental":
one_hot = mtf.one_hot(
context.position, memory_length, dtype=context.activation_dtype)
inv_one_hot = 1.0 - one_hot
if self.shared_kv:
old_kv = context.get_states(1)
kv = old_kv * inv_one_hot + kv * one_hot
else:
old_k, old_v = context.get_states(2)
k = old_k * inv_one_hot + k * one_hot
v = old_v * inv_one_hot + v * one_hot
memory_position = mtf.range(context.mesh, memory_length, tf.int32)
else:
memory_position = self.rename_length_to_memory_length(
context.position, context)
if context.mode == "incremental" or context.mode == "first_part":
context.record_new_states([kv] if self.shared_kv else [k, v])
if self.shared_kv:
k = kv
v = kv
o = attention.attention(
q, k, v,
memory_length,
self.kv_dim,
self.kv_dim,
self.compute_bias(context, memory_position, x),
**self.attention_kwargs_from_context(context))
return params.compute_output(o, output_shape=x.shape)
def compute_bias(self, context, memory_position, x):
"""Compute attention bias.
Args:
context: a transformer.Context
memory_position: an int32 tensor containing memory_length dimension.
x: a Tensor - the query antecedent - required for relative attention
Returns:
a Tensor or None
"""
min_relative_position = self.min_relative_position(context)
max_relative_position = self.max_relative_position(context)
# we can often cache the result of this function between similar layers
can_cache = (
self.relative_attention_type is None or
self.relative_attention_type == "bias_shared")
if can_cache:
cache_key = ("self_attention_mask",
min_relative_position,
max_relative_position,
self.relative_attention_type,
self.num_heads)
if cache_key in context.cache:
return context.cache[cache_key]
biases = []
relative_position = memory_position - context.position
if min_relative_position is not None:
visible = mtf.greater_equal(relative_position, min_relative_position)
biases.append(attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype))
if max_relative_position is not None:
visible = mtf.less_equal(relative_position, max_relative_position)
biases.append(attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype))
if context.read_priority is not None:
visible = mtf.greater_equal(
context.read_priority,
mtf.layers.rename_length_to_memory_length(context.write_priority))
biases.append(attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype))
sequence_id = None
# Subsequence id should only be set if we are in the decoder and have
# multiple targets per input. This will allow each sub-target to only attend
# to itself.
if isinstance(context.subsequence_id, mtf.Tensor):
sequence_id = context.subsequence_id
elif isinstance(context.sequence_id, mtf.Tensor):
sequence_id = context.sequence_id
if (sequence_id is not None and context.length_dim in sequence_id.shape):
visible = mtf.equal(
sequence_id,
self.rename_length_to_memory_length(sequence_id, context))
biases.append(attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype))
if self.relative_attention_type is not None:
buckets_dim = mtf.Dimension(
"buckets", self.relative_attention_num_buckets)
heads_dim = mtf.Dimension("heads", self.num_heads)
bidirectional = not context.model.fully_autoregressive
rp_bucket = _relative_position_bucket(
relative_position,
bidirectional=bidirectional,
num_buckets=buckets_dim.size)
if (self.relative_attention_type == "bias" or
self.relative_attention_type == "bias_shared"):
bias_shape = [heads_dim, buckets_dim]
if context.model.ensemble_dim:
bias_shape = [context.model.ensemble_dim] + bias_shape
values = mtf.get_variable(
context.mesh, "relative_attention_bias",
bias_shape, dtype=context.variable_dtype)
elif self.relative_attention_type == "contextual":
if context.model.ensemble_dim:
expert_dims = [context.model.ensemble_dim]
else:
expert_dims = None
values = layers.dense(
x, [buckets_dim, heads_dim],
variable_dtype=context.variable_dtype,
name="relative_attention_contextual",
expert_dims=expert_dims)
else:
raise ValueError("unrecognized relative_attention_type \"%s\"" %
self.relative_attention_type)
biases.append(mtf.gather(values, rp_bucket, buckets_dim))
ret = mtf.add_n(biases) if biases else None
if can_cache:
context.cache[cache_key] = ret
return ret
@property
def kv_dim(self):
return mtf.Dimension("d_kv", self.key_value_size)
def memory_length(self, context):
return mtf.Dimension("memory_length", context.length_dim.size)
def rename_length_to_memory_length(self, x, context):
return mtf.replace_dimensions(
x, context.length_dim, self.memory_length(context))
def min_relative_position(self, context):
return None
def max_relative_position(self, context):
return None
@gin.configurable
class EncDecAttention(SelfAttention):
"""Multi-head attention over encoder output."""
def _get_memory_antecedent(self, context):
return context.encoder_output
def call(self, context, x, losses=None):
"""Call the layer."""
memory_antecedent = self._get_memory_antecedent(context)
memory_input_dim = memory_antecedent.shape[-1]
if memory_input_dim != context.model.model_dim:
raise NotImplementedError(
"TODO(noam): support different model_dim in encoder and decoder.")
params = self.make_params(context)
q = params.compute_q(x)
if context.mode == "incremental":
k, v, memory_length = context.get_constant_state()
else:
m = memory_antecedent
if self.shared_kv:
kv = params.compute_kv(m)
k = kv
v = kv
else:
k = params.compute_k(m)
v = params.compute_v(m)
memory_length, = [d for d in m.shape.dims if d.name == "memory_length"]
if context.mode == "first_part":
context.record_constant_state((k, v, memory_length))
if context.encoder_sequence_id and context.sequence_id:
visible = mtf.equal(context.sequence_id, context.encoder_sequence_id)
bias = attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype)
else:
bias = None
o = attention.attention(
q, k, v,
memory_length,
self.kv_dim,
self.kv_dim,
bias,
**self.attention_kwargs_from_context(context))
return params.compute_output(o, output_shape=x.shape)
@gin.configurable
class TransparentEncDecAttention(EncDecAttention):
"""Transparent multi-head attention over encoder output."""
def __init__(self,
layers_per_encoder_module=gin.REQUIRED,
layers_per_decoder_module=gin.REQUIRED,
encoder_num_modules=gin.REQUIRED,
decoder_num_modules=gin.REQUIRED,
dropout_rate=0.0,
**kwargs):
"""Create a transparent attention EncDec Layer.
Args:
layers_per_encoder_module: positive integer telling how many layers are in
each repeated module in the encoder
layers_per_decoder_module: positive integer telling how many layers are in
each repeated module in the decoder
encoder_num_modules: positive integer of how many repeated modules there
are in the encoder
decoder_num_modules: positive integer of how many repeated modules there
are in the decoder
dropout_rate: positive float, the dropout rate for the matrix relating
encoder outputs to decoder inputs
**kwargs: additional constructor params
"""
super(TransparentEncDecAttention, self).__init__(**kwargs)
self.layers_per_encoder_module = layers_per_encoder_module
self.layers_per_decoder_module = layers_per_decoder_module
self.encoder_num_modules = encoder_num_modules
self.decoder_num_modules = decoder_num_modules
self.dropout_rate = dropout_rate
def _get_memory_antecedent(self, context):
decoder_module_index = context.layer_index // self.layers_per_decoder_module
decoder_inputs = self._get_decoder_inputs(context)
return decoder_inputs[decoder_module_index]
def _get_decoder_inputs(self, context):
"""Computes the inputs to the decoder when using transparent attention.
We must cache on the context in order to ensure that we are not replicating
variables when the layer's call function is called in different tf variable
scopes.
Args:
context: a Context
Returns:
a list containing `self.num_decoder_modules` of tensors with shape
[<batch_dims>, length_dim, output_vocab_dim]
"""
if hasattr(context, "decoder_layers_per_module"):
return context.decoder_layers_per_module
encoder_layer_outputs = [
mtf.layers.rename_length_to_memory_length(output)
for output in context.encoder_layer_outputs
]
layers_per_module = self.layers_per_encoder_module
encoder_module_outputs_dim = mtf.Dimension(
"encoder_module_outputs", size=self.encoder_num_modules + 1)
decoder_module_inputs_dim = mtf.Dimension(
"decoder_module_inputs", size=self.decoder_num_modules)
encoder_module_outputs = mtf.stack(
[encoder_layer_outputs[0]] +
encoder_layer_outputs[layers_per_module::layers_per_module],
dim_name="encoder_module_outputs")
w = mtf.get_variable(
context.mesh,
"w",
mtf.Shape([encoder_module_outputs_dim, decoder_module_inputs_dim]),
initializer=tf.random_normal_initializer(
stddev=(encoder_module_outputs_dim.size *
decoder_module_inputs_dim.size)**-0.5),
dtype=context.variable_dtype)
if context.train and self.dropout_rate != 0.0:
w = mtf.dropout(w, 1.0 - self.dropout_rate)
s = mtf.softmax(w, reduced_dim=encoder_module_outputs_dim)
z = mtf.einsum([s, encoder_module_outputs],
reduced_dims=[encoder_module_outputs_dim])
input_per_decoder = mtf.split(
z,
split_dim=decoder_module_inputs_dim,
num_or_size_splits=decoder_module_inputs_dim.size)
context.decoder_layers_per_module = [
mtf.reshape(inpt, z.shape.dims[1:]) for inpt in input_per_decoder
]
return context.decoder_layers_per_module
@gin.configurable
class LocalSelfAttention(SelfAttention):
"""Multi-head local self-attention layer."""
def __init__(self,
radius=128,
num_heads=8,
num_memory_heads=0,
key_value_size=128,
shared_kv=False,
dropout_rate=0.0,
attention_kwargs=None,):
super(LocalSelfAttention, self).__init__(
num_heads,
num_memory_heads,
key_value_size,
shared_kv,
dropout_rate,
attention_kwargs)
self.radius = radius
def call(self, context, x, losses=None):
"""Call the layer."""
params = self.make_params(context)
q = params.compute_q(x)
if self.shared_kv:
kv = params.compute_kv(x)
k = kv
v = kv
else:
k = params.compute_k(x)
v = params.compute_v(x)
if context.mode == "incremental":
if self.shared_kv:
prev_kv, = context.get_states(1)
else:
prev_k, prev_v = context.get_states(2)
current_position = mtf.equal(
mtf.range(context.mesh, self.window_dim, dtype=tf.int32),
mtf.mod(context.position, self.radius))
if self.shared_kv:
kv = mtf.where(current_position, kv, prev_kv,
output_shape=prev_kv.shape)
k = kv
v = kv
context.record_new_states([kv])
else:
k = mtf.where(current_position, params.compute_k(x), prev_k,
output_shape=prev_k.shape)
v = mtf.where(current_position, params.compute_v(x), prev_v,
output_shape=prev_v.shape)
context.record_new_states([k, v])
window_pos = mtf.range(context.mesh, self.window_dim, tf.int32)
visible = mtf.greater_equal(context.position, window_pos)
bias = attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype)
o = attention.attention(
q,
k,
v,
self.window_dim,
self.kv_dim,
self.kv_dim,
bias,
**self.attention_kwargs_from_context(context))
elif context.length_dim.size <= max(256, self.radius * 4):
# nothing fancy - just do full attention and mask
memory_length = self.rename_length_to_memory_length(
context.position, context)
o = attention.attention(
q,
self.rename_length_to_memory_length(k, context),
self.rename_length_to_memory_length(v, context),
self.memory_length(context),
self.kv_dim,
self.kv_dim,
self.compute_bias(context, memory_length, x),
**self.attention_kwargs_from_context(context))
else:
# fancy local attention algorithm
o = attention.local_attention_1d(
q=q,
k=k,
v=None if self.shared_kv else v,
length_dim=context.length_dim,
key_dim=self.kv_dim,
value_dim=self.kv_dim,
length_dim_num_splits=1, # TODO(noam): look at the layout
autoregressive=context.model.fully_autoregressive,
radius=self.radius,
sequence_id=context.sequence_id,
write_priority=context.write_priority,
read_priority=context.read_priority,
attention_kwargs=self.attention_kwargs_from_context(context))
if context.mode == "first_part":
window_pos = mtf.range(context.mesh, self.window_dim, tf.int32)
pos = mtf.range(context.mesh, context.length_dim, tf.int32)
select_recent = mtf.cast(
mtf.equal(mtf.mod(pos, self.radius), window_pos), x.dtype)
select_recent *= mtf.cast(
mtf.less(pos, context.initial_position), x.dtype)
select_recent *= mtf.cast(
mtf.greater_equal(
pos, context.initial_position - self.radius), x.dtype)
state_shape = (k.shape - [context.length_dim, self.kv_dim]
+ [self.window_dim, self.kv_dim])
k_state = mtf.einsum(
[k, select_recent], output_shape=state_shape,
reduced_dims=[context.length_dim])
context.new_states.append(k_state)
if not self.shared_kv:
v_state = mtf.einsum(
[v, select_recent], output_shape=state_shape,
reduced_dims=[context.length_dim])
context.new_states.append(v_state)
return params.compute_output(o, output_shape=x.shape)
def min_relative_position(self, context):
return 1 - self.radius
def max_relative_position(self, context):
return None if context.model.fully_autoregressive else self.radius
@property
def window_dim(self):
return mtf.Dimension("window", self.radius)
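# Note (reading of the code above): min_relative_position / max_relative_position
# restrict each query to memory positions whose offset (memory - query) lies in
# [1 - radius, radius], with the upper bound dropped for fully autoregressive
# models where the causal mask already forbids attending to the future.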
def _relative_position_bucket(relative_position,
bidirectional=True,
num_buckets=32,
max_distance=128):
"""Translate relative position to a bucket number for relative attention.
The relative position is defined as memory_position - query_position, i.e.
the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are
invalid.
We use smaller buckets for small absolute relative_position and larger buckets
for larger absolute relative_positions. All relative positions >=max_distance
map to the same bucket. All relative positions <=-max_distance map to the
same bucket. This should allow for more graceful generalization to longer
sequences than the model has been trained on.
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32
values in the range [0, num_buckets)
"""
ret = 0
n = -relative_position
if bidirectional:
num_buckets //= 2
ret += mtf.to_int32(mtf.less(n, 0)) * num_buckets
n = mtf.abs(n)
else:
n = mtf.maximum(n, 0)
# now n is in the range [0, inf)
max_exact = num_buckets // 2
is_small = mtf.less(n, max_exact)
val_if_large = max_exact + mtf.to_int32(
mtf.log(mtf.to_float(n) / max_exact)
/ math.log(max_distance / max_exact) * (num_buckets - max_exact))
val_if_large = mtf.minimum(val_if_large, num_buckets - 1)
ret += mtf.where(is_small, n, val_if_large)
return ret
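# --- Illustration only; not part of the original module. ---
# A plain-NumPy sketch of the bucketing rule above, handy for checking how
# concrete distances map to buckets without building a mesh-tensorflow graph.
# The helper name and the example figures in the trailing comment are
# assumptions made for this sketch.
def _relative_position_bucket_np(relative_position, bidirectional=True,
                                 num_buckets=32, max_distance=128):
  import math
  import numpy as np
  n = -np.asarray(relative_position, dtype=np.int64)
  ret = np.zeros_like(n)
  if bidirectional:
    num_buckets //= 2
    ret += (n < 0).astype(n.dtype) * num_buckets
    n = np.abs(n)
  else:
    n = np.maximum(n, 0)
  max_exact = num_buckets // 2
  is_small = n < max_exact
  # np.maximum(n, 1) only guards the log against n == 0; those entries are
  # overridden by np.where because is_small is True for them anyway.
  val_if_large = max_exact + (
      np.log(np.maximum(n, 1) / max_exact)
      / math.log(max_distance / max_exact) * (num_buckets - max_exact)
  ).astype(n.dtype)
  val_if_large = np.minimum(val_if_large, num_buckets - 1)
  return ret + np.where(is_small, n, val_if_large)
# For example, with the defaults, relative distances 1..7 each keep their own
# bucket, while distances such as 100 and 120 share one coarser log-spaced bucket.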
``` |
{
"source": "jinouwuque/movieDesignationManager",
"score": 3
} |
#### File: movieDesignationManager/jobs/do_csv.py
```python
from src.FileWalker import FileWalker
from src.CsvWriter import CsvWriter
from local.constant_var import paths
def do_csv():
walker = FileWalker(paths)
list = walker.getList()
map = {
'company': 'c',
'id': 'i',
'movie': 'm',
'image': 'im',
'url': 'r'
}
print("generating the CSV file ... ")
wr = CsvWriter()
wr.start(map, list)
print('================')
print('Finished!')
print('================')
``` |
{
"source": "Jinpachi5/GroovyToo",
"score": 3
} |
#### File: GroovyToo/cogs/clear.py
```python
import discord
from discord.ext import commands
import asyncio
class Clear(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command()
async def clear(self, ctx, amount = 1):
if amount <= 100:
await ctx.channel.purge(limit = amount + 1)
@commands.command()
async def clearAll(self, ctx):
messages = await ctx.channel.history(limit = 100).flatten()
while len(messages) >= 1:
await ctx.channel.purge(limit = 100)
await asyncio.sleep(1)
messages = await ctx.channel.history(limit = 100).flatten()
await asyncio.sleep(1)
def setup(client):
client.add_cog(Clear(client))
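# Usage note (assumption, based on the commands above): with the bot's command
# prefix, "<prefix>clear 10" deletes the last 10 messages plus the invoking
# command, and "<prefix>clearAll" purges the channel in batches of 100 with a
# one-second pause between batches.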
``` |
{
"source": "jinpark/something-sunshine-django",
"score": 2
} |
#### File: something-sunshine-django/episodes/models.py
```python
from django.db import models
from django.utils import timezone
from s3direct.fields import S3DirectField
import requests
import datetime
class Episode(models.Model):
title = models.CharField(max_length=100, blank=True, default='')
audio_file_path = S3DirectField(dest='episode_audio')
thumbnail = S3DirectField(dest='episode_thumbnail')
description = models.TextField()
tags = models.ManyToManyField('Tag', blank=True)
number = models.PositiveIntegerField(default=1, help_text="The episode number")
created = models.DateTimeField(default=timezone.now)
modified = models.DateTimeField(default=timezone.now)
duration = models.CharField(blank=False, max_length=8, default='00:00', help_text='Duration of the audio file, in MM:SS or HH:MM:SS format')
file_size = models.PositiveIntegerField()
show_notes = models.TextField(help_text="Show notes here!")
def __unicode__(self):
return u'{}'.format(self.title)
def save(self, *args, **kwargs):
''' On save, update timestamps and file size'''
if not self.id:
self.created = datetime.datetime.today()
self.modified = datetime.datetime.today()
if self.audio_file_path:
# bleh, this is a bad way to do this
r = requests.head(self.audio_file_path)
self.file_size = float(r.headers['content-length'])
return super(Episode, self).save(*args, **kwargs)
class Meta:
ordering = ('created',)
class Tag(models.Model):
name = models.CharField(max_length=50, blank=False)
image_file = S3DirectField(dest='tag_image')
created = models.DateTimeField(editable=False, default=timezone.now)
modified = models.DateTimeField(default=timezone.now)
def __unicode__(self):
return u'{}'.format(self.name)
def save(self, *args, **kwargs):
''' On save, update timestamps '''
if not self.id:
self.created = datetime.datetime.today()
self.modified = datetime.datetime.today()
return super(Tag, self).save(*args, **kwargs)
class Meta:
ordering = ('created',)
```
#### File: episodes/templatetags/episode_extras.py
```python
from django import template
from django.template.defaultfilters import stringfilter
from django.conf import settings
from urlparse import urlparse
register = template.Library()
@register.filter(is_safe=True)
@stringfilter
def xml_escape(string):
"""Replaces all unescaped xml characters"""
    return string.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')
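# Example (illustration): xml_escape('Tom & "Jerry" <3') returns
# 'Tom &amp; &quot;Jerry&quot; &lt;3'.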
@register.simple_tag
def static_url(url):
""" returns the static url-ed version of the path, and not the s3 version """
full_s3_path = urlparse(url).path
relative_path = "/".join(full_s3_path.split('/')[2:])
return u"{}{}".format(settings.STATIC_BUCKET_URL, relative_path)
``` |
{
"source": "jinpeng01/WGSum",
"score": 2
} |
#### File: WGSum/graph_construction/graph_construction.py
```python
import stanza
import os
import json
from tqdm import tqdm
import sys,os
import pickle
import numpy as np
import sys,os
sys.path.append('/data/data/hujingpeng/sumwithGraph')
nlp = stanza.Pipeline('en', package='mimic', processors={'ner': 'radiology'})
nlp2 = stanza.Pipeline('en', package='mimic', processors='tokenize')
EMB_INIT_RANGE = 1.0
def get_single_entity_graph(document,impression,entity_modified=True,entity_interval=True,entity_deparser=True):
doc = nlp(document)
imp = nlp2(impression)
fingings_list = []
impression_list = []
current_senquence_num = 0
edges = []
edge_words = []
edges_type = dict()
edges_type['deparser'] = []
edges_type['modified'] = []
edges_type['interval'] = []
for sentence in imp.sentences:
for i in range(len(sentence.words)):
impression_list.append(sentence.words[i].text)
for sentence in doc.sentences:
for i in range(len(sentence.words)):
fingings_list.append(sentence.words[i].text)
entities_sentence = []
entities_id_sentence = []
words_entities_sentence = []
words_id_entities_sentence = []
entities_type = []
edges_sentence = []
edges_word_sentence = []
entities_type_dict = dict()
edges_type_sentence = dict()
edges_type_sentence['deparser'] = []
edges_type_sentence['modified'] = []
edges_type_sentence['interval'] = []
for i in range(len(sentence.tokens)):
token = sentence.tokens[i]
ent_token = token.ner
text_token = token.text
id_token = token.id[0]
if ent_token!='O':
ent_index = ent_token.split('-')[0]
words_entities_sentence.append(text_token)
words_id_entities_sentence.append(id_token)
current_ent_type = ent_token.split('-')[-1]
if ent_index == 'S':
entities_sentence.append([text_token])
entities_id_sentence.append([id_token])
entities_type.append(current_ent_type)
if current_ent_type not in entities_type_dict:
entities_type_dict[current_ent_type] = [id_token]
else:
entities_type_dict[current_ent_type].append(id_token)
elif ent_index == 'B':
entities_sentence.append([text_token])
entities_id_sentence.append([id_token])
elif ent_index == 'I':
try:
entities_sentence[-1].append(text_token)
entities_id_sentence[-1].append(id_token)
except:
entities_sentence.append([text_token])
entities_id_sentence.append([id_token])
elif ent_index == 'E':
entities_sentence[-1].append(text_token)
entities_id_sentence[-1].append(id_token)
entities_type.append(ent_token.split('-')[-1])
if current_ent_type not in entities_type_dict:
entities_type_dict[current_ent_type] = entities_id_sentence[-1]
else:
entities_type_dict[current_ent_type] = entities_type_dict[current_ent_type]+entities_id_sentence[-1]
if entity_deparser:
if 'deparser' not in edges_type_sentence.keys():
edges_type_sentence['deparser'] = []
for word in sentence.words:
word_id = word.id
word_head = word.head
if word_id in words_id_entities_sentence and word_head in words_id_entities_sentence:
word_doc_id = word_id+current_senquence_num-1
word_doc_head = word_head+current_senquence_num-1
if word_doc_id>=0 and word_doc_head>=0 and [word_doc_id,word_doc_head] not in edges_sentence\
and [word_doc_head,word_doc_id] not in edges_sentence:
edges_sentence.append([word_doc_id,word_doc_head])
edges_word_sentence.append([sentence.words[word_id-1].text,sentence.words[word_head-1].text])
if word_doc_id>=0 and word_doc_head>=0:
edges_type_sentence['deparser'].append([word_doc_id,word_doc_head])
# print('parser', sentence.words[word_id-1].text,sentence.words[word_head-1].text)
if entity_modified:
if 'modified' not in edges_type_sentence.keys():
edges_type_sentence['modified'] = []
if 'ANATOMY' in entities_type_dict and 'ANATOMY_MODIFIER' in entities_type_dict:
anatomy_ids = entities_type_dict['ANATOMY']
anatomy_modifier_ids = entities_type_dict['ANATOMY_MODIFIER']
for anatomy_id in anatomy_ids:
for anatomy_modifier_id in anatomy_modifier_ids:
anatomy_doc_id = anatomy_id + current_senquence_num - 1
anatomy_modifier_doc_id = anatomy_modifier_id + current_senquence_num - 1
if anatomy_doc_id >= 0 and anatomy_modifier_doc_id >= 0 and [anatomy_doc_id,anatomy_modifier_doc_id] not in edges_sentence\
and [anatomy_modifier_doc_id,anatomy_doc_id] not in edges_sentence:
edges_sentence.append([anatomy_doc_id, anatomy_modifier_doc_id])
edges_word_sentence.append(
[sentence.words[anatomy_id - 1].text, sentence.words[anatomy_modifier_id - 1].text])
if anatomy_doc_id >= 0 and anatomy_modifier_doc_id >= 0:
edges_type_sentence['modified'].append([anatomy_doc_id, anatomy_modifier_doc_id])
# print('anatomy', sentence.words[anatomy_id - 1].text,
# sentence.words[anatomy_modifier_id - 1].text)
if 'OBSERVATION' in entities_type_dict and 'OBSERVATION_MODIFIER' in entities_type_dict:
observation_ids = entities_type_dict['OBSERVATION']
observation_modifier_ids = entities_type_dict['OBSERVATION_MODIFIER']
for observation_id in observation_ids:
for observation_modifier_id in observation_modifier_ids:
observation_doc_id = observation_id + current_senquence_num - 1
observation_modifier_doc_id = observation_modifier_id + current_senquence_num - 1
if observation_doc_id >= 0 and observation_modifier_doc_id >= 0 and \
[observation_doc_id, observation_modifier_doc_id] not in edges_sentence\
and [observation_modifier_doc_id, observation_doc_id] not in edges_sentence:
edges_sentence.append([observation_doc_id, observation_modifier_doc_id])
edges_word_sentence.append(
[sentence.words[observation_id - 1].text, sentence.words[observation_modifier_id - 1].text])
if observation_doc_id >= 0 and observation_modifier_doc_id >= 0:
edges_type_sentence['modified'].append([observation_doc_id, observation_modifier_doc_id])
# print('observation_id', sentence.words[observation_id - 1].text,
# sentence.words[observation_modifier_id - 1].text)
if entity_interval:
if 'interval' not in edges_type_sentence.keys():
edges_type_sentence['interval'] = []
for m in range(len(entities_id_sentence)):
entity_length = len(entities_id_sentence[m])
if entity_length>1:
for n in range(entity_length-1):
current_id = entities_id_sentence[m][n]
current_tag_id = entities_id_sentence[m][n+1]
current_doc_id = current_id+current_senquence_num-1
current_doc_tag_id = current_tag_id +current_senquence_num-1
if current_doc_id>=0 and current_doc_tag_id>=0 and [current_doc_id,current_doc_tag_id] not in edges_sentence \
and [current_doc_tag_id, current_doc_id] not in edges_sentence :
edges_sentence.append([current_doc_id, current_doc_tag_id])
edges_word_sentence.append([sentence.words[current_id-1].text,sentence.words[current_tag_id-1].text])
if current_doc_id>=0 and current_doc_tag_id>=0:
edges_type_sentence['interval'].append([current_doc_id, current_doc_tag_id])
# print('interval entity', sentence.words[current_doc_id - 1].text,
# sentence.words[current_tag_id - 1].text)
current_senquence_num = current_senquence_num + len(sentence.words)
edges_type['deparser'] = edges_type['deparser'] + edges_type_sentence['deparser']
edges_type['modified'] = edges_type['modified'] + edges_type_sentence['modified']
edges_type['interval'] = edges_type['interval'] + edges_type_sentence['interval']
edges = edges + edges_sentence
edge_words = edge_words+edges_word_sentence
pyg_edges_document = []
src_index = []
tag_index = []
for edge_item in edges:
src_index.append(edge_item[0])
tag_index.append(edge_item[1])
pyg_edges_document.append(src_index)
pyg_edges_document.append(tag_index)
return pyg_edges_document,edge_words,fingings_list,impression_list,edges_type
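# Summary of the edge types returned in `edges_type` above:
#   'deparser' - dependency links (token -> its head) where both tokens belong
#                to some named entity in the same sentence
#   'modified' - links between ANATOMY and ANATOMY_MODIFIER tokens, and between
#                OBSERVATION and OBSERVATION_MODIFIER tokens
#   'interval' - links between consecutive tokens inside one multi-token entity
# All indices are token positions in the whole findings document (sentence-local
# ids shifted by `current_senquence_num`).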
def build_entity_graph(data_path,entity_modified=True,entity_interval=True,entity_deparser=True):
file = open(data_path, 'r', encoding='utf-8')
lines = file.readlines()
num_line = len(lines)
new_json_path = data_path.replace('.jsonl', '')
name_type = '_with_entity'
if entity_modified:
name_type = name_type + '_modified'
if entity_interval:
name_type = name_type + '_interval'
if entity_deparser:
name_type = name_type + '_deparser'
new_json_path = new_json_path + name_type + '.jsonl'
if (os.path.exists(new_json_path)):
        print('already exists: ' + new_json_path)
return new_json_path
else:
new_json_file = open(new_json_path, 'w', encoding='utf-8')
for i in tqdm(range(num_line)):
dic_items = json.loads(lines[i])
findings_list = dic_items['findings']
findings = ' '.join(findings_list)
impression_list = dic_items['impression']
impression = ' '.join(impression_list)
edges_with_nodeid = []
edges,edge_words,fingings_list,impression_list,edges_type_sentence = get_single_entity_graph(findings,impression,entity_modified=entity_modified,
entity_interval=entity_interval,
entity_deparser=entity_deparser)
dic_items['pyg_edges_document'] = edges
dic_items['findings'] = fingings_list
dic_items['impression'] = impression_list
dic_items['edge_words'] = edge_words
nodes = []
finding_list = dic_items['findings']
edges = dic_items['pyg_edges_document']
src_index = edges[0]
tag_index = edges[1]
src_node_index = []
tag_node_index = []
word_dict = dict()
for k in range(len(tag_index)):
src_word = finding_list[src_index[k]]
tag_word = finding_list[tag_index[k]]
if src_word not in word_dict:
word_dict[src_word] = len(word_dict)
nodes.append(src_word)
if tag_word not in word_dict:
word_dict[tag_word] = len(word_dict)
nodes.append(tag_word)
src_node_index.append(word_dict[src_word])
tag_node_index.append(word_dict[tag_word])
edges_with_nodeid.append(src_node_index)
edges_with_nodeid.append(tag_node_index)
edges_modified = edges_type_sentence['modified']
edges_deparser = edges_type_sentence['deparser']
edges_interval = edges_type_sentence['interval']
edges_modified_with_nodeid = []
edges_deparser_with_nodeid = []
edges_interval_with_nodeid = []
if len(edges_modified)>0:
modified_src_node_index = []
modified_tag_node_index = []
for k in range(len(edges_modified)):
src_word = finding_list[edges_modified[k][0]]
tag_word = finding_list[edges_modified[k][1]]
modified_src_node_index.append(word_dict[src_word])
modified_tag_node_index.append(word_dict[tag_word])
edges_modified_with_nodeid.append(modified_src_node_index)
edges_modified_with_nodeid.append(modified_tag_node_index)
if len(edges_deparser)>0:
deparser_src_node_index = []
deparser_tag_node_index = []
for k in range(len(edges_deparser)):
src_word = finding_list[edges_deparser[k][0]]
tag_word = finding_list[edges_deparser[k][1]]
deparser_src_node_index.append(word_dict[src_word])
deparser_tag_node_index.append(word_dict[tag_word])
edges_deparser_with_nodeid.append(deparser_src_node_index)
edges_deparser_with_nodeid.append(deparser_tag_node_index)
if len(edges_interval)>0:
interval_src_node_index = []
interval_tag_node_index = []
for k in range(len(edges_interval)):
src_word = finding_list[edges_interval[k][0]]
tag_word = finding_list[edges_interval[k][1]]
interval_src_node_index.append(word_dict[src_word])
interval_tag_node_index.append(word_dict[tag_word])
edges_interval_with_nodeid.append(interval_src_node_index)
edges_interval_with_nodeid.append(interval_tag_node_index)
dic_items['nodes'] = nodes
dic_items['edges_with_nodeid'] = edges_with_nodeid
dic_items['edges_interval_with_nodeid'] = edges_interval_with_nodeid
dic_items['edges_modified_with_nodeid'] = edges_modified_with_nodeid
dic_items['edges_deparser_with_nodeid'] = edges_deparser_with_nodeid
if len(fingings_list)>10 and len(impression_list)>3:
print(json.dumps(dic_items), file=new_json_file)
# radiology
def add_edge_words(data_path):
file = open(data_path, 'r', encoding='utf-8')
lines = file.readlines()
num_line = len(lines)
new_json_path = data_path.replace('.jsonl', '')
new_json_path = new_json_path + '_with_entity_graph_node' + '.jsonl'
if (os.path.exists(new_json_path)):
        print('already exists: ' + new_json_path)
return new_json_path
else:
new_json_file = open(new_json_path, 'w', encoding='utf-8')
for i in tqdm(range(num_line)):
dic_items = json.loads(lines[i])
edge_words = []
edges_with_nodeid = []
nodes = []
finding_list = dic_items['findings']
edges = dic_items['pyg_edges_document']
src_index = edges[0]
tag_index = edges[1]
src_node_index = []
tag_node_index = []
word_dict = dict()
for k in range(len(tag_index)):
src_word = finding_list[src_index[k]]
tag_word = finding_list[tag_index[k]]
if src_word not in word_dict:
word_dict[src_word] = len(word_dict)
nodes.append(src_word)
if tag_word not in word_dict:
word_dict[tag_word] = len(word_dict)
nodes.append(tag_word)
edge_words.append([src_word,tag_word])
src_node_index.append(word_dict[src_word])
tag_node_index.append(word_dict[tag_word])
edges_with_nodeid.append(src_node_index)
edges_with_nodeid.append(tag_node_index)
dic_items['edge_words'] = edge_words
dic_items['nodes'] = nodes
dic_items['edges_with_nodeid'] = edges_with_nodeid
print(json.dumps(dic_items), file=new_json_file)
def obtain_word_pair_for(data_path):
file = open(data_path, 'r', encoding='utf-8')
lines = file.readlines()
num_line = len(lines)
new_json_path = data_path.replace('.jsonl', '')
new_json_path = new_json_path + '_words_pair' + '.jsonl'
if (os.path.exists(new_json_path)):
        print('already exists: ' + new_json_path)
return new_json_path
else:
new_json_file = open(new_json_path, 'w', encoding='utf-8')
for i in tqdm(range(num_line)):
dic_items = json.loads(lines[i])
edge_words = []
edges_with_nodeid = []
nodes = []
finding_list = dic_items['findings']
edges = dic_items['pyg_edges_document']
edges_interval_with_nodeid = dic_items['edges_interval_with_nodeid']
edges_modified_with_nodeid = dic_items['edges_modified_with_nodeid']
edges_deparser_with_nodeid = dic_items['edges_deparser_with_nodeid']
edges_word = dic_items['edge_words']
# if len(edges_interval_with_nodeid) == 0:
# edges_interval_with_nodeid = [[0],[0]]
# if len(edges_modified_with_nodeid) == 0:
# edges_modified_with_nodeid = [[0], [0]]
# if len(edges_deparser_with_nodeid) == 0:
# edges_deparser_with_nodeid = [[0], [0]]
src_index = edges[0]
tag_index = edges[1]
node_word = dic_items['nodes']
w2i = dict()
i2w = dict()
for i in range(len(node_word)):
w2i[node_word[i]] = len(w2i)
i2w[len(i2w)] = node_word[i]
if len(edges_interval_with_nodeid) != 0 :
edges_word_interval = []
src_index_edges_interval = edges_interval_with_nodeid[0]
tag_index_edges_interval = edges_interval_with_nodeid[1]
for i in range(len(src_index_edges_interval)):
try:
pair = [i2w[src_index_edges_interval[i]],i2w[tag_index_edges_interval[i]]]
pair_ = [pair[1],pair[0]]
except:
import pdb
pdb.set_trace()
if pair not in edges_word and pair_ not in edges_word:
print('error')
edges_word_interval.append(pair)
else:
edges_word_interval = []
if len(edges_modified_with_nodeid) != 0:
edges_word_modified = []
src_index_edges_modified = edges_modified_with_nodeid[0]
tag_index_edges_modified = edges_modified_with_nodeid[1]
for i in range(len(src_index_edges_modified)):
pair = [i2w[src_index_edges_modified[i]], i2w[tag_index_edges_modified[i]]]
pair_ = [pair[1], pair[0]]
if pair not in edges_word and pair_ not in edges_word:
print('error')
edges_word_modified.append(pair)
else:
edges_word_modified = []
if len(edges_deparser_with_nodeid) != 0:
edges_word_deparser = []
src_index_edges_deparser = edges_deparser_with_nodeid[0]
tag_index_edges_deparser = edges_deparser_with_nodeid[1]
for i in range(len(src_index_edges_deparser)):
pair = [i2w[src_index_edges_deparser[i]], i2w[tag_index_edges_deparser[i]]]
pair_ = [pair[1], pair[0]]
if pair not in edges_word and pair_ not in edges_word:
print('error')
edges_word_deparser.append(pair)
else:
edges_word_deparser = []
dic_items['edges_word_deparser'] = edges_word_deparser
dic_items['edges_word_modified'] = edges_word_modified
dic_items['edges_word_interval'] = edges_word_interval
print(json.dumps(dic_items), file=new_json_file)
if __name__ == '__main__':
build_entity_graph('example.jsonl')
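    # Expected input format (inferred from the fields accessed above): each line
    # of example.jsonl is a JSON object with pre-tokenised text, e.g.
    #   {"findings": ["the", "heart", "is", "normal", "in", "size", ".",
    #                 "the", "lungs", "are", "clear", "."],
    #    "impression": ["no", "acute", "cardiopulmonary", "abnormality", "."]}
    # Records with very short findings or impressions are skipped when writing
    # the output file.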
```
#### File: src/others/utils.py
```python
import os
import re
import shutil
import time
from others import pyrouge
from pythonrouge.pythonrouge import Pythonrouge
REMAP = {"-lrb-": "(", "-rrb-": ")", "-lcb-": "{", "-rcb-": "}",
"-lsb-": "[", "-rsb-": "]", "``": '"', "''": '"'}
def clean(x):
return re.sub(
r"-lrb-|-rrb-|-lcb-|-rcb-|-lsb-|-rsb-|``|''",
lambda m: REMAP.get(m.group()), x)
def process(params):
temp_dir, data = params
candidates, references, pool_id = data
cnt = len(candidates)
current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}-{}".format(current_time, pool_id))
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
os.mkdir(tmp_dir + "/candidate")
os.mkdir(tmp_dir + "/reference")
try:
for i in range(cnt):
if len(references[i]) < 1:
continue
with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(candidates[i])
with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(references[i])
r = pyrouge.Rouge155(temp_dir=temp_dir)
r.model_dir = tmp_dir + "/reference/"
r.system_dir = tmp_dir + "/candidate/"
r.model_filename_pattern = 'ref.#ID#.txt'
r.system_filename_pattern = r'cand.(\d+).txt'
rouge_results = r.convert_and_evaluate()
print(rouge_results)
results_dict = r.output_to_dict(rouge_results)
finally:
pass
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
return results_dict
def test_rouge(temp_dir, cand, ref):
candidates = [line.strip() for line in open(cand, encoding='utf-8')]
references = [line.strip() for line in open(ref, encoding='utf-8')]
print(len(candidates))
print(len(references))
assert len(candidates) == len(references)
cnt = len(candidates)
current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}".format(current_time))
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
os.mkdir(tmp_dir + "/candidate")
os.mkdir(tmp_dir + "/reference")
try:
for i in range(cnt):
if len(references[i]) < 1:
continue
with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(candidates[i])
with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(references[i])
r = pyrouge.Rouge155(temp_dir=temp_dir)
r.model_dir = tmp_dir + "/reference/"
r.system_dir = tmp_dir + "/candidate/"
r.model_filename_pattern = 'ref.#ID#.txt'
r.system_filename_pattern = r'cand.(\d+).txt'
rouge_results = r.convert_and_evaluate()
print(rouge_results)
results_dict = r.output_to_dict(rouge_results)
finally:
pass
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
return results_dict
def tile(x, count, dim=0):
"""
Tiles x on dimension dim count times.
"""
perm = list(range(len(x.size())))
if dim != 0:
perm[0], perm[dim] = perm[dim], perm[0]
x = x.permute(perm).contiguous()
out_size = list(x.size())
out_size[0] *= count
batch = x.size(0)
x = x.view(batch, -1) \
.transpose(0, 1) \
.repeat(count, 1) \
.transpose(0, 1) \
.contiguous() \
.view(*out_size)
if dim != 0:
x = x.permute(perm).contiguous()
return x
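# Usage illustration (assumption: x is a PyTorch-style tensor, as implied by the
# permute/contiguous/view calls above). Each row group is repeated contiguously:
#   >>> import torch
#   >>> tile(torch.tensor([[1, 2], [3, 4]]), 2, dim=0)
#   tensor([[1, 2],
#           [1, 2],
#           [3, 4],
#           [3, 4]])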
def rouge_results_to_str(results_dict):
return ">> ROUGE-F(1/2/3/l): {:.2f}/{:.2f}/{:.2f}\nROUGE-R(1/2/3/l): {:.2f}/{:.2f}/{:.2f}\n".format(
results_dict["rouge_1_f_score"] * 100,
results_dict["rouge_2_f_score"] * 100,
# results_dict["rouge_3_f_score"] * 100,
results_dict["rouge_l_f_score"] * 100,
results_dict["rouge_1_recall"] * 100,
results_dict["rouge_2_recall"] * 100,
# results_dict["rouge_3_f_score"] * 100,
results_dict["rouge_l_recall"] * 100
# ,results_dict["rouge_su*_f_score"] * 100
)
def get_rouge(hypotheses, reference, sent_split=True, use_cf=False):
assert len(hypotheses) == len(reference)
assert len(hypotheses) > 0
hyps = []
refs = []
# prepare
for hyp, ref in zip(hypotheses, reference):
hyp = " ".join(hyp)
ref = " ".join(ref)
if sent_split:
hs = [x.strip() for x in hyp.split('.') if len(x.strip()) > 0]
rs = [x.strip() for x in ref.split('.') if len(x.strip()) > 0]
hyps += [hs]
refs += [[rs]]
else:
hyps += [[hyp]]
refs += [[[ref]]]
print("Calculating ROUGE...")
rouge = Pythonrouge(summary_file_exist=False, summary=hyps, reference=refs, \
n_gram=2, ROUGE_SU4=False, ROUGE_L=True, recall_only=False, stemming=False, stopwords=False, \
word_level=True, length_limit=False, use_cf=use_cf, cf=95, scoring_formula='average', \
resampling=True, samples=1000, favor=True, p=0.5)
score = rouge.calc_score()
print("ROUGE done.")
r1 = score['ROUGE-1-F'] * 100
r2 = score['ROUGE-2-F'] * 100
rl = score['ROUGE-L-F'] * 100
if not use_cf:
return r1, r2, rl
# return results_dict2
else:
r1_cf = [x * 100 for x in score['ROUGE-1-F-cf95']]
r2_cf = [x * 100 for x in score['ROUGE-2-F-cf95']]
rl_cf = [x * 100 for x in score['ROUGE-L-F-cf95']]
return r1, r2, rl, r1_cf, r2_cf, rl_cf
def rouge_results_to_str2(results_dict2):
return ">> ROUGE-F(1/2/l): {:.2f}/{:.2f}/{:.2f}\n".format(
results_dict2["rouge_1_f_score"],
results_dict2["rouge_2_f_score"],
results_dict2["rouge_l_f_score"],
)
def calculate_rouge(predict_path,real_path):
predictions = open(predict_path,'r').readlines()
# import pdb
# pdb.set_trace()
predictions = [item.replace('<q>','') for item in predictions]
predictions = [item.strip().split() for item in predictions]
# len(predictions[201])
# type(predictions[200])
# import pdb
# pdb.set_trace()
truths = open(real_path, 'r').readlines()
truths = [item.strip().split() for item in truths]
r1, r2, rl, r1_cf, r2_cf, rl_cf = get_rouge(predictions, truths, use_cf=True)
print("{} set results:\n".format('test'))
print("Metric\tScore\t95% CI")
print("ROUGE-1\t{:.2f}\t({:.2f},{:.2f})".format(r1, r1_cf[0]-r1, r1_cf[1]-r1))
print("ROUGE-2\t{:.2f}\t({:.2f},{:.2f})".format(r2, r2_cf[0]-r2, r2_cf[1]-r2))
print("ROUGE-L\t{:.2f}\t({:.2f},{:.2f})".format(rl, rl_cf[0]-rl, rl_cf[1]-rl))
results_dict2 = dict()
results_dict2["rouge_1_f_score"] = r1
results_dict2["rouge_2_f_score"] = r2
results_dict2["rouge_l_f_score"] = rl
return results_dict2
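# Usage sketch (assumption): each file holds one summary per line; predicted
# lines may use '<q>' as a sentence separator, which is stripped above.
#   results = calculate_rouge('predictions.txt', 'references.txt')
#   print(rouge_results_to_str2(results))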
``` |
{
"source": "jinpeng/kgsanguo",
"score": 3
} |
#### File: kgsanguo/prepare/extract_fulltext_from_epub.py
```python
import os
import ebooklib
from ebooklib import epub
from bs4 import BeautifulSoup
def epub2thtml(epub_path):
book = epub.read_epub(epub_path)
chapters = []
for item in book.get_items():
if item.get_type() == ebooklib.ITEM_DOCUMENT:
chapters.append(item.get_content())
return chapters
# <h1 class="chapterCaption"><a id="CHP14"></a>第十三回<br/>李傕郭汜大交兵<br/>杨奉董承双救驾</h1>
# <h1 class="chapterCaption1"><a id="CHP19"></a>第十八回<br/>贾文和料敌决胜<br/>夏侯惇拔矢啖睛<sup><a class="duokan-footnote" href="#jz_1_172" id="jzyy_1_172"><img alt="" src="../Images/note.png"/></a></sup></h1>
def split_chapter_caption(h1_element):
output = []
for child in h1_element.children:
if child.string != None:
# print(child.string)
output.append(child.string)
return ','.join(output) + "。"
def load_rare_chars(filepath):
d = {}
with open(filepath, 'r') as f:
for line in f:
(key, value) = line.split()
d[key] = value
return d
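# Expected format of rare_characters.txt (inferred from the split above): one
# "<image filename> <replacement character>" pair per line, e.g.
#   image01005.gif 某
# where the replacement character shown here is only a placeholder.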
# <span class="rareFont"><img src="../Images/image01005.gif" alt=""/></span>
def convert_rare_characters(chapter, rare_chars_dict):
rare_chars = chapter.find_all('span', class_='rareFont')
for rare_char in rare_chars:
for child in rare_char.children:
image_path = child['src']
image_name = os.path.basename(image_path)
rare_char.replace_with(rare_chars_dict[image_name])
def thtml2text(thtml, rare_chars_dict):
# print(thtml.decode('utf-8'))
output = []
soup = BeautifulSoup(thtml.decode('utf-8'), 'html.parser')
convert_rare_characters(soup, rare_chars_dict)
captions = soup.find_all('h1')
for caption in captions:
splitted_caption = split_chapter_caption(caption)
# print(splitted_caption)
output.append(splitted_caption)
paragraphs = soup.find_all('p', class_='bodyContent')
for paragraph in paragraphs:
# print(paragraph.text)
output.append(paragraph.text)
return '\n'.join(output)
if __name__ == '__main__':
# load rare characters dictionary
rare_chars_dict = load_rare_chars("../data/raw/rare_characters.txt")
chapters = epub2thtml("../data/raw/sgyy.epub")
# print(len(chapters))
count = 0
for chapter in chapters:
# skip first 3 chapters
if count >= 3:
text = thtml2text(chapter, rare_chars_dict)
with open("../data/text/ch{:03d}.txt".format(count-2), 'w') as f:
f.write(text)
count += 1
``` |
{
"source": "JinpengLI/gpu_share_platform",
"score": 2
} |
#### File: cmachines_slave/cmachines_slave/bridge_manager.py
```python
from cmachines_slave.utils import exe_cmd_on_local
from cmachines_slave.utils import exe_cmd_on_remote
from cmachines_slave.utils import get_default_settings
from cmachines_slave.persistent_object import PersistentObject
from datetime import datetime
import uuid
import os
import json
class BridgeManager(PersistentObject):
def __init__(self,
mem_file,
remote_port_manager,
local_port_manager,
machine_manager,
client,
remote_login,
remote_host,
bridge_password,
):
super(BridgeManager, self).__init__(mem_file)
self.remote_port_manager = remote_port_manager
self.local_port_manager = local_port_manager
self.machine_manager = machine_manager
self.client = client
self.remote_login = remote_login
self.remote_host = remote_host
        self.bridge_password = bridge_password
def list_bridge_local_containers(self,):
self.load()
local_container_ids = []
bridges = self.data.get("bridges", {})
for key in bridges:
bridge = bridges[key]
local_container_id = bridge["local_container_id"]
local_container_ids.append(local_container_id)
return local_container_ids
def list_bridge_remote_containers(self,):
self.load()
remote_container_ids = []
bridges = self.data.get("bridges", {})
for key in bridges:
bridge = bridges[key]
remote_container_id = bridge["remote_container_id"]
remote_container_ids.append(remote_container_id)
return remote_container_ids
def add_machine(self, machine_id_on_site, local_ssh_port):
self.load()
if "machines" not in self.data:
self.data["machines"] = {}
self.data["machines"][machine_id_on_site] = {}
self.data["machines"][machine_id_on_site]["local_ssh_port"] = local_ssh_port
self.save()
self.build_bridge(local_ssh_port)
new_bridge = self.search_bridge(local_ssh_port)
if new_bridge is not None:
self.client.set_virtual_machine(
vm_name=machine_id_on_site,
host=self.remote_host,
port=new_bridge["forwarding_port"],
connection_info="Ready",
)
else:
raise ValueError("cannot build bridge for " + str(local_ssh_port))
def remove_machine(self, machine_id_on_site,):
self.load()
if "machines" not in self.data:
return
if machine_id_on_site not in self.data["machines"]:
raise ValueError("cannod find the machine")
local_service_port = self.data["machines"][machine_id_on_site]["local_ssh_port"]
if machine_id_on_site in self.data["machines"]:
self.data["machines"].pop(machine_id_on_site)
self.save()
self.remove_bridge(local_service_port)
self.save()
def clean_bridge(self, ):
self.load()
all_machine_ids_on_site = {}
local_machines = self.machine_manager.get_all_meta_machines()
for container_id in local_machines:
machine_id_on_site = local_machines[container_id]["machine_id_on_site"]
ssh_port = local_machines[container_id]["ssh_port"]
all_machine_ids_on_site[machine_id_on_site] = ssh_port
ports_to_rm = []
bridge_machines = []
machines_data = self.data.get("machines", {})
for machine_id_on_site in machines_data:
if machine_id_on_site in all_machine_ids_on_site:
if all_machine_ids_on_site[machine_id_on_site] != machines_data[machine_id_on_site]["local_ssh_port"]:
ports_to_rm.append(machines_data[machine_id_on_site]["local_ssh_port"])
if len(ports_to_rm) > 0:
for service_port in ports_to_rm:
print("remove bridge with service port ", service_port)
self.remove_bridge(service_port)
def update(self, ):
self.clean_bridge()
self.load()
## scan all the local machines
all_machine_ids_on_site = {}
local_machines = self.machine_manager.get_all_meta_machines()
for container_id in local_machines:
machine_id_on_site = local_machines[container_id]["machine_id_on_site"]
ssh_port = local_machines[container_id]["ssh_port"]
all_machine_ids_on_site[machine_id_on_site] = ssh_port
bridge_machines = []
machines_data = self.data.get("machines", {})
for machine_id_on_site in machines_data:
local_ssh_port = machines_data[machine_id_on_site]["local_ssh_port"]
#print("machine_id_on_site=", machine_id_on_site)
#print("local_ssh_port=", local_ssh_port)
if self.check_bridge_if_exist(local_ssh_port):
bridge_machines.append(machine_id_on_site)
## remove bridges
machines_to_rm = list(set(bridge_machines) - set(all_machine_ids_on_site.keys()))
if len(machines_to_rm) > 0:
print("BridgeManager machines_to_rm:", machines_to_rm)
## add bridge machine
machines_to_add = list(set(all_machine_ids_on_site.keys()) - set(bridge_machines))
if len(machines_to_add) > 0:
print("BridgeManager machines_to_add:", machines_to_add)
for machine_to_rm in machines_to_rm:
self.remove_machine(machine_to_rm)
for machine_to_add in machines_to_add:
self.add_machine(machine_to_add, all_machine_ids_on_site[machine_to_add])
def search_bridge(self, local_service_port):
self.load()
#cmd_fmt_stop = "docker stop %(container_id)s"
#cmd_fmt_rm = "docker rm %(container_id)s"
bridges = self.data.get("bridges", {})
keys_to_remove = []
for bridge_key in bridges:
if bridges[bridge_key]["local_service_port"] == local_service_port:
return bridges[bridge_key]
return None
def remove_bridge(self, local_service_port):
self.load()
cmd_fmt_stop = "docker stop %(container_id)s"
cmd_fmt_rm = "docker rm %(container_id)s"
bridges = self.data.get("bridges", {})
keys_to_remove = []
for bridge_key in bridges:
if bridges[bridge_key]["local_service_port"] == local_service_port:
local_containder_id = bridges[bridge_key]["local_container_id"]
remote_container_id = bridges[bridge_key]["remote_container_id"]
forwarding_port = bridges[bridge_key]["forwarding_port"]
your_sshd_port = bridges[bridge_key]["your_sshd_port"]
cmd = cmd_fmt_stop % {"container_id": local_containder_id}
ret, msg = exe_cmd_on_local(cmd, ret_msg=True)
cmd = cmd_fmt_rm % {"container_id": local_containder_id}
ret, msg = exe_cmd_on_local(cmd, ret_msg=True)
cmd = cmd_fmt_stop % {"container_id": remote_container_id}
ret, msg = exe_cmd_on_remote(self.remote_login, self.remote_host, cmd, ret_msg=True)
cmd = cmd_fmt_rm % {"container_id": remote_container_id}
ret, msg = exe_cmd_on_remote(self.remote_login, self.remote_host, cmd, ret_msg=True)
self.remote_port_manager.release_port(forwarding_port)
self.remote_port_manager.release_port(your_sshd_port)
keys_to_remove.append(bridge_key)
for key_to_remove in keys_to_remove:
bridges.pop(key_to_remove)
self.data["bridges"] = bridges
self.save()
def check_bridge_if_exist(self, local_service_port):
bridges = self.data.get("bridges", {})
#print("bridges=", bridges)
#print("local_service_port=", local_service_port)
for key in bridges:
bridge = bridges[key]
if bridge["local_service_port"] == local_service_port:
return True
return False
def start_bridge_if_exist(self, local_service_port):
self.load()
bridges = self.data.get("bridges", {})
for key in bridges:
bridge = bridges[key]
if bridge["local_service_port"] == local_service_port:
local_container_id = bridge["local_container_id"]
remote_container_id = bridge["remote_container_id"]
cmd_remote = "docker start %s" % remote_container_id
ret, msg = exe_cmd_on_remote(self.remote_login,
self.remote_host,
cmd_remote,
ret_msg=True)
if ret != 0:
print("fail to execute ", cmd_remote)
cmd_local = "docker start %s" % local_container_id
ret, msg = exe_cmd_on_local(cmd_local, ret_msg=True)
if ret != 0:
print("fail to execute ", cmd_local)
return True
return False
def build_bridge(self, local_service_port):
'''
see https://github.com/JinpengLI/docker-image-reverse-ssh-tunnel
return remote port
'''
if self.start_bridge_if_exist(local_service_port):
#print("debug build_bridge already exist")
return
self.load()
forwarding_port = self.remote_port_manager.allocate_port() ## open service so it is called forwarding port
your_sshd_port = self.remote_port_manager.allocate_port() ## ssh server listening
data = {}
data["remote_login"] = self.remote_login
data["remote_host"] = self.remote_host
data["your_sshd_port"] = your_sshd_port
data["forwarding_port"] = forwarding_port
data["bridge_password"] = self.bridge_password
data["local_service_port"] = local_service_port
cmd = "docker run -d -e ROOT_PASS=%(bridge_password)s -p %(your_sshd_port)d:22 -p %(forwarding_port)d:1080 jinpengli/docker-image-reverse-ssh-tunnel"
cmd = cmd % data
ret, msg = exe_cmd_on_remote(self.remote_login, self.remote_host, cmd, ret_msg=True)
if ret != 0:
print("fail cmd:", cmd)
return None
remote_container_id = msg.strip()
cmd = "docker run -d -e PUBLIC_HOST_ADDR=%(remote_host)s -e PUBLIC_HOST_PORT=%(your_sshd_port)d -e ROOT_PASS=%(bridge_password)s -e PROXY_PORT=%(local_service_port)d --net=host jinpengli/docker-image-reverse-ssh-tunnel"
cmd = cmd % data
ret, msg = exe_cmd_on_local(cmd, ret_msg=True)
if ret != 0:
print("fail cmd:", cmd)
return None
local_container_id = msg.strip()
bridges = self.data.get("bridges", {})
bridge_key = (str(local_container_id) + '_' + str(remote_container_id))
bridges[bridge_key] = {}
bridges[bridge_key]["created_time"] = datetime.now().isoformat()
bridges[bridge_key]["your_sshd_port"] = your_sshd_port
bridges[bridge_key]["forwarding_port"] = forwarding_port
bridges[bridge_key]["local_service_port"] = local_service_port
bridges[bridge_key]["local_container_id"] = local_container_id
bridges[bridge_key]["remote_container_id"] = remote_container_id
self.data["bridges"] = bridges
self.save()
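    # Port topology created by build_bridge (read off the two docker commands
    # above): the remote container publishes your_sshd_port -> 22 and
    # forwarding_port -> 1080, while the local container (--net=host) dials out
    # to remote_host:your_sshd_port and forwards traffic to
    # localhost:local_service_port.  Clients therefore reach the machine's SSH
    # service at remote_host:forwarding_port, which is the port registered via
    # client.set_virtual_machine() in add_machine().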
if __name__ == "__main__":
from cmachines_slave.port_manager import PortManager
settings = get_default_settings()
working_dir = settings["local_data_dir"]
local_available_ports = settings["local_available_ports"]
local_available_ports = range(local_available_ports[0], local_available_ports[1], 1)
machine_manager_mem_file = os.path.join(working_dir, "machine_manager.json")
local_machine_port_mem_file = os.path.join(working_dir, "local_machine_ports.json")
port_manager = PortManager(
local_available_ports,
local_machine_port_mem_file)
bridge_manager = BridgeManager()
```
#### File: cmachines_slave/cmachines_slave/utils.py
```python
import subprocess
import sys, traceback
import json
import os
from subprocess import CalledProcessError
def get_default_settings():
dir_path = os.path.dirname(os.path.realpath(__file__))
default_settings_path = os.path.join(dir_path, "../config/crontab_update_machines.json")
default_settings = json.load(open(default_settings_path, "r"))
return default_settings
def exe_cmd_on_local(cmd, ret_msg=False):
if isinstance(cmd, basestring):
cmd = cmd.split(" ")
if ret_msg:
try:
out = subprocess.check_output(
cmd,
stderr=subprocess.STDOUT,
)
return 0, out
except CalledProcessError as exc:
#print "Exception in user code:"
#print '-'*60
#var = traceback.format_exc()
#traceback.print_exc(file=sys.stdout)
#print '-'*60
#print exc.output
return exc.returncode, exc.output
else:
ret = subprocess.call(cmd)
return ret
def exe_cmd_on_remote(remote_login, remote_host, cmd, ret_msg=False):
new_cmd = ["ssh", "%s@%s" % (remote_login, remote_host), cmd]
ret = exe_cmd_on_local(new_cmd, ret_msg)
return ret
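# Example (login and host are placeholders for illustration):
#   ret, out = exe_cmd_on_remote('ubuntu', 'bridge.example.com', 'docker ps -q', ret_msg=True)
# runs `ssh ubuntu@bridge.example.com docker ps -q` and returns (exit_code, output).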
def make_port_mapping_from_remote_to_local_port(remote_login, remote_host,
your_sshd_port, forwarding_port,
bridge_password,
local_server_port, ):
'''
see https://github.com/JinpengLI/docker-image-reverse-ssh-tunnel
'''
data = {}
data["remote_login"] = remote_login
data["remote_host"] = remote_host
data["your_sshd_port"] = your_sshd_port
data["forwarding_port"] = forwarding_port
data["bridge_password"] = bridge_password
data["local_server_port"] = local_server_port
## public server configuration
cmd = "docker run -d -e ROOT_PASS=%(<PASSWORD> -p %(your_sshd_port)d:22 -p %(forwarding_port)d:1080 jinpengli/docker-image-reverse-ssh-tunnel"
cmd = cmd % data
print("cmd ", cmd)
ret = exe_cmd_on_remote(remote_login, remote_host, cmd)
if ret != 0:
print("fail to start docker on remote machine %s" % remote_host)
return ret
cmd = "docker run -d -e PUBLIC_HOST_ADDR=%(remote_host)s -e PUBLIC_HOST_PORT=%(your_sshd_port)d -e ROOT_PASS=%(bridge_<PASSWORD> -e PROXY_PORT=%(local_server_port)d --net=host jinpengli/docker-image-reverse-ssh-tunnel"
cmd = cmd % data
print("cmd ", cmd)
ret = exe_cmd_on_local(cmd)
if ret != 0:
print("fail to start docker on local machine " )
return ret
return 0
``` |
{
"source": "jinPrelude/Americano_lab",
"score": 2
} |
#### File: Americano_lab/DDPG_Pendulum/main.py
```python
import tensorflow as tf
import numpy as np
import gym
from collections import deque
import argparse
import pprint as pp
from gym import wrappers
from agent import ActorNetwork, CriticNetwork
from replay_buffer import ReplayBuffer
from OU_Noise import OrnsteinUhlenbeckActionNoise
# ===========================
# Tensorflow Summary Ops
# ===========================
def build_summaries():
episode_reward = tf.Variable(0.)
tf.summary.scalar("Reward", episode_reward)
episode_ave_max_q = tf.Variable(0.)
tf.summary.scalar("Qmax Value", episode_ave_max_q)
summary_vars = [episode_reward, episode_ave_max_q]
summary_ops = tf.summary.merge_all()
return summary_ops, summary_vars
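# The scalars written by these summary ops can be inspected with TensorBoard,
# e.g. `tensorboard --logdir ./results/tf_ddpg` (the default --summary-dir below).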
# ===========================
# Agent Training
# ===========================
def train(sess, env, args, actor, critic, actor_noise):
# Set up summary Ops
summary_ops, summary_vars = build_summaries()
sess.run(tf.global_variables_initializer())
#generate tensorboard
writer = tf.summary.FileWriter(args['summary_dir'], sess.graph)
# Initialize target network weights
actor.update_target_network()
critic.update_target_network()
saver = tf.train.Saver()
# Initialize replay memory
replay_buffer = ReplayBuffer(int(args['buffer_size']), int(args['random_seed']))
reward_mean = deque(maxlen=10)
for i in range(int(args['max_episodes'])):
s = env.reset()
ep_reward = 0
ep_ave_max_q = 0
for j in range(int(args['max_episode_len'])):
if args['render_env']:
env.render()
if args['record_video'] :
wrappers.Monitor(env, './results/video/', force=True)
            # add Ornstein-Uhlenbeck (OU) process noise for exploration
a = actor.predict(np.reshape(s, (1, actor.s_dim))) + actor_noise()
s2, r, terminal, info = env.step(a[0])
            # add the transition to the replay buffer
replay_buffer.add(np.reshape(s, (actor.s_dim,)), np.reshape(a, (actor.a_dim,)), r,
terminal, np.reshape(s2, (actor.s_dim,)))
# Keep adding experience to the memory until
# there are at least minibatch size samples
if replay_buffer.size() > int(args['minibatch_size']):
s_batch, a_batch, r_batch, t_batch, s2_batch = \
replay_buffer.sample_batch(int(args['minibatch_size']))
# Calculate targets
target_q = critic.predict_target(
s2_batch, actor.predict_target(s2_batch))
y_i = []
for k in range(int(args['minibatch_size'])):
if t_batch[k]:
y_i.append(r_batch[k])
else:
y_i.append(r_batch[k] + critic.gamma * target_q[k])
# Update the critic given the targets
predicted_q_value, _ = critic.train(
s_batch, a_batch, np.reshape(y_i, (int(args['minibatch_size']), 1)))
ep_ave_max_q += np.amax(predicted_q_value)
# Update the actor policy using the sampled gradient
a_outs = actor.predict(s_batch)
grads = critic.action_gradients(s_batch, a_outs)
actor.train(s_batch, grads[0])
# Update target networks
actor.update_target_network()
critic.update_target_network()
s = s2
ep_reward += r
if terminal:
summary_str = sess.run(summary_ops, feed_dict={
summary_vars[0]: ep_reward,
summary_vars[1]: ep_ave_max_q / float(j)
})
writer.add_summary(summary_str, i)
writer.flush()
print('| Reward: {:d} | Episode: {:d} | Qmax: {:.4f}'.format(int(ep_reward), \
i, (ep_ave_max_q / float(j))))
reward_mean.append(ep_reward)
break
if i > 10 :
reward_reduce_mean = int(sum(reward_mean)/len(reward_mean))
if args['record_mean'] :
print('Terminate')
break
if reward_reduce_mean > -300 :
if args['record_video'] == False :
print('record_video is false')
#parser.set_defaults(render_env = True)
#parser.set_defaults(record_video=True)
args['record_video'] = True
args['render_env'] = True
saver.save(sess, './results/model_save/model.ckpt')
def main(args):
with tf.Session() as sess:
env = gym.make(args['env'])
np.random.seed(int(args['random_seed']))
tf.set_random_seed(int(args['random_seed']))
env.seed(int(args['random_seed']))
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
action_bound = env.action_space.high
# Ensure action bound is symmetric
assert (env.action_space.high == -env.action_space.low)
actor = ActorNetwork(sess, state_dim, action_dim, action_bound,
float(args['actor_lr']), float(args['tau']),
int(args['minibatch_size']))
critic = CriticNetwork(sess, state_dim, action_dim,
float(args['critic_lr']), float(args['tau']),
float(args['gamma']),
actor.get_num_trainable_vars())
actor_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(action_dim))
train(sess, env, args, actor, critic, actor_noise)
if __name__ == '__main__':
# print the parameters on the console
# and also offer the parametes to the main function
parser = argparse.ArgumentParser(description='provide arguments for DDPG agent')
parser.add_argument('--actor-lr', help='actor network learning rate', default=0.0001)
parser.add_argument('--critic-lr', help='critic network learning rate', default=0.001)
parser.add_argument('--gamma', help='discount factor for critic updates', default=0.99)
parser.add_argument('--tau', help='soft target update parameter', default=0.001)
parser.add_argument('--buffer-size', help='max size of the replay buffer', default=1000000)
parser.add_argument('--minibatch-size', help='size of minibatch for minibatch-SGD', default=64)
# run parameters
parser.add_argument('--env', help='choose the gym env- tested on {Pendulum-v0}', default='Pendulum-v0')
parser.add_argument('--random-seed', help='random seed for repeatability', default=1234)
parser.add_argument('--max-episodes', help='max num of episodes to do while training', default=50000)
parser.add_argument('--max-episode-len', help='max length of 1 episode', default=1000)
parser.add_argument('--render-env', help='render the gym env', action='store_true')
parser.add_argument('--summary-dir', help='directory for storing tensorboard info', default='./results/tf_ddpg')
    parser.add_argument('--record-video', default=False)
    parser.add_argument('--record-mean', help='stop training once the 10-episode reward mean has been reached', default=False)
parser.set_defaults(render_env=False)
args = vars(parser.parse_args())
pp.pprint(args)
main(args)
``` |
{
"source": "jinPrelude/eye_tracking",
"score": 3
} |
#### File: eye_tracking/eye_tracking/only_drawing.py
```python
import cv2
import numpy as np
import os, sys
drawing = False
drawn = False
img = None
img2 = None
def draw_rect(event, x, y, flags, param) :
global drawing, ix, iy, center_x, center_y, width, height, drawn, img, img2
if event == cv2.EVENT_LBUTTONDOWN :
drawing = True
ix, iy = x, y
elif event == cv2.EVENT_MOUSEMOVE :
if drawing :
cv2.rectangle(img, (ix, iy), (x, y), (0,255,0), 1)
cv2.imshow('test', img)
img = img2.copy()
elif event == cv2.EVENT_LBUTTONUP :
drawing = False
drawn = True
cv2.rectangle(param, (ix, iy), (x, y), (0, 255, 0), 1)
center_x, center_y = int((x + ix)/2), int((y + iy)/2)
width, height = int((x - ix)), int((y - iy))
def draw_rectangle(last_num) :
global center_x, center_y, width, height, drawn, list, list2, img, img2
if last_num :
j = last_num
print("j : %d"%j)
else :
j = 0
list = []
list2 = []
while True :
name = 'dataset/%d.jpg'%j
img = cv2.imread(name)
print('name : ', name)
cv2.namedWindow('test')
cv2.moveWindow('test', 800, 0)
cv2.setMouseCallback('test', draw_rect, param=img)
while True :
try:
cv2.imshow('test', img)
except :
print('no image')
final_num = str(j)
fo.write(final_num)
# np.savetxt(fo, final_num, fmt="%d")
sys.exit()
print('no image')
final_num = str(j)
fo.write(final_num)
# np.savetxt(fo, final_num, fmt="%d")
sys.exit()
img2 = img.copy()
k = cv2.waitKey(0)
if k:
if k & 0xFF == ord('q'):
                    print('break')
final_num = str(j)
fo.write(final_num)
#np.savetxt(fo, final_num, fmt="%d")
break
else :
break
if drawn:
"""
pt1 = (int(center_x - (width / 2)), int(center_y + (height / 2)))
pt2 = (int(center_x + (width / 2)), int(center_y - (height / 2)))
list = np.array([[j, pt1[0], pt1[1], pt2[0], pt2[1]]])
"""
            list = np.array([[center_x, center_y, width, height]])
print('list : ', list)
np.savetxt(f, list, fmt='%d', delimiter=',')
j += 1
cv2.destroyAllWindows()
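# Files used and produced by the labelling loop above:
#   dataset/<n>.jpg       - frames to label, indexed from 0
#   dataset/eyesPos.csv   - one "center_x,center_y,width,height" row per labelled frame
#   dataset/last_num.txt  - index of the frame where labelling stopped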
if __name__ == "__main__" :
last_num = None
try:
fo = open('dataset/last_num.txt', 'r')
print('mode r')
except:
fo = open('dataset/last_num.txt', 'w')
print('mode w')
if fo.mode == 'r':
num = fo.readline()
fo = open('dataset/last_num.txt', 'w')
if num=='':
last_num = 0
print("last_num : %d" % last_num)
else:
last_num = int(num)
print("last_num : %d" % last_num)
else:
last_num = 0
f = open('dataset/eyesPos.csv', 'a+')
draw_rectangle(last_num)
```
#### File: eye_tracking/eye_tracking/record_makeDataset.py
```python
import numpy as np
import cv2
import sys
import os
recordStart = False
recordEnd = False
drawing = False
drawn = False
img = None
img2 = None
center_x, center_y, width, height = None, None, None, None
def onMouse(event, x, y, flags, param) :
global recordStart, recordEnd
if event == cv2.EVENT_LBUTTONDOWN :
print('record start')
recordStart = True
elif event == cv2.EVENT_LBUTTONUP :
recordStart = False
recordEnd = True
def draw_rect(event, x, y, flags, param) :
global drawing, ix, iy, center_x, center_y, width, height, drawn, img, img2
if event == cv2.EVENT_LBUTTONDOWN :
drawing = True
ix, iy = x, y
elif event == cv2.EVENT_MOUSEMOVE :
if drawing :
cv2.rectangle(img, (ix, iy), (x, y), (0,255,0), 1)
cv2.imshow('test', img)
img = img2.copy()
elif event == cv2.EVENT_LBUTTONUP :
drawing = False
drawn = True
cv2.rectangle(param, (ix, iy), (x, y), (0, 255, 0), 1)
center_x, center_y = int((x + ix)/2), int((y + iy)/2)
width, height = int((x - ix)), int((y - iy))
def writeVideo(last_num) :
global recordStart, recordEnd
try :
print('camera start')
cap = cv2.VideoCapture(0)
except :
print('camera setting failed')
if last_num :
j = last_num
print("j : %d"%j)
else :
j = 0
while True :
ret, frame = cap.read()
if not ret :
print('video reading error')
break
cv2.imshow('video', frame)
cv2.setMouseCallback('video', onMouse)
if recordStart:
cv2.imwrite('dataset/' +'%d.jpg'%j, frame)
j += 1
if cv2.waitKey(1) & 0xFF == ord('q') :
pass
elif recordEnd == True :
print('finish record')
recordEnd = False
break
cap.release()
cv2.destroyAllWindows()
def draw_rectangle(last_num) :
global center_x, center_y, width, height, drawn, list, list2, img, img2
if last_num :
j = last_num
print("j : %d"%j)
else :
j = 0
list = np.array([0,0,0,0,0])
list2 = np.array([0,0,0,0,0])
while True :
name = 'dataset/%d.jpg'%j
img = cv2.imread(name)
print('name : ', name)
cv2.namedWindow('test')
#cv2.moveWindow('test', -10, -10)
cv2.setMouseCallback('test', draw_rect, param=img)
while True :
try:
cv2.imshow('test', img)
except :
print('no image')
final_num = str(j)
fo.write(final_num)
#np.savetxt(fo, final_num, fmt="%d")
sys.exit()
img2 = img.copy()
k = cv2.waitKey(0)
if k:
if k & 0xFF == ord('q'):
                    print('break')
final_num = str(j)
fo.write(final_num)
#np.savetxt(fo, final_num, fmt="%d")
break
else :
break
if drawn:
"""
pt1 = (int(center_x - (width / 2)), int(center_y + (height / 2)))
pt2 = (int(center_x + (width / 2)), int(center_y - (height / 2)))
list = np.array([[j, pt1[0], pt1[1], pt2[0], pt2[1]]])
"""
list = np.array([[center_x, center_y, width, height]])
print('list : ', list)
np.savetxt(f, list, fmt='%d', newline='\n')
#f.write("%d,%d,%d,%d,%d\n"%(j, pt1[0], pt1[1], pt2[0], pt2[1]))
j += 1
cv2.destroyAllWindows()
if __name__ == "__main__" :
last_num = None
try:
fo = open('dataset/last_num.txt', 'r')
print('mode r')
except:
fo = open('dataset/last_num.txt', 'w')
print('mode w')
if fo.mode == 'r' :
num = fo.readline()
fo = open('dataset/last_num.txt', 'w')
        if num == '' :
last_num = 0
print("last_num : %d"%last_num)
else :
last_num = int(num)
print("last_num : %d"%last_num)
else :
last_num = 0
f = open('dataset/eyesPos.csv', 'a+')
#os.chdir('/home/leejin/git/eye_tracking/eye-tracking')
writeVideo(last_num)
will = input('do you want to draw rectangles right now? yes : y, no : n')
if (will == 'y') :
draw_rectangle(last_num)
elif(will == 'n') :
print("last_num : ", last_num)
last_num = str(last_num)
fo.write(last_num)
fo.close()
``` |
{
"source": "jinPrelude/kerbal-rl",
"score": 3
} |
#### File: kerbal-rl/kerbal_rl/env.py
```python
import krpc
import time
import numpy as np
"""
Kerbal Space Program reinforcement learning environment
Author : <NAME>
github : https://github.com/jinPrelude/kerbal-rl
"""
# hover_v0 returns continuous reward
class hover_v0:
def __init__(self, sas=True, max_altitude = 500, max_step=100, interval = 0.085):
self.conn = krpc.connect(name='hover')
self.vessel = self.conn.space_center.active_vessel
self.step_count = 0
self.done = False
self.reward = 0
self.interval = interval
self.max_altitude = max_altitude
self.observation_space = 1
# Action space : 0.0 ~ 1.0 (Thrust ratio)
self.action_space = 1
self.action_max = 1.
self.action_min = -1.
self.initial_throttle = self.action_min
# Initializing
self.sas = sas
self.target_altitude = 100
self.relative_goal = self.target_altitude
self.max_step = max_step
# reset() returns target_altitude, while step() doesn't.
def reset(self):
# Quicksave initial state
self.conn.space_center.quicksave()
# Initialize sas
self.vessel.control.sas = self.sas
# Initialize throttle
self.vessel.control.throttle = self.initial_throttle
# Set target altitude
self.target_altitude = np.random.randint(100, self.max_altitude)
print('Target altitude : ', self.target_altitude)
self.relative_goal = self.target_altitude - self.vessel.flight().mean_altitude
self.step_count = 0
self.done = False
self.reward = 0
# Launch
self.vessel.control.activate_next_stage()
return [self.vessel, self.vessel.flight(), self.relative_goal]
def step(self, action):
self.decision(action)
if self.step_count >= self.max_step :
# Revert to the launch pad once the step count reaches max_step.
self.done = True
self.conn.space_center.quickload()
else :
self.step_count += 1
# The reward penalizes both the distance between the current and target altitude
# and the speed of the vehicle (both terms are negative).
self.reward = -0.6 * abs(self.vessel.flight().mean_altitude - self.target_altitude) + \
-0.4 * abs(self.vessel.flight().speed)
self.relative_goal = self.target_altitude - self.vessel.flight().mean_altitude
time.sleep(self.interval)
# obs, reward, done
return [self.vessel, self.vessel.flight(), self.relative_goal], self.reward, self.done, []
# Return action
def decision(self, action):
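# map the action from the agent's range [-1, 1] to the throttle range [0, 1]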
action = action[0] * 0.5 + 0.5
self.vessel.control.throttle = float(action)
def sample_action_space(self):
return np.random.uniform(-1, 1, 1)
# hover_v1 returns sparse reward
class hover_v1:
def __init__(self, sas=True, max_altitude = 1000, max_step=100, epsilon=1, interval = 0.085):
self.conn = krpc.connect(name='hover')
self.vessel = self.conn.space_center.active_vessel
self.step_count = 0
self.done = False
self.reward = 0
self.interval = interval
self.max_altitude = max_altitude
self.observation_space = 1
# error tolerance(meter).
# If epsilon is 1 and target_altitude is 100m, the error tolerance is between 99m and 101m
self.epsilon = epsilon
# Action space : 0.0 ~ 1.0 (Thrust ratio)
self.action_space = 1
self.action_max = 1.
self.action_min = 0.0
self.initial_throttle = self.action_min
# Initializing
self.sas = sas
self.target_altitude = 100
self.max_step = max_step
def reset(self):
# Quicksave initial state
self.conn.space_center.quicksave()
# Initialize sas
self.vessel.control.sas = self.sas
# Initialize throttle
self.vessel.control.throttle = self.initial_throttle
# Set target altitude
self.target_altitude = np.random.randint(100, self.max_altitude)
print('Target altitude : ', self.target_altitude)
self.step_count = 0
self.done = False
self.reward = 0
# Launch
self.vessel.control.activate_next_stage()
return self.vessel.thrust, self.vessel.mass, self.target_altitude
def step(self, action):
self.decision(action)
if self.step_count >= self.max_step :
# Revert to launch pad
self.done = True
self.conn.space_center.quickload()
else :
self.step_count += 1
# Give a reward of 1 if the current altitude is within the error tolerance and the vessel is almost stationary.
if (self.vessel.flight().mean_altitude <= self.target_altitude + self.epsilon) and \
(self.vessel.flight().mean_altitude >= self.target_altitude - self.epsilon) and \
abs(self.vessel.flight().speed) < 0.01 :
self.reward = 1
else : self.reward = 0
time.sleep(self.interval)
# obs, reward, done
return (self.vessel.thrust, self.vessel.mass, self.target_altitude), self.reward, self.done
# Return action
def decision(self, action):
self.vessel.control.throttle = float(action[0])
envs = {
'hover_v0' : hover_v0,
'hover_v1' : hover_v1
}
def make(id) :
return envs[id]
``` |
{
"source": "jinPrelude/MiniCNNEnv",
"score": 3
} |
#### File: MiniCNNEnv/minicnnenv/eatapple.py
```python
import gym
from gym import spaces
import numpy as np
import random
class EatApple(gym.Env):
def __init__(self, grid_size = [10, 10]):
self.rgb_dict = {'agent': [0, 0, 128], 'none': [0, 0, 0], 'apple': [255, 255, 255]}
self.agent_pos = None
self.apple_num = 3
self.apple_pos_list = None
self.max_step = 300
self.current_step = 0
assert isinstance(grid_size, list) and len(grid_size) == 2
self.grid_size = grid_size
self.grid = np.zeros((self.grid_size[0], self.grid_size[1], 3)) # rgb
self.action_space = spaces.Discrete(5)
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(self.grid_size[0], self.grid_size[1], 3),
dtype='uint8'
)
def _move_agent(self, action):
self.grid = self._draw(self.grid, self.rgb_dict['none'], self.agent_pos)
if action == 0:
pass
elif action == 1: # left
self.agent_pos[1] = self.agent_pos[1] - 1 if self.agent_pos[1] - 1 > 0 else 0
elif action == 2: # right
self.agent_pos[1] = self.agent_pos[1] + 1 if self.agent_pos[1] + 1 < self.grid_size[1]-1 else self.grid_size[1]-1
elif action == 3: # up
self.agent_pos[0] = self.agent_pos[0] - 1 if self.agent_pos[0] - 1 > 0 else 0
elif action == 4: # down
self.agent_pos[0] = self.agent_pos[0] + 1 if self.agent_pos[0] + 1 < self.grid_size[0]-1 else self.grid_size[0]-1
else:
print("wrong action")
self.grid = self._draw(self.grid, self.rgb_dict['agent'], self.agent_pos)
@staticmethod
def _draw(grid, rgb_code, pos):
grid[pos[0], pos[1], 0] = rgb_code[0]
grid[pos[0], pos[1], 1] = rgb_code[1]
grid[pos[0], pos[1], 2] = rgb_code[2]
return grid
@staticmethod
def _sample_apple_pos(grid_size, apple_num, agent_pos):
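# sample apple cells as flat indices over the grid (excluding the agent's start cell),
# then convert each flat index back to a (row, col) position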
pos_range = list(range(0, grid_size[0] * grid_size[1]))
del pos_range[agent_pos[0]*grid_size[1] + agent_pos[1]] # exclude the agent's reset cell (flat index = row * width + col)
agent_pos_list = random.sample(pos_range, apple_num)
apple_xy_pos = []
for pos in agent_pos_list:
apple_xy_pos.append([pos//grid_size[1], pos % grid_size[0]])
return apple_xy_pos
def _judge_eat_apple(self):
if self.agent_pos in self.apple_pos_list:
idx = self.apple_pos_list.index(self.agent_pos)
del self.apple_pos_list[idx]
return (1, True) if len(self.apple_pos_list) == 0 else (1, False)
else:
return (0, False)
def step(self, action):
self.current_step += 1
self._move_agent(action)
reward, done = self._judge_eat_apple()
done = True if self.current_step >= self.max_step else done
return self.grid, reward, done, {}
def reset(self):
self.current_step = 0
# initialize agent position.
self.agent_pos = [self.grid_size[1]-1, self.grid_size[0]//2]
self._move_agent(0) # to draw agent
self.apple_pos_list = self._sample_apple_pos(self.grid_size, self.apple_num, self.agent_pos)
for apple_pos in self.apple_pos_list:
self.grid = self._draw(self.grid, self.rgb_dict['apple'], apple_pos)
return self.grid
def _render(self, mode='human', close=False):
pass
def _take_action(self, action):
pass
def _get_reward(self):
pass
if __name__=="__main__":
env = EatApple()
s = env.reset()
print(s.transpose(2, 0, 1)[-1])
d = False
while not d:
a = int(input())
s, r, d, _ = env.step(a)
print(s.transpose(2, 0, 1)[-1])
print(f"reward: {r}\tdone: {d}")
print()
print("done!")
``` |
{
"source": "jinPrelude/pyNeat",
"score": 2
} |
#### File: jinPrelude/pyNeat/builder.py
```python
from envs import *
from networks.neat.network import NeatNetwork
from learning_strategies import *
from loops.loops import ESLoop
def build_env(config, unity_worker_id):
if config["name"] in ["simple_spread", "waterworld", "multiwalker"]:
return PettingzooWrapper(config["name"], config["max_step"])
elif "Unity" in config["name"]:
if "CollectApple" in config["name"]:
return UnityCollectAppleWrapper(config["name"], unity_worker_id, config["max_step"])
elif config["name"] in ["AndOps"]:
return AndOps()
else:
return GymWrapper(config["name"], config["max_step"], config["pomdp"])
def build_network(config):
if config["name"] == "NeatNetwork":
return NeatNetwork(
config["num_state"],
config["num_action"],
config["discrete_action"],
config["init_mu"],
config["init_std"],
config["mutate_std"],
config["max_weight"],
config["min_weight"],
config["probs"],
)
def build_loop(
config,
network,
agent_ids,
env_name,
gen_num,
n_workers,
eval_ep_num,
log,
save_model_period,
):
strategy_cfg = config["strategy"]
if strategy_cfg["name"] == "neat":
strategy = Neat(
strategy_cfg["offspring_num"],
strategy_cfg["crossover_ratio"],
strategy_cfg["champions_num"],
strategy_cfg["survival_ratio"],
strategy_cfg["c1"],
strategy_cfg["c3"],
strategy_cfg["delta_threshold"],
)
return ESLoop(
config,
strategy,
agent_ids,
env_name,
network,
gen_num,
n_workers,
eval_ep_num,
log,
save_model_period,
)
```
#### File: pyNeat/envs/gym_wrapper.py
```python
import gym
import pybullet_envs
gym.logger.set_level(40)
class GymWrapper:
def __init__(self, name, max_step=None, pomdp=False):
self.env = gym.make(name)
if pomdp:
if "LunarLander" in name:
print("POMDP LunarLander")
self.env = LunarLanderPOMDP(self.env)
elif "CartPole" in name:
print("POMDP CartPole")
self.env = CartPolePOMDP(self.env)
else:
raise AssertionError(f"{name} doesn't support POMDP.")
self.max_step = max_step
self.curr_step = 0
self.name = name
def reset(self):
self.curr_step = 0
return_list = {}
transition = {}
s = self.env.reset()
transition["state"] = s
return_list["0"] = transition
return return_list
def seed(self, seed):
self.env.seed(seed)
def step(self, action):
self.curr_step += 1
return_list = {}
transition = {}
s, r, d, info = self.env.step(action["0"])
if self.max_step != "None":
if self.curr_step >= self.max_step or d:
d = True
transition["state"] = s
transition["reward"] = r
transition["done"] = d
transition["info"] = info
return_list["0"] = transition
return return_list, r, d, info
def get_agent_ids(self):
return ["0"]
def render(self, mode):
return self.env.render(mode=mode)
def close(self):
self.env.close()
class LunarLanderPOMDP(gym.ObservationWrapper):
def __init__(self, env):
super().__init__(env)
def observation(self, obs):
# modify obs
obs[2] = 0
obs[3] = 0
obs[5] = 0
return obs
class CartPolePOMDP(gym.ObservationWrapper):
def __init__(self, env):
super().__init__(env)
def observation(self, obs):
# modify obs
obs[1] = 0
obs[3] = 0
return obs
```
#### File: learning_strategies/neat/neat_utils.py
```python
from itertools import combinations
import numpy as np
def mutate_offsprings(offsprings):
for off in offsprings:
off.mutate()
return offsprings
def crossover_offsprings(parents, rewards, offspring_num, delta_dict, delta_threshold):
offsprings = []
parents, rewards = sort_offsprings_rewards(parents, rewards)
p = np.arange(1, len(parents) + 1)[::-1] / sum(range(len(parents) + 1))
while len(offsprings) < offspring_num:
p1_idx, p2_idx = np.random.choice(range(len(parents)), 2, p=p, replace=False)
if delta_dict[(p1_idx, p2_idx)] > delta_threshold:
continue
p1, p2 = parents[p1_idx], parents[p2_idx]
if rewards[p1_idx] < rewards[p2_idx]:
child = p1.crossover(p2)
elif rewards[p1_idx] > rewards[p2_idx]:
child = p2.crossover(p1)
else:
child = p1.crossover(p2, draw=True)
offsprings.append(child)
return offsprings
def calculate_adjusted_fitness(offsprings, rewards, delta_threshold, delta_dict):
# regularize rewards
max_r = max(rewards)
min_r = min(rewards)
new_rewards = []
for i in range(len(rewards)):
new_rewards.append((rewards[i] - min_r) / (max_r - min_r))
rewards = new_rewards
# calculate adjusted fitness
adjusted_fitness = []
pass_score = []
for i in range(len(offsprings)):
same_speices_fitnesses = []
fitness = rewards[i]
same_speices_num = 0
for j in range(len(offsprings)):
if i == j:
continue
elif delta_dict[(i, j)] < delta_threshold:
same_speices_fitnesses.append(rewards[j])
same_speices_num += 1
new_fitness = fitness / same_speices_num
adjusted_fitness.append(new_fitness)
pass_score.append(fitness - mean(same_speices_fitnesses))
return adjusted_fitness, pass_score
def get_delta_dict(offsprings, c1, c3):
delta_list = {}
diversity_score = 0
parent_indices = [i for i in range(len(offsprings))]
parent_combs = list(combinations(parent_indices, 2))
for (i, j) in parent_combs:
delta_list[(i, j)] = calculate_delta(offsprings[i], offsprings[j], c1, c3)
diversity_score += delta_list[(i, j)] / 1e6
delta_list[(j, i)] = delta_list[(i, j)]
return delta_list, diversity_score
def calculate_delta(p1, p2, c1, c3):
p1_genes = p1.genome.get_connect_genes()
p2_genes = p2.genome.get_connect_genes()
p1_connect_keys = set(p1_genes.keys())
p2_connect_keys = set(p2_genes.keys())
all_genes_num = len(p1_connect_keys & p2_connect_keys)
diff_genes_num = len(set.symmetric_difference(p1_connect_keys, p2_connect_keys))
weight_diff = 1
if (len(p1_genes) + len(p2_genes)) / 2 > 20:
p1_weight_avg = get_weights_average(p1)
p2_weight_avg = get_weights_average(p2)
weight_diff = abs(p1_weight_avg - p2_weight_avg)
delta = (c1 * diff_genes_num) / all_genes_num
delta += c3 * weight_diff
return delta
def get_weights_average(neat_network):
# for delta calculation
connect_genes = neat_network.genome.get_connect_genes()
weights = []
for gene in connect_genes.values():
weights.append(gene.weight)
return mean(weights)
def pick_by_pass_score(offsprings, adjusted_fitness, pass_scores, survival_num):
# pick offsprings which fitness score is higher than speices' average.
pass_num = len([i for i in pass_scores if i > 0])
if pass_num < survival_num:
# sort the offsprings by pass_scores if pass_num is insufficient
rank_id = np.flip(np.argsort(pass_scores))
survivals = [offsprings[i] for i in rank_id]
survivals_rewards = [adjusted_fitness[i] for i in rank_id]
else:
survivals = []
survivals_rewards = []
for i, pass_score in enumerate(pass_scores):
if pass_score > 0:
survivals.append(offsprings[i])
survivals_rewards.append(adjusted_fitness[i])
return survivals, survivals_rewards
def sort_offsprings_rewards(offsprings, rewards):
rank_id = np.flip(np.argsort(rewards))
sorted_offsprings = [offsprings[i] for i in rank_id]
sorted_rewards = [rewards[i] for i in rank_id]
return sorted_offsprings, sorted_rewards
def mean(x_list):
if len(x_list) == 0:
return 0
else:
return sum(x_list) / len(x_list)
``` |
{
"source": "jinPrelude/reinforcement_learning",
"score": 3
} |
#### File: pytorch_rl/ksp_her/ksp.py
```python
from pytorch_rl.ksp_her.hover import hover_v1
import argparse
from pytorch_rl.ksp_her.algorithm import DDPG_HER
def main(args) :
env = hover_v1(max_altitude=300, max_step=100, epsilon=1)
action_dim = env.action_space
state_dim = env.observation_space
goal_dim = env.goal_space
max_step = 100 # max episode len
print('action_dim : ', action_dim)
print('state_dim : ', state_dim)
ddpg = DDPG_HER(args, state_dim, action_dim, goal_dim, max_step)
ddpg.ksp_train_loop(env)
if __name__ == '__main__' :
parse = argparse.ArgumentParser()
parse.add_argument('--batch_size', default=200)
parse.add_argument('--lr', default=0.003)
parse.add_argument('--epsilon_decay', default=0.9)
parse.add_argument('--gamma', default=0.99)
parse.add_argument('--target_replace_iter', default=100)
parse.add_argument('--tau', default=0.001)
parse.add_argument('--memory_capacity', default=10000)
parse.add_argument('--env_epsilon', default=1.)
parse.add_argument('--num_episode', default=10000)
parse.add_argument('--episode_len', default=100)
parse.add_argument('--ep_print_iter', default=1, help='print episode_reward at every %d step')
parse.add_argument('--model_save_iter', default=100, help='save model at every %d step')
parse.add_argument('--continue_training', default=False,
help='Will you continue training using your saved model & memory')
parse.add_argument('--saved_iter', default=220, help='last saved model iteration number. ')
parse.add_argument('--save_directory', default='./save/')
args = parse.parse_args()
main(args)
```
#### File: pytorch_rl/kuka/kuka_ddpg.py
```python
import pybullet as p
from pybullet_envs.bullet import kukaGymEnv
import argparse
from pytorch_rl.kuka.algorithm import DDPG
def main(args) :
p.connect(p.SHARED_MEMORY)
env = kukaGymEnv.KukaGymEnv(renders=True, isEnableSelfCollision=True)
action_dim = env.action_space.shape[0]
state_dim = env.observation_space.shape[0]
print('action_dim : ', action_dim)
print('state_dim : ', state_dim)
ddpg = DDPG(args, state_dim, action_dim)
ddpg.kuka_train_loop(env)
if __name__ == '__main__' :
parse = argparse.ArgumentParser()
parse.add_argument('--batch_size', default=200)
parse.add_argument('--lr', default=0.003)
parse.add_argument('--epsilon_decay', default=0.99)
parse.add_argument('--gamma', default=0.99)
parse.add_argument('--target_replace_iter', default=100)
parse.add_argument('--tau', default=0.001)
parse.add_argument('--memory_capacity', default=10000)
parse.add_argument('--num_episode', default=10000)
parse.add_argument('--episode_len', default=600)
parse.add_argument('--ep_print_iter', default=1, help='print episode_reward at every %d step')
parse.add_argument('--model_save_iter', default=100, help='save model at every %d step')
parse.add_argument('--continue_training', default=False,
help='Will you continue training using your saved model & memory')
parse.add_argument('--saved_iter', default=220, help='last saved model iteration number. ')
parse.add_argument('--save_directory', default='./kuka/save_ddpg/')
args = parse.parse_args()
main(args)
``` |
{
"source": "jinPrelude/rl_algorithms",
"score": 2
} |
#### File: rl_algorithms/Cnn_TD3_PyTorch /td3_main.py
```python
import gym
import torch
import tensorboardX
from agents import TD3
import argparse
import os
import utils
import numpy as np
def main(args):
env = gym.make(args['env_name'])
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
action_dim = env.action_space.shape[0]
max_action = env.action_space.high[0]
state_dim = env.observation_space.shape[0]
td3 = TD3(args, action_dim, max_action, state_dim, device)
summary = tensorboardX.SummaryWriter('./log/{}_td3_{}'.format(args['env_name'], args['noise_type']))
timestep = 0
for episode in range(args['max_episode']):
episode_reward = 0
state = env.reset()
state = utils.init_state(state)
while True:
if timestep < args['random_action_timestep'] :
select = env.action_space.sample()
action = utils.carRace_action_to_output(select)
else :
action = td3.get_action(state)
select = utils.carRace_output_to_action(action)
tmp_reward = 0
for i in range(4):
tmp_next_state, reward, done, info = env.step(select)
tmp_reward += reward
tmp_next_state = utils.preprocess(tmp_next_state)
tmp_next_state = tmp_next_state[np.newaxis, np.newaxis, :, :]
next_state = np.append(tmp_next_state, state[:, :3, :, :], axis=1)
# show_state(next_state)
td3.save(state, action[0], tmp_reward, next_state, int(done))
episode_reward += tmp_reward
state = next_state.copy()
timestep += 1
if timestep > args['train_start_timestep']:
if timestep % 2 == 0 :
td3.train(summary, timestep)
if done:
print('episode: ', episode, ' reward : %.3f'%(episode_reward), ' timestep :', timestep)
summary.add_scalar('reward/timestep', episode_reward, timestep)
break
if episode % args['save_freq'] == 0:
if not os.path.exists('./SaveModel') :
os.mkdir('./SaveModel')
torch.save(td3.actor.state_dict(), './SaveModel/{}_td3_{}_{}'.format(args['env_name'], args['noise_type'], episode))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', default=0)
parser.add_argument('--env-name', default='CarRacing-v0')
parser.add_argument('--env-seed', default=0)
parser.add_argument('--render', default=False, type=bool)
parser.add_argument('--evaluate', default=False, type=bool)
parser.add_argument('--model-directory', default='./SaveModel/Pendulum-v0_210', type=str)
parser.add_argument('--max-episode', default=1000000)
parser.add_argument('--save-freq', default=50)
parser.add_argument('--actor-lr', default=3e-4)
parser.add_argument('--critic-lr', default=1e-3)
parser.add_argument('--gamma', default=0.99)
parser.add_argument('--memory-size', default=350000)
parser.add_argument('--noise_type', default='gaussian')
parser.add_argument('--noise-delta', default=0.1)
parser.add_argument('--batch-size', default=32)
parser.add_argument('--train-start-timestep', default=2000)
parser.add_argument('--random-action-timestep', default=100)
parser.add_argument('--tau', default=5e-3)
args = vars(parser.parse_args())
main(args)
```
#### File: rl_algorithms/DQN_PyTorch/main.py
```python
import gym
import torch
import tensorboardX
import random
import numpy as np
from agents import Double_DQN_Cnn
import argparse
import os
from utils import preprocess, init_state
def main(args):
env = gym.make(args['env_name'])
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
action_dim = env.action_space.n
state_dim = env.observation_space.shape[0]
dqn = Double_DQN_Cnn(args, state_dim, action_dim, device)
summary = tensorboardX.SummaryWriter('./log/{}_{}'.format(args['env_name'], 'double_dqn'))
timestep = 0
for episode in range(args['max_episode']):
episode_reward = 0
state = env.reset()
state = init_state(state)
while True:
if args['random_action_timestep'] > timestep :
select = env.action_space.sample()
else :
select = dqn.get_action(state)
tmp_state = state.copy()
for i in range(4) :
next_state, reward, done, info = env.step(select)
if i == 3 : break
next_state = preprocess(tmp_state, next_state)
tmp_state = next_state
# env.render()
next_state = preprocess(tmp_state, next_state)
dqn.save(state, select, reward, next_state, int(done))
episode_reward += reward
state = next_state
timestep += 1
if timestep % 10 == 0 :
dqn.update_target()
if timestep > args['replay_start_size']: # start training once enough samples have been collected
if timestep % args['skip'] == 0 :
dqn.train()
if done:
if episode % 1 == 0 :
print('episode: ', episode, ' reward : %.3f'%(episode_reward), ' timestep :', timestep, ' epsilon :', dqn.epsilon)
summary.add_scalar('reward/timestep', episode_reward, timestep)
break
if episode % args['save_freq'] == 0:
if not os.path.exists('./SaveModel') :
os.mkdir('./SaveModel')
torch.save(dqn.model.state_dict(), './SaveModel/{}_{}_{}'.format(args['env_name'], 'dqn', episode))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--env-name', default='BreakoutDeterministic-v4')
parser.add_argument('--env-seed', default=0)
parser.add_argument('--render', default=False, type=bool)
parser.add_argument('--evaluate', default=False, type=bool)
parser.add_argument('--model-directory', default='./SaveModel/Pendulum-v0_210', type=str)
parser.add_argument('--max-episode', default=10000000)
parser.add_argument('--save-freq', default=1000)
parser.add_argument('--lr', default=0.001)
parser.add_argument('--gamma', default=0.99)
parser.add_argument('--memory-size', default=800000)
parser.add_argument('--batch-size', default=16)
parser.add_argument('--random-action-timestep', default=30000)
parser.add_argument('--skip', default=4)
parser.add_argument('--replay-start-size', default=50000)
args = vars(parser.parse_args())
main(args)
```
#### File: rl_algorithms/QRDQN_PyTorch/agents.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torch.optim as optim
from memory import Memory
from torch.autograd import Variable
import random
from utils import preprocess
class QRDQN_Model(nn.Module) :
def __init__(self, state_dim, action_dim, num_quant):
nn.Module.__init__(self)
self.num_quant = num_quant
self.action_dim = action_dim
self.layer1 = nn.Linear(state_dim, 256)
self.layer2 = nn.Linear(256, action_dim * num_quant)
def forward(self, x):
x = self.layer1(x)
x = torch.tanh(x)
x = self.layer2(x)
return x.view(-1, self.action_dim, self.num_quant)
class QRDQN() :
def __init__(self, args, state_dim, action_dim, device):
self.state_dim = state_dim
self.action_dim = action_dim
self.device = device
self.memory = Memory(args['memory_size'])
self.model = QRDQN_Model(state_dim, action_dim, args['num_quant']).to(self.device)
self.target_model = QRDQN_Model(state_dim, action_dim, args['num_quant']).to(self.device)
self.model_optimizer = optim.Adam(self.model.parameters(), lr=args['lr'])
self.tau = torch.Tensor((2 * np.arange(args['num_quant']) + 1) / (2.0 * args['num_quant'])).view(1, -1).to(self.device)
self.target_model.load_state_dict(self.model.state_dict())
self.epsilon = 1.0
self.args = args
def update_target(self):
self.target_model.load_state_dict(self.model.state_dict())
def get_action(self, state):
if np.random.rand() <= self.epsilon :
self.epsilon *= 0.999
return random.randrange(self.action_dim)
else :
state = torch.Tensor(state).to(self.device)
action = self.model(state).mean(2).max(1)[1]
return int(action)
def get_real_action(self, state):
state = torch.Tensor(state).to(self.device)
action = self.model(state).mean(2).max(1)[1]
return int(action)
def save(self, state, action, reward, next_state ,done):
self.memory.add((state, action, reward, next_state, done))
def huber(self, x, k=1.0):
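# element-wise Huber loss: quadratic for |x| < k, linear (k * (|x| - k/2)) otherwise;
# weighted by the quantile term in train() to form the quantile-regression loss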
return torch.where(x.abs() < k, 0.5 * x.pow(2), k * (x.abs() - 0.5 * k))
def train(self):
batch_size = self.args['batch_size']
mini_batch = self.memory.sample(batch_size)
mini_batch = np.array(mini_batch).transpose()
states = np.vstack(mini_batch[0])
actions = list(mini_batch[1])
rewards = list(mini_batch[2])
next_states = np.vstack(mini_batch[3])
dones = mini_batch[4].astype(int)
states = torch.Tensor(states).to(self.device)
next_states = torch.Tensor(next_states).to(self.device)
rewards = torch.Tensor(rewards).to(self.device)
rewards = torch.unsqueeze(rewards, 1)
dones = torch.Tensor(dones).to(self.device)
dones = torch.unsqueeze(dones, 1)
states = Variable(states).float()
# gather the predicted quantiles of the actions actually taken
a = torch.LongTensor(actions)
net_action = self.model(states)
net_action = net_action[np.arange(batch_size), actions]
target_action = self.target_model(next_states).detach()
target_select = target_action.mean(2).max(1)
target_action = target_action[np.arange(batch_size), target_select[1]]
target = rewards + (1 - dones) * self.args['gamma'] * target_action
target = target.t().unsqueeze(-1)
diff = target - net_action
loss = self.huber(diff) * (self.tau - (diff.detach() < 0).float()).abs()
loss = loss.mean()
self.model_optimizer.zero_grad()
loss.backward()
self.model_optimizer.step()
```
#### File: rl_algorithms/QRDQN_PyTorch/memory.py
```python
import random
import numpy as np
from collections import deque
# random
class Memory :
def __init__(self, memory_size):
self.memory = deque(maxlen=memory_size)
self.memory_counter = 0
def add(self, sample) :
self.memory.append(sample)
self.memory_counter += 1
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
```
#### File: rl_algorithms/TD3_PyTorch/evaluate.py
```python
import gym
import torch
import tensorboardX
from agents import TD3
import argparse
import os
def main(args):
env = gym.make(args['env_name'])
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
action_dim = env.action_space.shape[0]
max_action = env.action_space.high[0]
state_dim = env.observation_space.shape[0]
td3 = TD3(args, action_dim, max_action, state_dim, device)
trained_actor = torch.load(args['model_directory'])
td3.actor.load_state_dict(trained_actor)
timestep = 0
for episode in range(args['max_episode']):
episode_reward = 0
state = env.reset()
while True:
action = td3.get_action(state)
next_state, reward, done, info = env.step(action)
env.render()
episode_reward += reward
state = next_state
timestep += 1
if done:
print('episode: ', episode, ' reward : %.3f'%(episode_reward), ' timestep :', timestep)
break
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', default=0)
parser.add_argument('--env-name', default='LunarLanderContinuous-v2')
parser.add_argument('--env-seed', default=0)
parser.add_argument('--render', default=True, type=bool)
parser.add_argument('--evaluate', default=False, type=bool)
parser.add_argument('--model-directory', default='./SaveModel/LunarLanderContinuous-v2_td3_gaussian_900', type=str)
parser.add_argument('--max-episode', default=5000)
parser.add_argument('--save-freq', default=100)
parser.add_argument('--actor-lr', default=0.001)
parser.add_argument('--critic-lr', default=0.001)
parser.add_argument('--gamma', default=0.99)
parser.add_argument('--noise-timestep', default=10000)
parser.add_argument('--memory-size', default=200000)
parser.add_argument('--noise_type', default='none')
parser.add_argument('--noise-delta', default=1.0)
parser.add_argument('--batch-size', default=32)
parser.add_argument('--random-action-timestep', default=3000)
parser.add_argument('--tau', default=1e-3)
args = vars(parser.parse_args())
main(args)
``` |
{
"source": "jinPrelude/simple-es",
"score": 3
} |
#### File: learning_strategies/evolution/utils.py
```python
from copy import deepcopy
def wrap_agentid(agent_ids, network):
group = {}
for agent_id in agent_ids:
group[agent_id] = deepcopy(network)
return group
```
#### File: simple-es/networks/abstracts.py
```python
from abc import *
from torch import nn
class BaseNetwork(nn.Module):
def __init__(self):
super(BaseNetwork, self).__init__()
@abstractmethod
def zero_init(self):
pass
@abstractmethod
def reset(self):
pass
@abstractmethod
def get_param_list(self):
pass
@abstractmethod
def apply_param(self, param_lst: list):
pass
```
#### File: simple-es/networks/neural_network.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from .abstracts import BaseNetwork
class GymEnvModel(BaseNetwork):
def __init__(self, num_state=8, num_action=4, discrete_action=True, gru=True):
super(GymEnvModel, self).__init__()
self.num_action = num_action
self.fc1 = nn.Linear(num_state, 32)
self.use_gru = gru
if self.use_gru:
self.gru = nn.GRU(32, 32)
self.h = torch.zeros([1, 1, 32], dtype=torch.float)
self.fc2 = nn.Linear(32, num_action)
self.discrete_action = discrete_action
def forward(self, x):
with torch.no_grad():
x = torch.from_numpy(x).float()
x = x.unsqueeze(0)
x = torch.tanh(self.fc1(x))
if self.use_gru:
x, self.h = self.gru(x, self.h)
x = torch.tanh(x)
x = self.fc2(x)
if self.discrete_action:
x = F.softmax(x.squeeze(), dim=0)
x = torch.argmax(x)
else:
x = torch.tanh(x.squeeze())
x = x.detach().cpu().numpy()
return x
def reset(self):
if self.use_gru:
self.h = torch.zeros([1, 1, 32], dtype=torch.float)
def zero_init(self):
for param in self.parameters():
param.data = torch.zeros(param.shape)
def get_param_list(self):
param_list = []
for param in self.parameters():
param_list.append(param.data.numpy())
return param_list
def apply_param(self, param_lst: list):
count = 0
for p in self.parameters():
p.data = torch.tensor(param_lst[count]).float()
count += 1
``` |
{
"source": "jinpyojoo/school-py",
"score": 3
} |
#### File: school-py/school/parser.py
```python
import requests, re
from bs4 import BeautifulSoup
import datetime
class School:
def __init__(self, region, code):
if region is None or code is None:
raise Exception("지역 또는 학교코드가 비어있습니다.")
self.region = region
self.code = code
today = datetime.datetime.today()
def getMeal(self, year=today.year, month=today.month, day=today.day, code=2):
weekday = datetime.date(year=year, month=month, day=day).weekday()
schMmealScCode = code
month = str(month).zfill(2)
year = str(year).zfill(2)
day = str(day).zfill(2)
if self.region in ["sen", "pen", "dge", "ice", "gen", "dje", "use", "sje", "goe", "kwe", "cbe", "cne", "jbe", "jne", "gbe", "gne", "jje"]:
num = weekday + 1
if not self.region == 'gbe':
URL = (
"http://stu."+self.region+".go.kr/sts_sci_md01_001.do?"
f"schulCode={self.code}"
"&schulCrseScCode=4"
"&schulKndScCode=04"
"&schMmealScCode=%d&schYmd=%s" % (schMmealScCode, str(year)+str(month)+str(day))
)
else:
URL = (
"http://stu.gbe.kr/sts_sci_md01_001.do?"
f"schulCode={self.code}"
"&schulCrseScCode=4"
"&schulKndScCode=04"
"&schMmealScCode=%d&schYmd=%s" % (schMmealScCode, str(year)+str(month)+str(day))
)
html = ""
resp = requests.get(URL)
if resp.status_code == 200 :
html = resp.text
soup = BeautifulSoup(html, 'html.parser')
element_data = soup.find_all("tr")
element_data = element_data[2].find_all('td')
try:
element = str(element_data[num])
element_filter = ['[', ']', '<td class="textC last">', '<td class="textC">', '</td>', '&', '(h)', '.']
for element_string in element_filter :
element = element.replace(element_string, '')
element = re.sub(r"\d", "", element)
element = element.split('<br/>')
element = list(filter(None, element))
except Exception as ex:
element = []
else:
raise Exception("지역 코드가 맞지 않습니다.")
return element
``` |
{
"source": "jinq0123/conan-lua-intf",
"score": 2
} |
#### File: jinq0123/conan-lua-intf/conanfile.py
```python
from conans import ConanFile
class LuaintfConan(ConanFile):
name = "lua-intf"
version = "0.1"
license = "MIT"
url = "https://github.com/jinq0123/conan-lua-intf"
description = "A binding between C++11 and Lua language"
# No settings/options are necessary, this is header only
def source(self):
self.run("git clone --depth=1 https://github.com/SteveKChiu/lua-intf.git")
def package(self):
self.copy("*", dst="include", src="lua-intf")
def package_id(self):
self.info.header_only()
``` |
{
"source": "JinqiaoGit/DeBERTa-NER",
"score": 3
} |
#### File: experiments/citi_ner/dataset.py
```python
from typing import List, Dict
import torch
from torch.utils.data import Dataset
import numpy as np
from constant import PADDING_LABEL, LABEL2IX
from data_utils import get_word2ix
class DocDataset(Dataset):
def __init__(
self, sentences: List[List[str]],
labels: List[List[str]],
word2ix: Dict[str, int] = None,
padding_size: int = None,
label2ix: Dict[str, int] = LABEL2IX):
# set word to idx dictionary
if not word2ix and isinstance(sentences, list) and isinstance(labels, list):
self.word2ix = get_word2ix([(sent, label) for sent, label in zip(sentences, labels)])
else:
self.word2ix = word2ix
if not sentences or not labels:
raise ValueError("Empty input and output of dataset, please check your data")
self.sentences = sentences
self.labels = labels
self.padding_size = padding_size
self.label2ix = label2ix
self.train_data, self.train_label = None, None
# padding dataset
self._dataset_padding()
# convert array to torch Tensor
self.train_data, self.train_label = torch.LongTensor(self.train_data), torch.LongTensor(self.train_label)
def __len__(self):
"""
return the number of sentences
"""
return len(self.sentences)
def __getitem__(self, idx):
"""
return the batch item based on the idx
"""
return self.train_data[idx], self.train_label[idx]
def _dataset_padding(self):
"""
padding all sentences and labels based on the max length of sentence.
notice that each sentence and label has the same length,
the padding size is human defined, which should be in the list [64, 128, 256, 512, ...]
"""
if not isinstance(self.padding_size, int):
max_sentence_len = max(len(sent) for sent in self.sentences)
# find the minimum padding size
for interval in [64, 128, 256, 512]:
if max(interval, max_sentence_len) == interval:
self.padding_size = interval
break
if interval == 512:
self.padding_size = interval
break
# padding train data with padding label, which is index 0 in my settings
self.train_data = self.word2ix[PADDING_LABEL] * np.ones((len(self.sentences), self.padding_size))
# padding label data with -1
self.train_label = -1 * np.ones((len(self.sentences), self.padding_size))
# copy the data to numpy array
for sent_idx, sent in enumerate(self.sentences):
sent_length = len(sent)
self.train_data[sent_idx][:sent_length] = [self.word2ix[token] for token in sent]
self.train_label[sent_idx][:sent_length] = [self.label2ix[label] for label in self.labels[sent_idx]]
```
#### File: experiments/citi_ner/data_utils.py
```python
from typing import Dict, List, Tuple
import torch
def get_word2ix(trainset: List[Tuple[List[str], List[str]]]) -> Dict[str, int]:
"""
build a mapping from each token to a unique integer index
:param trainset: a list of tuples containing tokens and labels
:return: a dict that maps each token to its index
"""
# set <PAD> label as idx 0
word_to_ix: Dict[str, int] = {"<PAD>": 0}
for sentence, _ in trainset:
for word in sentence:
if word not in word_to_ix:
word_to_ix[word] = len(word_to_ix)
return word_to_ix
def prepare_sequence(seq: List[str], to_ix: Dict[str, int], device='cpu') -> torch.Tensor:
"""
convert a sequence of tokens into a tensor of their vocabulary indices.
"""
idxs = [[to_ix[w] for w in seq]]
return torch.tensor(idxs, dtype=torch.long, device=device)
def data_refactor():
pass
```
#### File: experiments/citi_ner/loss_function.py
```python
import torch
def cross_entropy_loss(outputs: torch.LongTensor, labels: torch.LongTensor):
"""
This is the cross entropy loss function
:param outputs:
this is the output of model
:param labels:
this is the ground truth of the token's label
:return:
the loss array based on cross-entropy
"""
# reshape labels to give a flat vector with length batch_size*seq_len
labels = labels.reshape(-1)
# mask out '<PAD>' tokens
mask = (labels >= 0).float()
# the number of tokens is the sum of elements in mask
num_tokens = int(torch.sum(mask).data)
# pick the values corresponding to labels and multiply by mask
outputs = outputs[range(outputs.shape[0]), labels] * mask
# cross entropy loss for all non <PAD> tokens
return -torch.sum(outputs) / num_tokens
``` |
{
"source": "Jinqi-Cheng/delivery_pal_server",
"score": 2
} |
#### File: delivery_pal_server/accounts/views.py
```python
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
# Create your views here.
from .models import Restaurant
def index(request):
return render(request, 'index.html')
@login_required
def dashboard(request):
return redirect('restautant/dashboard.html')
@login_required
def profile(request):
user = request.user
if user.is_authenticated:
if user.is_superuser:
return redirect('/admin/')
else:
restaurant = Restaurant.objects.get(user_id = user.id)
return render(request, 'profile.html', {'restaurant': restaurant})
else:
return redirect('/accounts/login/', {'message': 'Wrong password. Please try again.'})
``` |
{
"source": "jin-qin/cs655-image-recognition",
"score": 2
} |
#### File: cs655-image-recognition/tests/start_test.py
```python
import os
import requests
import json
import dateutil.parser
import time
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from util import load_ground_truth, get_sysnset_map, is_predict_correct
IMG_DIR = '/opt/client/data/val_img'
GROUND_TRUTH_FILE = './data/ILSVRC2012_validation_ground_truth.txt'
IMGNET_META_FILE = './data/meta.mat'
SYNSET_WORDS_MAP_FILE = './data/synset_words.txt'
HOST = 'pcvm3-23.instageni.cenic.net'
PORT = 5000
def date2ts(iso_date):
'''
parse date format like '2020-12-08T11:39:47.110Z' to a timestamp
'''
dt = dateutil.parser.isoparse(iso_date)
return int(time.mktime(dt.timetuple()))
def test_code(imgs_paths, gt, synste_map):
'''
run testing logics
try uploading all images inside imgs_paths
'''
imgs_idx_map = {}
valid_http_req = 0
total_req = len(imgs_paths)
# set maxmimum http pool size
sess = requests.Session()
adapter = requests.adapters.HTTPAdapter(pool_maxsize=100)
sess.mount('http://', adapter)
url_submit = 'http://{}:{}/jobs/submit'.format(HOST, PORT)
print('start uploading {} images ...'.format(len(imgs_paths)))
ts_req_st = time.time()
for idx, img_path in enumerate(imgs_paths):
print('uploading {}'.format(img_path), end='\r')
data = {'image':open(img_path,'rb')}
# if failed (timeout, status code not 201 etc.), skip
# only count valid http requets here, used to compute goodput
try:
res = sess.post(url=url_submit, files=data, timeout=3)
if res.status_code == 201:
job_id = res.json()['id']
imgs_idx_map[job_id] = idx
valid_http_req = valid_http_req + 1
except:
time.sleep(5)
req_duration = time.time() - ts_req_st
print('') # new line
print('upload finished')
# checking if all jobs finished every 5 seconds
url_check_all_finished = 'http://{}:{}/jobs/all_finished'.format(HOST, PORT)
check_count = 0
while True:
try:
res = sess.get(url_check_all_finished, timeout=3)
if res.status_code != 200: continue
all_finished = res.json()
print('checking if all jobs finished: {}, count: {}'.format(all_finished, check_count), end='\r')
check_count = check_count + 1
if all_finished['finished'] == True:
print('') # new line
print('all jobs finished!')
break
except:
pass
time.sleep(5)
# all job finished, get all jobs results
url_get_all_jobs = 'http://{}:{}/jobs/all'.format(HOST, PORT)
while True:
try:
res = sess.get(url_get_all_jobs, timeout=3)
if res.status_code != 200: continue
break
except:
time.sleep(5)
print('got all the jobs\'results')
# compute statistics
all_data = res.json()
avg_times = []
preds = []
for row in all_data['data']:
if row['finish_time'] is None: continue
if row['schedule_time'] is None: continue
try:
img_idx = imgs_idx_map[row['job_id']]
finish_time = date2ts(row['finish_time'])
schedule_time = date2ts(row['schedule_time'])
submit_time = date2ts(row['submit_time'])
pred_label = synset_map[int(row['result'])]
preds.append(is_predict_correct(gt, img_idx, pred_label))
duration = finish_time - schedule_time # second
avg_times.append(duration)
except:
pass
avg_time = np.mean(avg_times)
accuracy = np.mean(preds)
return avg_time, accuracy, valid_http_req, total_req, req_duration
if __name__ == '__main__':
# get all images in the image directory
imgs_paths = os.listdir(IMG_DIR)
imgs_paths = sorted(imgs_paths, key = lambda path: int(os.path.splitext(path)[0][-8:]))
imgs_paths = [IMG_DIR + '/{}'.format(path) for path in imgs_paths]
gt = load_ground_truth(GROUND_TRUTH_FILE)
synset_map = get_sysnset_map(IMGNET_META_FILE, SYNSET_WORDS_MAP_FILE)
loss_rates = np.linspace(0, 0.6, num=10) # X-axis
ts_st = time.time()
goodputs = [] # Y-axis
throughputs = [] # Y-axis
avg_times = [] # Y-axis
accuracies = [] # Y-axis
for loss in loss_rates:
print('----------------------------------------------------------')
print("start a new turn on loss rate: {:.2f}".format(loss * 100))
url = 'http://{}:{}/jobs/clear'.format(HOST, PORT)
res = requests.delete(url)
avg_time, accuracy, valid_http_req, total_http_req, req_duration = test_code(imgs_paths[:100], gt, synset_map)
goodput = valid_http_req / req_duration
throughput = total_http_req / req_duration
avg_times.append(avg_time)
accuracies.append(accuracy)
goodputs.append(goodput)
throughputs.append(throughput)
print('average job execution time: {} sec'.format(avg_time))
print('accuracy: {:.2f} %'.format(accuracy * 100))
print('valid requests: {}'.format(valid_http_req))
print('total requests: {}'.format(total_http_req))
print('goodput: {} / sec'.format(goodput))
print('throughput: {} / sec'.format(throughput))
print("finish this turn")
print('----------------------------------------------------------')
input('Enter any key to start next turn...')
total_runtime = time.time() - ts_st
# save all data into csv files.
import csv
with open('results.csv', 'w') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=' ', quotechar='|')
for i in range(len(loss_rates)):
csvwriter.writerow([loss_rates[i], goodputs[i], throughputs[i], accuracies[i], avg_times[i], total_runtime])
```
#### File: cs655-image-recognition/tests/util.py
```python
def load_ground_truth(gt_file: str):
ground_truth = []
with open(gt_file, 'r') as f:
for idx, line in enumerate(f):
ground_truth.append(int(line))
return ground_truth
def load_imagenet_meta(meta_file: str):
import scipy.io
mat = scipy.io.loadmat(meta_file)
return mat['synsets']
def get_sysnset_map(meta_file: str, synset_words_mapping_file: str):
'''
since the predicted label from model is not the same as the synsets id in imagenet
we have to map the label to the synsets id
this function will return the map of <model label, imagenet id>
'''
metadata = load_imagenet_meta(meta_file)
d = metadata[:, 0]
wnid_map = {}
for r in d:
if r[0][0][0] > 1000: continue
wnid_map[r[1][0]] = r[0][0][0]
synset_map = {-1: -1}
import csv
with open(synset_words_mapping_file, newline='') as csvfile:
csvreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
for id, line in enumerate(csvreader):
id_imgnet = wnid_map[line[0]]
synset_map[id] = id_imgnet
return synset_map
def get_synset_details_map(meta_file: str, synset_words_mapping_file: str):
metadata = load_imagenet_meta(meta_file)
d = metadata[:, 0]
wnid_map = {}
category = {}
desc = {}
for r in d:
if r[0][0][0] > 1000: continue
wnid_map[r[1][0]] = r[0][0][0]
category[r[1][0]] = r[2][0]
desc[r[1][0]] = r[3][0]
synset_map = {-1: {'code': -1, 'desc': ''}}
import csv
with open(synset_words_mapping_file, newline='') as csvfile:
csvreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
for id, line in enumerate(csvreader):
id_imgnet = wnid_map[line[0]]
synset_map[id] = { 'code': int(id_imgnet), 'cat':category[line[0]], 'desc': desc[line[0]]}
return synset_map
def is_predict_correct(ground_truth: list, img_idx: int, imgnet_label: int):
return ground_truth[img_idx] == imgnet_label
``` |
{
"source": "JinquanPeng/CS323-Compilers",
"score": 3
} |
#### File: project1/src/run_test.py
```python
import os
import hashlib
def getHash(f):
line=f.readline()
hash=hashlib.md5()
while(line):
hash.update(line)
line=f.readline()
return hash.hexdigest()
def IsHashEqual(f1,f2):
str1=getHash(f1)
str2=getHash(f2)
return str1==str2
if __name__ == '__main__':
cmds = []
ans = []
cmd = "./bin/splc"
for i in range(1,10):
file_name = cmd+ " ../test/test_1_r0"+str(i)+".spl > rst"
a = "../test/test_1_r0" + str(i) +".out"
cmds.append(file_name)
ans.append(a)
for i in range(10,13):
file_name =cmd+ " ../test/test_1_r"+str(i)+".spl > rst"
cmds.append(file_name)
a = "../test/test_1_r" + str(i) +".out"
ans.append(a)
for i in range(len(cmds)):
print("========"+ str(i+1) +"=======")
os.system(cmds[i])
f1=open("./rst","rb")
f2=open(ans[i],"rb")
print(IsHashEqual(f1,f2))
``` |
{
"source": "Jinr0h404/projet10",
"score": 3
} |
#### File: projet10/Favorite/models.py
```python
from django.db import models
from Product.models import Product
from User.models import CustomUser
# Create your models here.
class Favorites(models.Model):
"""this class is for the django orm, it gives the parameters for the
creation of the table of the same name in the psql database."""
substitute_id = models.ForeignKey(
Product, on_delete=models.RESTRICT, verbose_name="Substitut"
)
product_id = models.ForeignKey(
Product,
on_delete=models.RESTRICT,
related_name="bad_product",
verbose_name="Produit",
)
user_id = models.ForeignKey(
CustomUser, on_delete=models.CASCADE, verbose_name="Utilisateur"
)
def __str__(self):
return f"{self.substitute_id} | {self.product_id} | {self.user_id}"
```
#### File: management/commands/create_db.py
```python
from django.core.management.base import BaseCommand
from Product.models import Product, Category, Store
from .api_get import Api_get
class Command(BaseCommand):
help = "initialize database"
def handle(self, *args, **kwargs):
"""retrieve a list of products in JSON format through Open Food Fact
API. The loop goes through each element of the number of pages given,
checks if the main categories are correctly entered for the product
and creates a dictionary list."""
product = Api_get()
product_list = product.food()
for i in product_list:
new_product = Product.objects.create(
product_name=i["name"],
brand=i["brand"],
description=i["description"],
nutriscore=i["nutriscore"],
url=i["url"],
product_image=i["product_image"],
product_image_little=i["product_image_little"],
fat=str(i["fat"]),
saturated_fat=str(i["saturated_fat"]),
salt=str(i["salt"]),
sugar=str(i["sugar"]),
)
last_product = Product.objects.last()
prod_id = last_product.pk
for category in i["category"]:
"""the value of the category key in the dictionary can contain
several elements, so I loop over the categories and fill the
table with Django's get_or_create function to avoid creating
duplicates"""
category = category.strip()
""" strip() remove spaces before and after item"""
new_category, created = Category.objects.get_or_create(
category_name=category
)
cat_id = new_category.pk
last_product.category.add(cat_id)
""" make a loop for each store"""
for store in i["store"]:
"""like category, the value of the store key in the dictionary
can contain several elements, so I loop over the stores and fill
the table with the get_or_create function"""
store = store.strip()
new_store, created = Store.objects.get_or_create(store_name=store)
store_id = new_store.pk
last_product.store.add(store_id)
```
#### File: management/commands/update_db.py
```python
from django.core.management.base import BaseCommand
from Product.models import Product, Category, Store
from .api_get import Api_get
class Command(BaseCommand):
help = "update database"
def handle(self, *args, **kwargs):
"""retrieve a list of products in JSON format through Open Food Fact
API. The loop goes through each element of the number of pages given,
checks if the main categories are correctly entered for the product
and creates a dictionary list."""
product = Api_get()
product_list = product.food()
for i in product_list:
if Product.objects.filter(product_name=i["name"], url=i["url"]):
old_product = Product.objects.get(product_name=i["name"], url=i["url"])
old_product.brand=i["brand"]
old_product.description=i["description"]
old_product.nutriscore=i["nutriscore"]
old_product.fat=str(i["fat"])
old_product.saturated_fat=str(i["saturated_fat"])
old_product.salt=str(i["salt"])
old_product.sugar=str(i["sugar"])
old_product.save()
```
#### File: projet10/Product/models.py
```python
from django.db import models
# Create your models here.
class Category(models.Model):
"""this class is for the django orm, it gives the parameters for the
creation of the table of the same name in the psql database."""
category_name = models.CharField(
max_length=200, unique=True, verbose_name="Catégorie"
)
def __str__(self):
return f"{self.category_name}"
class Store(models.Model):
"""this class is for the django orm, it gives the parameters for the
creation of the table of the same name in the psql database."""
store_name = models.CharField(max_length=100, unique=True, verbose_name="Magasin")
def __str__(self):
return f"{self.store_name}"
class Product(models.Model):
"""this class is for the django orm, it gives the parameters for the
creation of the table of the same name in the psql database."""
product_name = models.CharField(max_length=200, verbose_name="Produit")
product_image = models.URLField(null=True)
product_image_little = models.URLField(null=True)
brand = models.TextField(null=True, verbose_name="Marque")
description = models.TextField(null=True)
nutriscore = models.CharField(max_length=1)
fat = models.CharField(max_length=30, default="NC")
saturated_fat = models.CharField(max_length=30, default="NC")
salt = models.CharField(max_length=30, default="NC")
sugar = models.CharField(max_length=30, default="NC")
url = models.URLField(null=True)
category = models.ManyToManyField(Category)
store = models.ManyToManyField(Store)
def __str__(self):
return f"{self.product_name}"
```
#### File: projet10/Product/views.py
```python
from django.shortcuts import render, redirect, get_object_or_404
from Product.models import Product
from User.models import CustomUser
from Favorite.models import Favorites
from django.core.paginator import Paginator
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
# Create your views here.
def search(request):
"""get the keyword in query and do a search in the product name column for all products that contain this word.
uses paginator to create a presentation of 6 products per page"""
query = request.GET.get("query")
products_list = Product.objects.filter(product_name__icontains=query).order_by("id")
paginator = Paginator(products_list, 6)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
logger.info('New search', exc_info=True, extra={
# Optionally pass a request and we'll grab any information we can
'request': request,
})
context = {
"page_obj": page_obj,
"nom_produit": page_obj,
"paginate": True,
"query": query,
}
return render(request, "Product/search.html", context)
def count_to_dict(lst):
"""loop on the list of id's to create a dictionary of key = product id / value = count how many common categories"""
return {k: lst.count(k) for k in lst}
def substitute_getter(id_product):
"""We create a list of all the product objects having a common category with the requested product.
For each product of the object list a new list is filled with the corresponding id.
Returns a list of (product_id, number of shared categories) tuples sorted by shared-category count in descending order."""
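# e.g. [(42, 5), (17, 2)] would mean product 42 shares 5 categories with the requested
# product and product 17 shares 2 (the ids here are illustrative only)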
product = get_object_or_404(Product, pk=id_product)
list_brut = Product.objects.filter(category__in=product.category.all()).exclude(
pk=id_product
)
list_order_value = []
for prod in list_brut:
list_order_value.append(prod.pk)
list_count_common = count_to_dict(list_order_value)
valeur_sub = sorted(list_count_common.items(), key=lambda x: x[1], reverse=True)
return valeur_sub
def search_substitute(request):
"""get the id in query and use substitute_getter function for returns a list of tuple (productID, nbrCategory
in common) and uses paginator to create a presentation of 6 products per page"""
query = request.GET.get("query")
query_id = Product.objects.filter(pk=query)
products_tuple = substitute_getter(query)
products_list = []
for i in products_tuple:
products_list.append(Product.objects.get(pk=i[0]))
paginator = Paginator(products_list, 6)
page_number = request.GET.get("page")
page_obj = paginator.get_page(page_number)
context = {
"page_obj": page_obj,
"nom_produit": page_obj,
"paginate": True,
"query": query,
"query_id": query_id,
}
return render(request, "Product/search_substitute.html", context)
def save_substitute(request):
"""get the id of product and id of his substitute in query with post method and save it with the id of connected
user. Redirect the user on the favorite page"""
query_substitute = request.POST["save"]
query_list = query_substitute.split(",")
if request.method == "POST":
query = Product.objects.get(pk=query_list[1])
user_connected = CustomUser.objects.get(pk=request.user.id)
substitute_id = Product.objects.get(pk=query_list[0])
favorite_substitute = Favorites.objects.create(
substitute_id=substitute_id,
product_id=query,
user_id=user_connected,
)
return redirect("/favoris")
def product_info(request, product_id):
"""get the id of product in the url. Display product info page with nutriscore"""
query = product_id
query_id = Product.objects.filter(pk=query)
context = {
"nom_produit": query_id,
"query": query,
"query_id": query_id,
}
return render(request, "Product/product_info.html", context)
```
#### File: projet10/User/models.py
```python
from django.db import models
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
# Create your models here.
class MyUserManager(BaseUserManager):
def create_user(self, email, username, password=None):
if not email:
raise ValueError("veuillez entrer un email")
user = self.model(
email=self.normalize_email(email),
username=username
)
user.set_password(password)
user.save()
return user
def create_superuser(self, email, username, password=None):
user = self.create_user(email=email, username=username, password=password)
user.is_admin = True
user.is_staff = True
user.save()
return user
class CustomUser(AbstractBaseUser):
"""this class is for the django orm, it gives the parameters for the
creation of the table of the same name in the psql database."""
username = models.CharField(max_length=200, null=False, blank=False, verbose_name="Nom")
email = models.EmailField(max_length=45, unique=True, null=False, blank=False)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
is_admin = models.BooleanField(default=False)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username']
objects = MyUserManager()
def has_perm(self, perm, obj=None):
return True
def has_module_perms(self, app_label):
return True
def __str__(self):
return f'{self.username} | {self.email}'
``` |
{
"source": "Jinr0h404/projet11",
"score": 3
} |
#### File: Product/tests/test_models.py
```python
import pytest
from Product.models import Product, Store, Category
@pytest.fixture
def product_fixture(db) -> Product:
"""creates the fixture for the test database with 3 products"""
product_list = [
{
"name": "nutella",
"store": "leclerc",
"category": ["pate", "pate à tartiner", "petit déjeuner", "chocolat"],
"nutriscore": "D",
"description": "petit déjeuner",
"fat": "0,145",
"saturated_fat": "0,145",
"salt": "0,145",
"sugar": "0,145",
},
{
"name": "lightella",
"store": "leclerc",
"category": ["pate"],
"nutriscore": "B",
"description": "petit déjeuner",
"fat": "0,14",
"saturated_fat": "0,04",
"salt": "0,02",
"sugar": "0,14",
},
{
"name": "nutalla",
"store": "leclerc",
"category": ["pate à tartiner", "petit déjeuner", "chocolat"],
"nutriscore": "B",
"description": "petit déjeuner",
"fat": "0,14",
"saturated_fat": "0,04",
"salt": "0,02",
"sugar": "0,14",
},
]
for i in product_list:
new_product = Product.objects.create(
product_name=i["name"],
nutriscore=i["nutriscore"],
fat=str(i["fat"]),
saturated_fat=str(i["saturated_fat"]),
salt=str(i["salt"]),
sugar=str(i["sugar"]),
)
last_product = Product.objects.last()
prod_id = last_product.pk
for category in i["category"]:
"""the value of the category key in the dictionary can contain
several elements. I therefore loop on the category to fill my
table with get_or_create function of peewee to haven't a
duplicate"""
category = category.strip()
""" strip() remove spaces before and after item"""
new_category, created = Category.objects.get_or_create(
category_name=category
)
cat_id = new_category.pk
last_product.category.add(cat_id)
""" make a loop for each store"""
for store in i["store"]:
"""like category, the value of the store key in the dictionary
can contain several elements. loop to fill my table with the
get_or_create function"""
store = store.strip()
new_store, created = Store.objects.get_or_create(store_name=store)
store_id = new_store.pk
last_product.store.add(store_id)
def test_product_model(product_fixture):
"""test that the product model records the product information in the database"""
product = Product.objects.get(product_name="nutella")
expected_value = "nutella"
assert str(product) == expected_value
@pytest.mark.django_db
def test_category_model():
"""test that the category model records the category information in the database"""
category = Category.objects.create(category_name="pate à tartiner")
expected_value = "pate à tartiner"
assert str(category) == expected_value
@pytest.mark.django_db
def test_store_model():
"""test that the store model records the store information in the database"""
store = Store.objects.create(store_name="auchan")
expected_value = "auchan"
assert str(store) == expected_value
``` |
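Because the fixture links products to categories and stores through many-to-many fields, follow-up tests can assert against those relations directly. A small sketch, assuming the same imports as the test module above:
```python
# Sketch: querying the many-to-many data created by product_fixture.
def test_fixture_links_products_to_categories(product_fixture):
    # Only "nutella" and "lightella" were given the "pate" category above.
    pates = Product.objects.filter(category__category_name="pate")
    assert {p.product_name for p in pates} == {"nutella", "lightella"}
```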
{
"source": "jin/ray",
"score": 2
} |
#### File: serve/tests/test_task_runner.py
```python
import pytest
import ray
from ray.experimental.serve.queues import CentralizedQueuesActor
from ray.experimental.serve.task_runner import (
RayServeMixin,
TaskRunner,
TaskRunnerActor,
wrap_to_ray_error,
)
import ray.experimental.serve.context as context
def test_runner_basic():
def echo(i):
return i
r = TaskRunner(echo)
assert r(1) == 1
def test_runner_wraps_error():
wrapped = wrap_to_ray_error(Exception())
assert isinstance(wrapped, ray.exceptions.RayTaskError)
def test_runner_actor(serve_instance):
q = CentralizedQueuesActor.remote()
def echo(flask_request, i=None):
return i
CONSUMER_NAME = "runner"
PRODUCER_NAME = "prod"
runner = TaskRunnerActor.remote(echo)
runner._ray_serve_setup.remote(CONSUMER_NAME, q, runner)
runner._ray_serve_main_loop.remote()
q.link.remote(PRODUCER_NAME, CONSUMER_NAME)
for query in [333, 444, 555]:
result_token = ray.ObjectID(
ray.get(
q.enqueue_request.remote(
PRODUCER_NAME,
request_args=None,
request_kwargs={"i": query},
request_context=context.TaskContext.Python)))
assert ray.get(result_token) == query
def test_ray_serve_mixin(serve_instance):
q = CentralizedQueuesActor.remote()
CONSUMER_NAME = "runner-cls"
PRODUCER_NAME = "prod-cls"
class MyAdder:
def __init__(self, inc):
self.increment = inc
def __call__(self, flask_request, i=None):
return i + self.increment
@ray.remote
class CustomActor(MyAdder, RayServeMixin):
pass
runner = CustomActor.remote(3)
runner._ray_serve_setup.remote(CONSUMER_NAME, q, runner)
runner._ray_serve_main_loop.remote()
q.link.remote(PRODUCER_NAME, CONSUMER_NAME)
for query in [333, 444, 555]:
result_token = ray.ObjectID(
ray.get(
q.enqueue_request.remote(
PRODUCER_NAME,
request_args=None,
request_kwargs={"i": query},
request_context=context.TaskContext.Python)))
assert ray.get(result_token) == query + 3
def test_task_runner_check_context(serve_instance):
q = CentralizedQueuesActor.remote()
def echo(flask_request, i=None):
# Accessing the flask_request without web context should throw.
return flask_request.args["i"]
CONSUMER_NAME = "runner"
PRODUCER_NAME = "producer"
runner = TaskRunnerActor.remote(echo)
runner._ray_serve_setup.remote(CONSUMER_NAME, q, runner)
runner._ray_serve_main_loop.remote()
q.link.remote(PRODUCER_NAME, CONSUMER_NAME)
result_token = ray.ObjectID(
ray.get(
q.enqueue_request.remote(
PRODUCER_NAME,
request_args=None,
request_kwargs={"i": 42},
request_context=context.TaskContext.Python)))
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(result_token)
``` |
{
"source": "jinrenfit/torndsession",
"score": 3
} |
#### File: torndsession/demos/redis_session.py
```python
import tornado.web
import tornado.httpserver
import tornado.ioloop
from torndsession.sessionhandler import SessionBaseHandler
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r'/', MainHandler),
]
settings = dict(
debug=True,
)
session_settings = dict(
driver="redis",
driver_settings=dict(
host='localhost',
port=6379,
db=0,
max_connections=1024,
)
)
settings.update(session=session_settings)
tornado.web.Application.__init__(self, handlers, **settings)
class MainHandler(SessionBaseHandler):
def get(self):
self.write("Redis Session Example:<br/>")
if 'sv' in self.session:
sv = self.session["sv"]
else:
sv = 0
self.write('Current Session Value:%s' % sv)
self.session['sv'] = sv + 1
def main():
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(8888)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
``` |
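Each request to `MainHandler` increments the per-session counter, which only works if the client returns the session cookie. A quick client sketch against a locally running instance of the demo:
```python
# Sketch: exercising the demo server above with a cookie-aware HTTP client.
import requests

client = requests.Session()  # keeps the session cookie between requests
for _ in range(3):
    response = client.get("http://localhost:8888/")
    print(response.text)  # the reported session value should grow by one each time
```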
{
"source": "JinRIIS/Custom-PX4",
"score": 3
} |
#### File: libevents/scripts/validate.py
```python
import argparse
import json
import os
import sys
import re
import common
try:
from jsonschema import validate
except ImportError as e:
print("Failed to import jsonschema: " + str(e))
print("")
print("You may need to install it using:")
print(" pip3 install --user jsonschema")
print("")
sys.exit(1)
def dict_raise_on_duplicates(ordered_pairs):
"""Reject duplicate keys"""
return_dict = {}
for key, value in ordered_pairs:
if key in return_dict:
raise ValueError("duplicate key: {:}".format(key))
return_dict[key] = value
return return_dict
def main():
""" main method """
# Parse command line arguments
parser = argparse.ArgumentParser(description="Validate event definition files.")
parser.add_argument("files",
default=[],
metavar="EVENT.JSON",
nargs='+',
help="one or more event definition files")
parser.add_argument('-v', '--verbose',
action='store_true',
help='Verbose Output')
args = parser.parse_args()
input_files = args.files
verbose = args.verbose
cur_dir = os.path.dirname(os.path.realpath(__file__))
schema_file = os.path.join(cur_dir, '../validation/schema.json')
with open(schema_file, 'r', encoding='utf-8') as stream:
schema = json.load(stream)
# read configuration
config = common.read_config()
events = {}
for input_file in input_files:
if verbose:
print("Validating {:}".format(input_file))
with open(input_file, 'r', encoding='utf-8') as json_file:
events = json.load(json_file, object_pairs_hook=dict_raise_on_duplicates)
try:
validate(instance=events, schema=schema)
extra_validation(events, config)
except:
print("Error: validation for {:} failed.\nschema: {:}".format(input_file, schema_file))
raise
def validate_event_description(description: str, num_args: int):
""" validate event description or message
* Supported parsing:
* - characters can be escaped with \\, e.g. '\\<', '\\{'
* - tags:
* - <profile name="[!]NAME">CONTENT</profile>
* - <a [href="URL"]>CONTENT</a>
* if href is not found, use CONTENT as url
* - <param>PARAM_NAME</param>
* - unknown tags are ignored (including content)
* - no nested tags of the same type
* - arguments: following python syntax, with 1-based indexing (instead of 0)
* and custom types (units)
* - general form: {ARG_IDX[:.NUM_DECIMAL_DIGITS][UNIT]}
* UNIT:
* - m: horizontal distance in meters
* - m_v: vertical distance in meters
* - m^2: area in m^2
* - m/s: speed in m/s
* - C: temperature in degrees celsius
"""
# remove escaped characters to simplify parsing
check_str = description
backslash_idx = check_str.find('\\')
while backslash_idx != -1:
check_str = check_str[0:backslash_idx]+check_str[backslash_idx+2:]
backslash_idx = check_str.find('\\')
i = 0
while i < len(check_str):
# enforce using tags for url's
if check_str[i:i+7] == 'http://' or check_str[i:i+8] == 'https://':
raise Exception("freestanding url found in:\n\n{:}\n\n" \
"Use a tag in one of these formats:\n" \
"- <a>LINK</a>\n" \
"- <a href=\"LINK\">DESCRIPTION</a>".format(description))
if check_str[i] == '<':
# extract tag with 1 optional argument. Be strict with spacing to
# simplify library implementations
m = re.match(r"^<([a-z]+)(?: ([a-z]+)=\"([^\"]*)\")?>(.)", check_str[i:], re.DOTALL)
if not m:
raise Exception("Invalid tag format in:\n\n{:}\n\n" \
"General form: <TAG[ ARG=\"VAL\"]>CONTENT</TAG>\n"
"Use \\< to escape a single '<'".format(description))
#print(m.groups())
tag_name = m.group(1)
# "unknown" is for the tests
known_tags = ["a", "param", "profile", "unknown"]
if not tag_name in known_tags:
raise Exception("Unknown tag '<{:}>' in:\n\n{:}\n\n" \
"Known tags: {:}".format(tag_name, description, known_tags))
if tag_name == "profile":
known_profiles = ["dev", "normal"]
if m.group(3) is None:
raise Exception("Missing profile name in:\n\n{:}\n\n" \
"".format(description))
profile = m.group(3)
if profile.startswith('!'):
profile = profile[1:]
if m.group(2) != "name" or not profile in known_profiles:
raise Exception("Unknown profile '{:}={:}' in:\n\n{:}\n\n" \
"Known profiles: {:}".format(
m.group(2), profile, description, known_profiles))
content_start_idx = m.start(4)
end_tag_idx = check_str.find('</'+tag_name+'>', i+content_start_idx)
if end_tag_idx == -1:
raise Exception("Ending tag for '<{:}>' not found in:\n\n{:}\n\n" \
"".format(tag_name, description))
tag_content = check_str[i+content_start_idx:end_tag_idx]
# check for nested tags
if '<'+tag_name in tag_content:
raise Exception("Unsupported nested tag found for '<{:}>' in:\n\n{:}\n\n" \
"".format(tag_name, description))
# continue with checking the tag content
check_str = check_str[:i] + tag_content + check_str[end_tag_idx+len(tag_name)+3:]
elif check_str[i] == '{':
arg_end_idx = check_str.find('}', i)
if arg_end_idx == -1:
raise Exception("Invalid argument, no '}}' found in:\n\n{:}\n\n" \
"Use escaping for a literal output: '\\{{'".format(description))
arg = check_str[i+1:arg_end_idx]
m = re.match(r"^(\d+)(?::(?:\.(\d+))?)?(m|m_v|m/s|m\^2|C)?$", arg)
if not m:
raise Exception("Invalid argument ('{{{:}}}') in:\n\n{:}\n\n" \
"General form: {{ARG_IDX[:.NUM_DECIMAL_DIGITS][UNIT]}}" \
.format(arg, description))
#print(m.groups())
arg_idx = int(m.group(1)) - 1
if arg_idx < 0 or arg_idx >= num_args:
raise Exception("Invalid argument index ({:}) in:\n\n{:}\n\n" \
"Valid range: [1..{:}]".format(arg_idx+1, description, num_args))
i += len(arg) + 2
elif check_str[i] in ('}', '>'):
raise Exception("Found stray '{:}' in:\n\n{:}\n\n" \
"You might want to escape it with '\\'".format(check_str[i], description))
else:
i += 1
def validate_group(event, group_name, event_name, arguments):
""" rules for certain groups """
if group_name in ('arming_check', 'health') \
and (not event_name in ('arming_check_summary', 'health_summary')):
assert len(arguments) >= 2, \
"missing arguments for health/arming_check event {:}".format(event_name)
assert arguments[0] == 'common::navigation_mode_category_t', \
"first health/arming_check event {:} argument must " \
"be {:}, but is {:}".format(
event_name, 'common::navigation_mode_category_t', arguments[0])
assert arguments[1] == 'uint8_t', \
"second health/arming_check event {:} argument must " \
"be {:}, but is {:}".format(
event_name, 'uint8_t', arguments[1])
assert event['arguments'][1]['name'] == 'health_component_index', \
"second health/arming_check event {:} argument name must " \
"be {:}, but is {:}".format(
event_name, 'health_component_index', event['arguments'][1]['name'])
def validate_event_arguments(config, event, events, namespace):
""" ensure all enums exist
:return: list of argument types (normalized)
"""
if not "arguments" in event:
return []
arguments = []
arguments_size = 0
for arg in event["arguments"]:
if arg["type"] in common.base_types:
base_type = arg["type"]
arguments.append(base_type)
else:
try:
(base_type, normalized_type) = \
common.base_type_from_enum(events, namespace, arg["type"])
except:
print("Exception trying to get enum type " \
"for event: {:}".format(event["name"]))
raise
arguments.append(normalized_type)
arguments_size += common.base_types[base_type]['size']
if arguments_size > int(config['max_arguments_size']):
raise Exception("Argument size exceeded for event {:} ({:} > {:})" \
.format(namespace+'::'+event["name"], arguments_size, config['max_arguments_size']))
return arguments
def extra_validation(events, config):
""" Additional validation not possible with the schema.
Includes:
- unique names (enum, events, namespaces) & ID's (components & events)
- special rules for certain event groups
- event description & message
- event argument types (base type or enum)
"""
if not "components" in events:
return
all_namespaces = set()
for comp_id in events["components"]:
icomp_id = int(comp_id)
assert 0 <= icomp_id <= 255, "component id out of range: {}".format(icomp_id)
comp = events["components"][comp_id]
namespace = comp["namespace"]
if namespace in all_namespaces:
raise Exception("Duplicate namespace: {:}".format(namespace))
all_namespaces.add(namespace)
if "event_groups" in comp:
all_event_id = set()
all_event_names = set()
for group_name in comp["event_groups"]:
group = comp["event_groups"][group_name]
if not "events" in group:
continue
for event_sub_id in group["events"]:
sub_id = int(event_sub_id)
assert 0 <= sub_id <= 16777215, "event id out of range: {}".format(sub_id)
event = group["events"][event_sub_id]
event_name = event["name"]
if event_sub_id in all_event_id:
raise Exception("Duplicate event id: {:} ({:})".format(
event_sub_id, event['name']))
all_event_id.add(event_sub_id)
if event_name in all_event_names:
raise Exception("Duplicate event name: {:}".format(event_name))
all_event_names.add(event_name)
arguments = validate_event_arguments(config, event, events, namespace)
# rules for certain groups
validate_group(event, group_name, event_name, arguments)
validate_event_description(event["message"], len(arguments))
if "description" in event:
validate_event_description(event["description"], len(arguments))
if __name__ == "__main__":
main()
```
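`validate_event_description` enforces the tag and argument grammar described in its docstring and raises on violations. A small sketch of calling it directly, assuming the script above is importable as a module named `validate`:
```python
# Sketch: driving the description validator above by hand.
from validate import validate_event_description  # import path is an assumption

# Accepted: a <param> tag plus argument 1 with two decimals and the m/s unit.
validate_event_description("Speed of <param>velocity</param> is {1:.2m/s}", num_args=1)

# Rejected: argument index 2 is out of range for a single-argument event.
try:
    validate_event_description("Bad index {2}", num_args=1)
except Exception as error:
    print(error)
```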
#### File: mavlink/doc/mavlink_gitbook.py
```python
import lxml.etree as ET
import requests
from bs4 import BeautifulSoup as bs
import re
import os # for walk
xsl_file_name = "mavlink_to_html_table_gitbook.xsl"
xml_message_definitions_dir_name = "../message_definitions/v1.0/"
output_dir = "./messages/"
output_dir_html=output_dir+"_html/"
if not os.path.exists(output_dir_html):
os.makedirs(output_dir_html)
# File for index
index_file_name = "README.md"
index_file_name = output_dir + index_file_name
# Get XSLT
with open(xsl_file_name, 'r') as content_file:
xsl_file = content_file.read()
xslt = ET.fromstring(xsl_file)
#initialise text for index file.
index_text="""<!-- THIS FILE IS AUTO-GENERATED (DO NOT UPDATE GITBOOK): https://github.com/mavlink/mavlink/blob/master/doc/mavlink_gitbook.py -->
# Dialects {#dialects}
MAVLink *dialects* are XML definition files that define *protocol-* and *vendor-specific* messages, enums and commands.
Dialects may *include* other MAVLink XML files, which may in turn contain other XML files (up to 5 levels of XML file nesting are allowed - see `MAXIMUM_INCLUDE_FILE_NESTING` in [mavgen.py](https://github.com/ArduPilot/pymavlink/blob/master/generator/mavgen.py#L44)).
A typical pattern is for a dialect to include [common.xml](../messages/common.md) (containing the *MAVLink standard definitions*), extending it with vendor or protocol specific messages.
## Standard Definitions
The following XML definition files are considered standard/core (i.e. not dialects):
* [minimal.xml](minimal.md) - the minimum set of entities (messages, enums, MAV_CMD) required to set up a MAVLink network.
* [standard.xml](standard.md) - the standard set of entities that are implemented by almost all flight stacks (at least 2, in a compatible way).
This `includes` [minimal.xml](minimal.md).
* [common.xml](../messages/common.md) - the set of entities that have been implemented in at least one core flight stack.
This `includes` [standard.xml](standard.md).
Further, [all.xml](all.md) is a _special case_.
It includes almost all other XML definition files, and can be used to verify that there are no ID clashes (and can potentially be used by GCS to communicate with any core dialect).
> **Note** We are still working towards moving the truly standard entities from **common.xml** to **standard.xml**
Currently you should include [common.xml](../messages/common.md)
## Core Dialects
Core dialects are stored in [mavlink/message definitions](https://github.com/mavlink/mavlink/blob/master/message_definitions/).
These are the dialects for the major MAVLink stakeholder flight stacks.
> **Note** Vendor forks of MAVLink may contain dialect messages that are not yet merged, and hence will not appear in this documentation.
Human-readable forms of all the core dialects are linked below:
"""
index_text_trailer="""## External Dialects
MAVLink provides the [/external/dialects](https://github.com/mavlink/mavlink/tree/master/external/dialects) folder for dialects from projects that are not maintained by core MAVLink stakeholders or part of the MAVLink standard.
This mechanism is provided to help non-stakeholder dialect owners avoid clashes with other dialects (and the standard), and to ease integration of generic behaviours into the standard in future.
These are not managed by the core team and do not appear in this documentation.
Information about using the folder can be found in github: [/external/dialects](https://github.com/mavlink/mavlink/tree/master/external/dialects)
> **Note** We *highly* recommend that you work with the standard and core stakeholder dialects rather than using this approach (there are significant benefits in terms of compatibility and adoptability when using the standard definitions).
"""
#Fix up the BeautifulSoup output so to fix build-link errors in the generated gitbook.
## BS puts each tag/content in its own line. Gitbook generates anchors using the spaces/newlines.
## This puts displayed text content immediately within tags so that anchors/links generate properly
def fix_content_in_tags(input_html):
#print("fix_content_in_tags was called")
def remove_space_between_content_tags(matchobj):
stripped_string=matchobj.group(1).strip()
return '>%s<' % stripped_string
input_html=re.sub(r'\>(\s+?\w+?.*?)\<', remove_space_between_content_tags, input_html,flags=re.DOTALL)
return input_html
def fix_external_dialect_link(input_html):
#print("fix_external_dialect_link was called")
def fixupexternaldialecturls(matchobj):
return matchobj.group(1).strip()
input_html=re.sub(r'<a href="../../external/.*?>(.*?)</a>', fixupexternaldialecturls, input_html,flags=re.DOTALL)
return input_html
def fix_include_file_extension(input_html):
## Fixes up file extension .xml.md.unlikely (easier than fixing up the XSLT to strip file extensions!)
input_html=input_html.replace('.xml.md.unlikely','.md')
return input_html
def fix_replace_space_marker(input_html):
## Above we remove hidden space. I can't seem to regexp just that type of space, so use space markers in text
input_html=input_html.replace('xxx_space_xxx',' ')
return input_html
def strip_text_before_string(original_text,strip_text):
# Strip out all text before some string
index=original_text.find(strip_text)
stripped_string=original_text
if index != -1:
stripped_string = stripped_string[index:]
return stripped_string
def fix_add_implicit_links_items(input_html):
# Makes screaming snake case into anchors. Special fix for MAV_CMD.
#print("fix_add_implicit_link was called")
def make_text_to_link(matchobj):
#print("make_entry_to_link was called: %s" % matchobj.group(0))
item_string = matchobj.group(2)
item_url=item_string
if item_string == 'MAV_CMD':
item_url='mav_commands'
returnString = '%s<a href="#%s">%s</a>%s' % (matchobj.group(1),item_url,item_string,matchobj.group(3))
#print("returnstring: %s" % returnString)
return returnString
input_html=re.sub(r'([\`\(\s,]|^)([A-Z]{2,}(?:_[A-Z0-9]+)+)([\`\)\s\.,:]|$)', make_text_to_link, input_html,flags=re.DOTALL)
return input_html
def inject_top_level_docs(input_html,filename):
#Inject top level heading and other details.
print('FILENAME (prefix): %s' % filename)
insert_text='<!-- THIS FILE IS AUTO-GENERATED: https://github.com/mavlink/mavlink/blob/master/doc/mavlink_gitbook.py -->'
if filename == 'common':
insert_text+="""
# MAVLINK Common Message Set
The MAVLink *common* message set contains *standard* definitions that are managed by the MAVLink project.
The definitions cover functionality that is considered useful to most ground control stations and autopilots.
MAVLink-compatible systems are expected to use these definitions where possible (if an appropriate message exists) rather than rolling out variants in their own [dialects](../messages/README.md).
The original definitions are defined in [common.xml](https://github.com/mavlink/mavlink/blob/master/message_definitions/v1.0/common.xml).
> **Tip** The common set `includes` [minimal.xml](minimal.md), which contains the *minimal set* of definitions for any MAVLink system.
These definitions are [reproduced at the end of this topic](#minimal).
"""
elif filename == 'minimal':
insert_text+="""
# MAVLink Minimal Set
The MAVLink *minimal* set contains the minimal set of definitions for a viable MAVLink system.
The message set is defined in [minimal.xml](https://github.com/mavlink/mavlink/blob/master/message_definitions/v1.0/minimal.xml) and is managed by the MAVLink project.
> **Tip** The minimal set is included (imported into) other xml definition files, including the [MAVLink Common Message Set (common.xml)](common.md).
"""
elif filename == 'ardupilotmega':
insert_text+="""
# Dialect: ArduPilotMega
These messages define the ArduPilot specific message set, which is custom to [http://ardupilot.org](http://ardupilot.org).
This topic is a human-readable form of the XML definition file: [ardupilotmega.xml](https://github.com/mavlink/mavlink/blob/master/message_definitions/v1.0/ardupilotmega.xml).
> **Warning** The ArduPilot MAVLink fork of [ardupilotmega.xml](https://github.com/ArduPilot/mavlink/blob/master/message_definitions/v1.0/ardupilotmega.xml) may contain messages that have not yet been merged into this documentation.
"""
elif filename == 'development':
insert_text+="""
# Dialect: development
This dialect contains messages that are proposed for inclusion in the [standard set](standard.md), in order to ease development of prototype implementations.
They should be considered a 'work in progress' and not included in production builds.
This topic is a human-readable form of the XML definition file: [development.xml](https://github.com/mavlink/mavlink/blob/master/message_definitions/v1.0/development.xml).
"""
elif filename == 'all':
insert_text+="""
# Dialect: all
This dialect is intended to `include` all other [dialects](../messages/README.md) in the mavlink/mavlink repository (including [external dialects](https://github.com/mavlink/mavlink/tree/master/external/dialects#mavlink-external-dialects)).
Dialects that are in **all.xml** are guaranteed to not have clashes in messages, enums, enum ids, and MAV_CMDs.
This ensures that:
- Systems based on these dialects can co-exist on the same MAVLink network.
- A Ground Station might (optionally) use libraries generated from **all.xml** to communicate using any of the dialects.
> **Warning** New dialect files in the official repository must be added to **all.xml** and restrict themselves to using ids in their own allocated range.
A few older dialects are not included because these operate in completely closed networks or because they are only used for tests.
This topic is a human-readable form of the XML definition file: [all.xml](https://github.com/mavlink/mavlink/blob/master/message_definitions/v1.0/all.xml).
"""
else:
insert_text+='\n# Dialect: %s' % filename.rsplit('.',1)[0]
insert_text+='\n\n*This is a human-readable form of the XML definition file: [%s](https://github.com/mavlink/mavlink/blob/master/message_definitions/v1.0/%s).*' % (filename, filename)
insert_text+="""
<span></span>
> **Note** MAVLink 2 messages have an ID > 255 and are marked up using **(MAVLink 2)** in their description.
<span id="mav2_extension_field"></span>
> **Note** MAVLink 2 extension fields that have been added to MAVLink 1 messages are displayed in blue.
<style>
td {
vertical-align:top;
}
</style>
"""
# Include HTML in generated content
insert_text+='\n\n{%% include "_html/%s.html" %%}' % filename
input_html=insert_text+'\n\n'+input_html
if filename == 'common':
input_html+="""
# Minimal.xml {#minimal}
The minimal set of definitions required for any MAVLink system are included from [minimal.xml](minimal.md).
These are listed below.
{% include "_html/minimal.html" %}"""
#print(input_html)
return input_html
dialect_files = set()
all_files = set()
for subdir, dirs, files in os.walk(xml_message_definitions_dir_name):
#Generate html for all the XML files
for file in files:
print(file)
if not file.endswith('.xml'): #only process xml files.
continue
xml_file_name = xml_message_definitions_dir_name+file
with open(xml_file_name, 'r') as content_file:
xml_file = content_file.read()
dom = ET.fromstring(xml_file)
transform = ET.XSLT(xslt)
newdom = transform(dom)
#Prettify the HTML using BeautifulSoup
soup=bs(str(newdom), "lxml")
prettyHTML=soup.prettify()
#Strip out text before <html> tag in XSLT output
prettyHTML=strip_text_before_string(prettyHTML,'<html>')
prettyHTML = fix_content_in_tags(prettyHTML)
#Replace invalid file extensions (workaround for xslt)
prettyHTML = fix_include_file_extension(prettyHTML)
#Replace space markers with intentional space
prettyHTML = fix_replace_space_marker(prettyHTML)
#Fix up links to external dialects to not be links
prettyHTML = fix_external_dialect_link(prettyHTML)
#Fix up plain text mav symbols to be internal links
prettyHTML = fix_add_implicit_links_items(prettyHTML)
#Write output html file
output_file_name_html = file.rsplit('.',1)[0]+".html"
output_file_name_html_withdir = output_dir_html+output_file_name_html
print("Output filename (html): %s" % output_file_name_html)
with open(output_file_name_html_withdir, 'w') as out:
out.write(prettyHTML)
# Create sortable list of output file names
#Write output markdown file
output_file_name_prefix = file.rsplit('.',1)[0]
all_files.add(output_file_name_prefix)
if not file=='common.xml' and not file=='standard.xml' and not file=='minimal.xml':
dialect_files.add(output_file_name_prefix)
# Generate the markdown files
for file_prefix in all_files:
print(file_prefix)
markdown_text=''
#Inject a heading and doc-type intro (markdown format)
markdown_text = inject_top_level_docs(markdown_text,file_prefix)
output_file_name_md_withdir = output_dir+file_prefix+'.md'
print("Output filename (md): %s" % output_file_name_md_withdir)
with open(output_file_name_md_withdir, 'w') as out:
out.write(markdown_text)
for the_file in sorted(dialect_files):
index_text+='\n* [%s.xml](%s.md)' % (the_file,the_file)
index_text+='\n\n'
index_text+=index_text_trailer
#Write the index
with open(index_file_name, 'w') as content_file:
content_file.write(index_text)
print("COMPLETED")
```
#### File: pymavlink/examples/wptogpx.py
```python
from __future__ import print_function
from builtins import range
import time
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("wpfiles", metavar="WP_FILE", nargs="+")
args = parser.parse_args()
from pymavlink import mavwp
def wp_to_gpx(infilename, outfilename):
'''convert a wp file to a GPX file'''
wp = mavwp.MAVWPLoader()
wp.load(infilename)
outf = open(outfilename, mode='w')
def process_wp(w, i):
t = time.localtime(i)
outf.write('''<wpt lat="%s" lon="%s">
<ele>%s</ele>
<cmt>WP %u</cmt>
</wpt>
''' % (w.x, w.y, w.z, i))
def add_header():
outf.write('''<?xml version="1.0" encoding="UTF-8"?>
<gpx
version="1.0"
creator="pymavlink"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://www.topografix.com/GPX/1/0"
xsi:schemaLocation="http://www.topografix.com/GPX/1/0 http://www.topografix.com/GPX/1/0/gpx.xsd">
''')
def add_footer():
outf.write('''
</gpx>
''')
add_header()
count = 0
for i in range(wp.count()):
w = wp.wp(i)
if w.frame == 3:
w.z += wp.wp(0).z
if w.command == 16:
process_wp(w, i)
count += 1
add_footer()
print("Created %s with %u points" % (outfilename, count))
for infilename in args.wpfiles:
outfilename = infilename + '.gpx'
wp_to_gpx(infilename, outfilename)
```
#### File: javascript/test/make_tests.py
```python
import subprocess
import sys
# now tested and executes with this simple matrix of two mavtypes and two mav versions
cmddir = '../../../generator/C/test/posix/'
mavtypes = ['ardupilotmega','common']
versions = ['1.0','2.0']
cmds = []
#..so the C binding cmds executed/wrapped are: 'testmav1.0_ardupilotmega', 'testmav2.0_ardupilotmega', 'testmav1.0_common', 'testmav2.0_common'
#---------------------------------------------------------------------------------------
template1 = '''
it('id${ID} encode and decode ${NAME} from C using ${MAVTYPE}/${VERSION} ${SIGNED}', function() {
this.mav.seq = ${SEQ};
this.mav.srcSystem=${SRCSYS};
this.mav.srcComponent=${SRCCOMP};
'''
signing_extra_template = '''
//-------- START codeblock only for signed packets----------------
this.mav.seq = ${SEQ}-1;
// relevant to how we pass-in the Long object/s to jspack, we'll assume the calling user is smart enough to know that.
var wrap_long = function (someLong) {
return [someLong.getLowBitsUnsigned(), someLong.getHighBitsUnsigned()];
}
this.mav.signing.secret_key = new Buffer.from([ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 ]) ; // matches secret key in testmav.c
this.mav.signing.link_id = 0 ; // 1 byte // matches link_id in testmav.c
//this.mav.signing.timestamp = new Buffer.from([ 0,0,0,0,0,${TS}]); // 6 bytes // matches timestamp in testmav.c
this.mav.signing.timestamp = ${TS}; // at most 48 bits , fits in a native js number - matches timestamp in testmav.c
this.mav.signing.sign_outgoing = true; // todo false this
var epoch_offset = 1420070400;
var x= Long.fromString("${TS}", true);
var long_timestamp = wrap_long(x);
var target_system = 255; // the python impl in mavproxy uses 255 here , so we do, it could be this.sysid
var target_component = 0;
var secret_key = this.mav.signing.secret_key ;
MAVLink20Processor.prototype.send = function(mavmsg) {
buf = mavmsg.pack(this);
// no actual send here
this.seq = (this.seq + 1) % 256;
this.total_packets_sent +=1;
this.total_bytes_sent += buf.length;
}
var link_id =0;
var srcSystem=this.mav.srcSystem;
var srcComponent=this.mav.srcComponent;
stream_key = new Array(link_id,srcSystem,srcComponent).toString();
this.mav.signing.stream_timestamps[stream_key] = ${TS};
this.mav.signing.timestamp.should.eql(${TS}); //ts before setup
var setup_signing = new mavlink20.messages.setup_signing(target_system, target_component, secret_key, long_timestamp);
this.mav.send(setup_signing,this.sysid);
setup_signing.secret_key.should.eql(new Buffer.from([ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 ]) );
setup_signing.initial_timestamp.should.eql([${TS},0]);
//this.mav.signing.timestamp.should.eql(new Buffer.from([0,0,0,0,0,${TS}]));
this.mav.signing.timestamp.should.eql(${TS}+1); // ts after setup
this.mav.signing.link_id.should.eql(0);
this.mav.signing.sign_outgoing.should.eql(true);
//-------- END codeblock only for signed packets----------------
'''
template2 = '''
var test_${NAME} = this.tests.test_${NAME}()[0]; // get the assembled test object with test data already set, override just the min we need to do this test
//--- you can uncomment any of these to change the test, but you'll need to change the reference buffer to the right result too
//${FIELDS}
//---
// Create a buffer that matches what the Python version of MAVLink creates
var reference = new Buffer.from([${BUFFER}]);
this.mav.signing.timestamp = ${TS};// force ts to be correct, right before the pack() that matters
var p = test_${NAME}.pack(this.mav);
// console.log(p);
// p.forEach( x => { process.stdout.write( x.toString() ); process.stdout.write(" ") } ); process.stdout.write("\\n");
// p.forEach( x => { process.stdout.write( x.toString(16) ); process.stdout.write(" ") } ); process.stdout.write("\\n");
test_${NAME}._header.seq.should.eql(${SEQ});
test_${NAME}._header.srcSystem.should.eql(${SRCSYS});
test_${NAME}._header.srcComponent.should.eql(${SRCCOMP});
test_${NAME}._header.msgId.should.eql(test_${NAME}._id);
${SIGNED}test_${NAME}._header.incompat_flags.should.eql(1);
${UNSIGNED}test_${NAME}._header.incompat_flags.should.eql(0);
test_${NAME}._header.compat_flags.should.eql(0);
new Buffer.from(p).should.eql(reference);
});
'''
templatestart = '''
//
// (auto-generated by make_tests.py ), do not edit.
// generator by <EMAIL>
//
// Copyright David 'Buzz' Bussenschutt July 2020
// Released under GNU GPL version 3 or later
//
// you can regenerate this file and its dependencies and run its tests, by executing the following: "cd pymavlink/generator/javascript ; npm test"
// or see make_tests.py which created this.
//
should = require('should');
var Long = require('long');
'''
templateheader = '''
//--------------------------------------------------------------------------------------------------------------------------------------------------------
describe('end-to-end node byte-level tests of ${MAVTYPE}/${VERSION} against C impl', function() {
beforeEach(function() {
var {mavlink${VERS}, MAVLink${VERS}Processor} = require('../implementations/mavlink_${MAVTYPE}_v${VERSION}/mavlink.js');// hardcoded here by make_tests.py generator
this.mav = new MAVLink${VERS}Processor(null, 42, 150); // hardcoded here by make_tests.py generator
this.tests = require('../implementations/mavlink_${MAVTYPE}_v${VERSION}/mavlink.tests.js');//// hardcoded here by make_tests.py generator
// be sure the test library is using the right version before we call into it
this.tests.set_mav(this.mav);
});'''
templatefooter = '''
});'''
#------------------------------------------------
def is_packet_and_field_in_long_list(pname,fname):
global llines
for l in llines:
if ( pname+'.'+fname in l ) :
return (True, l)
return (False, '')
testid = 1;
# for each of the 1.0,2.0 and common,ardupilotmega combos write tests
def do_make_output(mt,v,lines):
global testid
t = templateheader.replace('${MAVTYPE}',mt)
t = t.replace('${VERSION}',v)
t = t.replace('${VERS}',v.replace('.',''))
print(t)
last_line = ''
for line in lines:
if line.startswith('fd '): # mavlink2 start byte as human-readable hex, eg 'fd 08 7e 2a 0b e2 00 00 88 41 00 00 34 42 30 93 '
last_line = '2.0'
if v == '1.0': # if param 'v' requested mav1 data and we see mav2 data, ignore it
continue
hexdata = line.strip().replace(' ',', 0x')
hexdata = '0x'+hexdata
# check if signing bit is set on this packet: 0xfd, 0x09, 0x01 <-- that 1
signchar = hexdata[15]
signint = int(signchar,16)
signbit = signint % 2; # lowest bit to true/false
if line.startswith('fe '): # mavlink1 start byte as human-readable hex, eg 'fe 08 7e 2a 0b e2 00 00 88 41 00 00 34 42 30 93 '
last_line = '1.0'
if v == '2.0': # if param 'v' requested mav2 data and we see mav1 data, ignore it
continue
hexdata = line.strip().replace(' ',', 0x')
hexdata = '0x'+hexdata
signbit = False
if line.startswith('sysid:'): # packet details human-readable eg 'sysid:42 compid:11 seq:126 RPM { rpm1: 17.000000 rpm2: 45.000000 }'
# skip parsing data if it's not this parser
if last_line != v:
continue
fields = line.split(' ')
sysid = fields[0].split(':')[1]
compid = fields[1].split(':')[1]
seq = fields[2].split(':')[1]
# lines without sign_ts, the { is earlier
if fields[4] == '{':
sign_ts = '0'
packetname = fields[3]
more = fields[4:]
else:
sign_ts = fields[3].split(':')[1]
packetname = fields[4]
more = fields[5:]
packetname = packetname.lower()
arraystarted = False
for i,x in enumerate(more):
if x == '[':
arraystarted = True
if x == ']':
arraystarted = False
more[i] = '], \n '
if not arraystarted and x == '':
more[i] = ',\n '
fixed = ''.join(more)
fixed = fixed.replace(',]',']') # drop unneeded comma from end of arrays
fixed = fixed.replace('{','');
fixed = fixed.replace('}','');
fixed = fixed.replace(':','=') # move : to =
import re
fixed = re.sub(r'(\'.*?\')', '\\1,\n ', fixed)
# now iterate over them as parsed lines safely
newlines = []
for fieldline in fixed.split('\n'):
if fieldline.strip() == '':
continue
# a little miniparser here to modify things after the = sign to insert our own test values, not the included ones, but leave
# value wrapping and other surrounding casting functions as-is.
( field, value) = fieldline.split('=');
if not value.startswith('['):
value = value.replace(',','')
field = field.replace(' ','')
else: # array
value = value.split('[')[1].split(']')[0]; # stuff inside the brackets, but not the brackets
(retval,match) = is_packet_and_field_in_long_list(packetname,field);
if retval == True:
parts = match.split('=');
after_equals = parts[1];
before_semicolon = after_equals.split(';')[0]
# determine old test value in the 'before_semicolon' line segment:
# a little miniparser here to modify things after the = sign to insert our own test values, not the included ones, but leave
# value wrapping and other surrounding casting functions as-is.
if not before_semicolon.replace(' ','').startswith('['):
if '"' in before_semicolon:
oldvalue = before_semicolon.split('"')[1]; # stuff inside the ", but not the "
elif 'Array' in before_semicolon:
oldvalue = before_semicolon.split('[')[1].split(']')[0]; # [1234]
else:
oldvalue = before_semicolon; # unwrapped numbers etc
else: # array
oldvalue = before_semicolon.split('[')[1].split(']')[0]; # stuff inside the brackets, but not the brackets
line_minus_data = before_semicolon.replace(oldvalue,value);
newlines.append(" test_"+packetname+"."+field+'='+line_minus_data+';')
else:
newlines.append(" test_"+packetname+"."+field+'='+value+';')
fixed = '\n//'.join(newlines)
t = template1
t = t.replace('${MAVTYPE}',mt)
t = t.replace('${VERSION}',v)
t = t.replace('${NAME}',packetname)
t = t.replace('${ID}',str(testid))
testid = testid+1
t = t.replace('${SEQ}',seq)
t = t.replace('${SRCSYS}',sysid)
t = t.replace('${SRCCOMP}',compid)
t = t.replace('${BUFFER}',hexdata)
if signbit:
t = t.replace('${SIGNED}','signed')
else :
t = t.replace('${SIGNED}','')
print(t)
if signbit:
t = signing_extra_template
t = t.replace('${SEQ}',seq)
t = t.replace('${TS}',sign_ts)
print(t)
t = template2
t = t.replace('${NAME}',packetname)
t = t.replace('${SEQ}',seq)
t = t.replace('${SRCSYS}',sysid)
t = t.replace('${SRCCOMP}',compid)
t = t.replace('${BUFFER}',hexdata)
t = t.replace('${FIELDS}',fixed)
t = t.replace('${TS}',sign_ts)
if signbit:
t = t.replace('${SIGNED}','/*signed*/ ')
t = t.replace('${UNSIGNED}','//unsigned ')
else :
t = t.replace('${SIGNED}','//signed ')
t = t.replace('${UNSIGNED}','/*unsigned*/ ')
print(t)
print('//------------------------------------------------------')
# append footer
t = templatefooter.replace('${MAVTYPE}',mt)
t = t.replace('${VERSION}',v)
t = t.replace('${VERS}',v.replace('.',''))
print(t)
#---------------------------------------------------------------------------------------
# example line from file:
# test_actuator_output_status.time_usec = Long.fromNumber(93372036854775807, true); // fieldtype: uint64_t isarray: False
llines = []
def make_long_lookup_table(mt,v):
global llines
_cmd = 'egrep "(wrap_long|new.*Array)" ../implementations/mavlink_'+mt+'_v'+v+'/mavlink.tests.js'; # relevant lines only
_result = subprocess.run(_cmd, stdout=subprocess.PIPE, shell=True)
_data = _result.stdout.decode('utf-8')
llines = _data.split("\n")
#return lines
#---------------------------------------------------------------------------------------
print(templatestart)
for mt in mavtypes:
for v in versions:
cmd = cmddir+'testmav'+v+'_'+mt
result = subprocess.run(cmd, stdout=subprocess.PIPE)
data = result.stdout.decode('utf-8')
lines = data.split("\n")
llines = []
make_long_lookup_table(mt, v);
do_make_output(mt,v,lines)
print("//output done")
sys.exit()
```
#### File: pymavlink/tests/test_mavlogdump.py
```python
from __future__ import absolute_import, print_function
import unittest
import os
import pkg_resources
import sys
class MAVLogDumpTest(unittest.TestCase):
"""
Class to test mavlogdump
"""
def __init__(self, *args, **kwargs):
"""Constructor, set up some data that is reused in many tests"""
super(MAVLogDumpTest, self).__init__(*args, **kwargs)
def test_dump_same(self):
"""Test dump of file is what we expect"""
test_filename = "test.BIN"
test_filepath = pkg_resources.resource_filename(__name__,
test_filename)
dump_filename = "tmp.dump"
os.system("mavlogdump.py %s >%s" % (test_filepath, dump_filename))
with open(dump_filename) as f:
got = f.read()
possibles = ["test.BIN.py3.dumped",
"test.BIN.dumped"]
success = False
for expected in possibles:
expected_filepath = pkg_resources.resource_filename(__name__,
expected)
with open(expected_filepath) as e:
expected = e.read()
if expected == got:
success = True
assert success
if __name__ == '__main__':
unittest.main()
```
#### File: pymavlink/tools/magfit_rotation_gps.py
```python
from __future__ import print_function
from builtins import range
from builtins import object
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--no-timestamps",dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_argument("--declination", default=0.0, type=float, help="magnetic declination")
parser.add_argument("--min-speed", default=4.0, type=float, help="minimum GPS speed")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil
from pymavlink.rotmat import Vector3, Matrix3
from math import radians, degrees, sin, cos, atan2
class Rotation(object):
def __init__(self, roll, pitch, yaw, r):
self.roll = roll
self.pitch = pitch
self.yaw = yaw
self.r = r
def in_rotations_list(rotations, m):
for r in rotations:
m2 = m.transposed() * r.r
(r, p, y) = m2.to_euler()
if (abs(r) < radians(1) and
abs(p) < radians(1) and
abs(y) < radians(1)):
return True
return False
def generate_rotations():
'''generate all 90 degree rotations'''
rotations = []
for yaw in [0, 90, 180, 270]:
for pitch in [0, 90, 180, 270]:
for roll in [0, 90, 180, 270]:
m = Matrix3()
m.from_euler(radians(roll), radians(pitch), radians(yaw))
if not in_rotations_list(rotations, m):
rotations.append(Rotation(roll, pitch, yaw, m))
return rotations
def angle_diff(angle1, angle2):
'''give the difference between two angles in degrees'''
ret = angle1 - angle2
if ret > 180:
ret -= 360
if ret < -180:
ret += 360
return ret
def heading_difference(mag, attitude, declination):
r = attitude.roll
p = attitude.pitch
headX = mag.x*cos(p) + mag.y*sin(r)*sin(p) + mag.z*cos(r)*sin(p)
headY = mag.y*cos(r) - mag.z*sin(r)
heading = degrees(atan2(-headY,headX)) + declination
heading2 = degrees(attitude.yaw)
return abs(angle_diff(heading, heading2))
def add_errors(mag, attitude, total_error, rotations):
for i in range(len(rotations)):
r = rotations[i].r
rmag = r * mag
total_error[i] += heading_difference(rmag, attitude, args.declination)
def magfit(logfile):
'''find best magnetometer rotation fit to a log file'''
print("Processing log %s" % filename)
mlog = mavutil.mavlink_connection(filename, notimestamps=args.notimestamps)
# generate 90 degree rotations
rotations = generate_rotations()
print("Generated %u rotations" % len(rotations))
count = 0
total_error = [0]*len(rotations)
attitude = None
gps = None
# now gather all the data
while True:
m = mlog.recv_match()
if m is None:
break
if m.get_type() == "ATTITUDE":
attitude = m
if m.get_type() == "GPS_RAW_INT":
gps = m
if m.get_type() == "RAW_IMU":
mag = Vector3(m.xmag, m.ymag, m.zmag)
if attitude is not None and gps is not None and gps.vel > args.min_speed*100 and gps.fix_type>=3:
add_errors(mag, attitude, total_error, rotations)
count += 1
best_i = 0
best_err = total_error[0]
for i in range(len(rotations)):
r = rotations[i]
print("(%u,%u,%u) err=%.2f" % (
r.roll,
r.pitch,
r.yaw,
total_error[i]/count))
if total_error[i] < best_err:
best_i = i
best_err = total_error[i]
r = rotations[best_i]
print("Best rotation (%u,%u,%u) err=%.2f" % (
r.roll,
r.pitch,
r.yaw,
best_err/count))
for filename in args.logs:
magfit(filename)
```
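`generate_rotations` enumerates all 90-degree Euler combinations and de-duplicates near-identical matrices, which should leave the 24 rotational symmetries of a cube. A quick sketch, assuming the helper definitions above are in scope (for example pasted into an interactive session):
```python
# Sketch: inspecting the de-duplicated 90-degree rotation set.
rotations = generate_rotations()
print(len(rotations))  # expected: 24 distinct orientations
for rotation in rotations[:3]:
    print(rotation.roll, rotation.pitch, rotation.yaw)
```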
#### File: pymavlink/tools/mavgraph.py
```python
from __future__ import print_function
from builtins import input
from builtins import range
import datetime
import matplotlib
import os
import re
import sys
import time
from math import *
try:
from pymavlink.mavextra import *
except:
print("WARNING: Numpy missing, mathematical notation will not be supported.")
if sys.version_info[0] >= 3:
text_types = frozenset([str,])
else:
text_types = frozenset([unicode, str])
# cope with rename of raw_input in python3
try:
input = raw_input
except NameError:
pass
colourmap = {
'ardupilot' : {
'MANUAL' : (1.0, 0, 0),
'AUTO' : ( 0, 1.0, 0),
'LOITER' : ( 0, 0, 1.0),
'FBWA' : (1.0, 0.5, 0),
'RTL' : ( 1, 0, 0.5),
'STABILIZE' : (0.5, 1.0, 0),
'LAND' : ( 0, 1.0, 0.5),
'STEERING' : (0.5, 0, 1.0),
'HOLD' : ( 0, 0.5, 1.0),
'ALT_HOLD' : (1.0, 0.5, 0.5),
'CIRCLE' : (0.5, 1.0, 0.5),
'POSITION' : (1.0, 0.0, 1.0),
'GUIDED' : (0.5, 0.5, 1.0),
'ACRO' : (1.0, 1.0, 0),
'CRUISE' : ( 0, 1.0, 1.0)
},
'px4' : {
'MANUAL' : (1.0, 0, 0),
'SEATBELT' : ( 0.5, 0.5, 0),
'EASY' : ( 0, 1.0, 0),
'AUTO' : ( 0, 0, 1.0),
'UNKNOWN' : ( 1.0, 1.0, 1.0)
}
}
colourmap["apm"] = colourmap["ardupilot"]
edge_colour = (0.1, 0.1, 0.1)
lowest_x = None
highest_x = None
def plotit(x, y, fields, colors=[]):
'''plot a set of graphs using date for x axis'''
global lowest_x, highest_x
pylab.ion()
fig = pylab.figure(num=1, figsize=(12,6))
ax1 = fig.gca()
ax2 = None
xrange = 0.0
for i in range(0, len(fields)):
if len(x[i]) == 0: continue
if lowest_x is None or x[i][0] < lowest_x:
lowest_x = x[i][0]
if highest_x is None or x[i][-1] > highest_x:
highest_x = x[i][-1]
if highest_x is None or lowest_x is None:
return
xrange = highest_x - lowest_x
xrange *= 24 * 60 * 60
formatter = matplotlib.dates.DateFormatter('%H:%M:%S')
interval = 1
intervals = [ 1, 2, 5, 10, 15, 30, 60, 120, 240, 300, 600,
900, 1800, 3600, 7200, 5*3600, 10*3600, 24*3600 ]
for interval in intervals:
if xrange / interval < 15:
break
locator = matplotlib.dates.SecondLocator(interval=interval)
if not args.xaxis:
ax1.xaxis.set_major_locator(locator)
ax1.xaxis.set_major_formatter(formatter)
empty = True
ax1_labels = []
ax2_labels = []
for i in range(0, len(fields)):
if len(x[i]) == 0:
print("Failed to find any values for field %s" % fields[i])
continue
if i < len(colors):
color = colors[i]
else:
color = 'red'
(tz, tzdst) = time.tzname
if axes[i] == 2:
if ax2 is None:
ax2 = ax1.twinx()
ax = ax2
if not args.xaxis:
ax2.xaxis.set_major_locator(locator)
ax2.xaxis.set_major_formatter(formatter)
label = fields[i]
if label.endswith(":2"):
label = label[:-2]
ax2_labels.append(label)
else:
ax1_labels.append(fields[i])
ax = ax1
if args.xaxis:
if args.marker is not None:
marker = args.marker
else:
marker = '+'
if args.linestyle is not None:
linestyle = args.linestyle
else:
linestyle = 'None'
ax.plot(x[i], y[i], color=color, label=fields[i],
linestyle=linestyle, marker=marker)
else:
if args.marker is not None:
marker = args.marker
else:
marker = 'None'
if args.linestyle is not None:
linestyle = args.linestyle
else:
linestyle = '-'
if len(y[i]) > 0 and type(y[i][0]) in text_types:
# assume this is a piece of text to be rendered at a point in time
last_text_time = -1
last_text = None
for n in range(0, len(x[i])):
if last_text is None:
last_text = "[" + y[i][n] + "]"
last_text_time = x[i][n]
elif x[i][n] == last_text_time:
last_text += "[" + y[i][n] + "]"
else:
ax.text(x[i][n], 10, last_text,
rotation=90,
alpha=0.3,
verticalalignment='baseline')
last_text = None
last_text_time = x[i][n]
if last_text is not None:
ax.text(x[i][n], 10, last_text,
rotation=90,
alpha=0.3,
verticalalignment='baseline')
else:
ax.plot_date(x[i], y[i], color=color, label=fields[i],
linestyle=linestyle, marker=marker, tz=None)
empty = False
if args.flightmode is not None:
for i in range(len(modes)-1):
c = colourmap[args.flightmode].get(modes[i][1], edge_colour)
ax1.axvspan(modes[i][0], modes[i+1][0], fc=c, ec=edge_colour, alpha=0.1)
c = colourmap[args.flightmode].get(modes[-1][1], edge_colour)
ax1.axvspan(modes[-1][0], ax1.get_xlim()[1], fc=c, ec=edge_colour, alpha=0.1)
if ax1_labels != []:
ax1.legend(ax1_labels,loc=args.legend)
if ax2_labels != []:
ax2.legend(ax2_labels,loc=args.legend2)
if empty:
print("No data to graph")
return
return fig
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--no-timestamps", dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_argument("--planner", action='store_true', help="use planner file format")
parser.add_argument("--condition", default=None, help="select packets by a condition")
parser.add_argument("--labels", default=None, help="comma separated field labels")
parser.add_argument("--legend", default='upper left', help="default legend position")
parser.add_argument("--legend2", default='upper right', help="default legend2 position")
parser.add_argument("--marker", default=None, help="point marker")
parser.add_argument("--linestyle", default=None, help="line style")
parser.add_argument("--xaxis", default=None, help="X axis expression")
parser.add_argument("--multi", action='store_true', help="multiple files with same colours")
parser.add_argument("--zero-time-base", action='store_true', help="use Z time base for DF logs")
parser.add_argument("--flightmode", default=None,
help="Choose the plot background according to the active flight mode of the specified type, e.g. --flightmode=apm for ArduPilot or --flightmode=px4 for PX4 stack logs. Cannot be specified with --xaxis.")
parser.add_argument("--dialect", default="ardupilotmega", help="MAVLink dialect")
parser.add_argument("--output", default=None, help="provide an output format")
parser.add_argument("--timeshift", type=float, default=0, help="shift time on first graph in seconds")
parser.add_argument("logs_fields", metavar="<LOG or FIELD>", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil
if args.flightmode is not None and args.xaxis:
print("Cannot request flightmode backgrounds with an x-axis expression")
sys.exit(1)
if args.flightmode is not None and args.flightmode not in colourmap:
print("Unknown flight controller '%s' in specification of --flightmode (choose from %s)" % (args.flightmode, ",".join(colourmap.keys())))
sys.exit(1)
if args.output is not None:
matplotlib.use('Agg')
import pylab
filenames = []
fields = []
for f in args.logs_fields:
if os.path.exists(f):
filenames.append(f)
else:
fields.append(f)
msg_types = set()
multiplier = []
field_types = []
colors = [ 'red', 'green', 'blue', 'orange', 'olive', 'black', 'grey', 'yellow', 'brown', 'darkcyan', 'cornflowerblue', 'darkmagenta', 'deeppink', 'darkred']
# work out msg types we are interested in
x = []
y = []
modes = []
axes = []
first_only = []
re_caps = re.compile('[A-Z_][A-Z0-9_]+')
for f in fields:
caps = set(re.findall(re_caps, f))
msg_types = msg_types.union(caps)
field_types.append(caps)
y.append([])
x.append([])
axes.append(1)
first_only.append(False)
def add_data(t, msg, vars, flightmode):
'''add some data'''
mtype = msg.get_type()
if args.flightmode is not None and (len(modes) == 0 or modes[-1][1] != flightmode):
modes.append((t, flightmode))
if mtype not in msg_types:
return
for i in range(0, len(fields)):
if mtype not in field_types[i]:
continue
f = fields[i]
if f.endswith(":2"):
axes[i] = 2
f = f[:-2]
if f.endswith(":1"):
first_only[i] = True
f = f[:-2]
v = mavutil.evaluate_expression(f, vars)
if v is None:
continue
if args.xaxis is None:
xv = t
else:
xv = mavutil.evaluate_expression(args.xaxis, vars)
if xv is None:
continue
y[i].append(v)
x[i].append(xv)
def process_file(filename, timeshift):
'''process one file'''
print("Processing %s" % filename)
mlog = mavutil.mavlink_connection(filename, notimestamps=args.notimestamps, zero_time_base=args.zero_time_base, dialect=args.dialect)
vars = {}
all_messages = {}
while True:
msg = mlog.recv_match(args.condition)
if msg is None: break
try:
tdays = matplotlib.dates.date2num(datetime.datetime.fromtimestamp(msg._timestamp+timeshift))
except ValueError:
# this can happen if the log is corrupt
# ValueError: year is out of range
break
all_messages[msg.get_type()] = msg
add_data(tdays, msg, all_messages, mlog.flightmode)
if len(filenames) == 0:
print("No files to process")
sys.exit(1)
if args.labels is not None:
labels = args.labels.split(',')
if len(labels) != len(fields)*len(filenames):
print("Number of labels (%u) must match number of fields (%u)" % (
len(labels), len(fields)*len(filenames)))
sys.exit(1)
else:
labels = None
timeshift = args.timeshift
for fi in range(0, len(filenames)):
f = filenames[fi]
process_file(f, timeshift)
timeshift = 0
for i in range(0, len(x)):
if first_only[i] and fi != 0:
x[i] = []
y[i] = []
if labels:
lab = labels[fi*len(fields):(fi+1)*len(fields)]
else:
lab = fields[:]
if args.multi:
col = colors[:]
else:
col = colors[fi*len(fields):]
fig = plotit(x, y, lab, colors=col)
for i in range(0, len(x)):
x[i] = []
y[i] = []
if args.output is None:
pylab.show()
pylab.draw()
input('press enter to exit....')
else:
fname, fext = os.path.splitext(args.output)
if fext == '.html':
import mpld3
html = mpld3.fig_to_html(fig)
f_out = open(args.output, 'w')
f_out.write(html)
f_out.close()
else:
pylab.legend(loc=2,prop={'size':8})
pylab.savefig(args.output, bbox_inches='tight', dpi=200)
```
#### File: pymavlink/tools/serial_control_to_shell.py
```python
import optparse
import os
import pymavlink
import re
import select
import subprocess
import time
import fcntl
from pymavlink import mavutil
class SerialControlToShell(object):
'''reads SERIAL_CONTROL packets and passes them to a shell, returning textual results'''
def __init__(self, connection_string, system_id=1, component_id=10):
self.connection_string = connection_string
self.serial_control_dev = mavutil.mavlink.SERIAL_CONTROL_DEV_SHELL
self.mav = mavutil.mavlink_connection(
self.connection_string,
source_system=system_id,
source_component=component_id,
)
self.mixed_output_from_shell = ""
self.last_heartbeat_sent = 0
def send_heartbeats(self):
now = time.time()
if now - self.last_heartbeat_sent > 0.5:
self.last_heartbeat_sent = now
self.mav.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_GCS,
mavutil.mavlink.MAV_AUTOPILOT_INVALID,
0,
0,
0)
def debug(self, msg):
print("DEBUG: %s" % msg)
def open_shell(self):
self.shell = subprocess.Popen(["/bin/bash"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd="/tmp")
fcntl.fcntl(self.shell.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
fcntl.fcntl(self.shell.stderr.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
def run(self):
self.open_shell()
os.environ["PYTHONUNBUFFERED"]="1"
while True:
m = self.mav.recv_match(type="SERIAL_CONTROL", timeout=1000)
self.send_heartbeats()
shell_failure = self.shell.poll()
if shell_failure is not None:
self.debug("Shell is dead, restarting (%s)" % shell_failure)
self.open_shell()
if select.select([self.shell.stderr, self.shell.stdout],[],[],0)[0] != []:
try:
self.mixed_output_from_shell += self.shell.stderr.read()
except IOError as e:
if e.errno != 11:
raise
try:
self.mixed_output_from_shell += self.shell.stdout.read()
except IOError as e:
if e.errno != 11:
raise
while len(self.mixed_output_from_shell):
data = self.mixed_output_from_shell[:70]
self.mixed_output_from_shell = self.mixed_output_from_shell[70:]
data_len = len(data)
data = [ord(x) for x in list(data)]
data = data + ([0] * (70-len(data)))
self.mav.mav.serial_control_send(
self.serial_control_dev,
mavutil.mavlink.SERIAL_CONTROL_FLAG_REPLY,
0, # timeout
0, # baud
data_len,
data
)
if m is None:
time.sleep(0.1)
continue
if m.device != self.serial_control_dev:
continue
if m.count == 0:
continue
b = m.data[:m.count]
text = "".join([chr(a) for a in b])
text = re.sub("\r\n", "\n", text) # not quite right, doesn't take into account \r at end of data
self.shell.stdin.write(text)
if __name__ == '__main__':
parser = optparse.OptionParser("bisect.py ")
parser.add_option("", "--system-id",
type=int,
help="This script's system ID",
default=1,
)
parser.add_option("", "--component-id",
type=int,
help="This script's component ID",
default=10,
)
(opts, args) = parser.parse_args()
s = SerialControlToShell(
args[0],
system_id=opts.system_id,
component_id=opts.component_id,
)
s.run()
``` |
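The core trick in the class above is keeping the shell's pipes non-blocking so the MAVLink receive loop never stalls on a read. A POSIX-only sketch of that pattern in isolation (the command sent to the shell is just an example):
```python
import fcntl
import os
import select
import subprocess
import time

proc = subprocess.Popen(["/bin/bash"],
                        stdin=subprocess.PIPE,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)
# switch stdout/stderr to non-blocking, as open_shell() does above
for stream in (proc.stdout, proc.stderr):
    fcntl.fcntl(stream.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)

proc.stdin.write(b"echo hello\n")
proc.stdin.flush()
time.sleep(0.2)

output = b""
readable, _, _ = select.select([proc.stdout, proc.stderr], [], [], 0)
for stream in readable:
    output += os.read(stream.fileno(), 4096)  # won't block: select said ready
print(output.decode())
proc.terminate()
```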
{
"source": "jinrith27/Realestate",
"score": 2
} |
#### File: realestate/api/models.py
```python
import binascii
from django.contrib.auth.models import User
import os
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=User)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
class ApiKeys(models.Model):
description = models.CharField(max_length=100, blank=True, default='', verbose_name=_('Description'))
key = models.CharField(max_length=40, primary_key=True)
created = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = _('API Key')
verbose_name_plural = _('API Keys')
def save(self, *args, **kwargs):
if not self.key:
self.key = self.generate_key()
return super(ApiKeys, self).save(*args, **kwargs)
def generate_key(self):
return binascii.hexlify(os.urandom(20))
def __unicode__(self):
return self.key
```
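One detail worth flagging in `generate_key` above: on Python 3, `binascii.hexlify` returns bytes, so the value is usually decoded before being stored in a `CharField`. A small self-contained check:
```python
import binascii
import os

def generate_key():
    # 20 random bytes -> 40 hex characters, matching max_length=40 above
    return binascii.hexlify(os.urandom(20)).decode('ascii')

key = generate_key()
assert len(key) == 40
print(key)
```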
#### File: Realestate/realestate/context_processors.py
```python
from django.contrib.sites.models import Site
from django.conf import settings
def get_site_url(request, slash=False):
domain = Site.objects.get_current().domain
protocol = 'https' if request.is_secure() else 'http'
root = "%s://%s" % (protocol, domain)
if slash:
root += '/'
return root
def absolute(request):
urls = {
'ABSOLUTE_ROOT': request.build_absolute_uri('/')[:-1].strip("/"),
'ABSOLUTE_ROOT_URL': request.build_absolute_uri('/').strip("/"),
}
if 'django.contrib.sites' in settings.INSTALLED_APPS:
urls['SITE_ROOT'] = get_site_url(request)
urls['SITE_ROOT_URL'] = get_site_url(request, True)
return urls
def site_name(request):
return {'site_name': Site.objects.get_current().name}
```
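These processors only take effect once they are registered in the settings. A hypothetical old-style fragment (the same `TEMPLATE_CONTEXT_PROCESSORS` convention the test settings elsewhere in this repo use); the dotted paths assume the module is importable as `realestate.context_processors`:
```python
# Illustrative settings fragment only; on newer Django versions these
# belong under TEMPLATES[0]['OPTIONS']['context_processors'] instead.
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.core.context_processors.request",
    "realestate.context_processors.absolute",
    "realestate.context_processors.site_name",
)
```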
#### File: realestate/listing/models.py
```python
from decimal import Decimal
import os
import re
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
from django.template.defaultfilters import slugify
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from djmoney.models.fields import MoneyField
from moneyed import USD, Money
from realestate.home.models import Contact
from sorl.thumbnail import ImageField
TYPES = (
('house', _('Houses')),
('villa', _('Villas')),
('penthouse', _('Penthouses')),
('apartment', _('Apartments')),
('residencial-land', _('Residential Land')),
('corporate-office', _('Corporate Offices')),
('commercial-office', _('Commercial Offices')),
('commercial-space', _('Commercial Space')),
('industrial-building', _('Industrial Buildings')),
('commercial-warehouses', _('Commercial Warehouses')),
('commercial-land', _('Commercial Land')),
)
LOCATION_STREET = 'street'
LOCATION_SECTOR = 'sector'
LOCATION_CITY = 'city'
LOCATION_STATE = 'state'
LOCATION_TYPES = (
(LOCATION_STREET, _('Street')),
(LOCATION_SECTOR, _('Sector')),
(LOCATION_CITY, _('City')),
(LOCATION_STATE, _('State/Province')),
)
OFFERS = (
('buy', _('For Sale')),
('rent', _('For Rent')),
('buy-rent', _('For Sale/For Rent'))
)
VALIDATIONS = [
('realestate.listing.utils.validation_simple', _('One or more characters')),
('realestate.listing.utils.validation_integer', _('Integer')),
('realestate.listing.utils.validation_yesno', _('Yes/No')),
('realestate.listing.utils.validation_decimal', _('Decimal')),
]
class LocationManager(models.Manager):
def states(self, **kwargs):
return self.filter(location_type=LOCATION_STATE, **kwargs)
def cities(self, **kwargs):
return self.filter(location_type=LOCATION_CITY, **kwargs)
def sectors(self, **kwargs):
return self.filter(location_type=LOCATION_SECTOR, **kwargs)
def streets(self, **kwargs):
return self.filter(location_type=LOCATION_STREET, **kwargs)
class Location(models.Model):
parent = models.ForeignKey('self', verbose_name=_('Location'), null=True,
blank=True)
name = models.CharField(_('Name'), max_length=60)
location_type = models.CharField(_('Location Type'),
choices=LOCATION_TYPES,
default=LOCATION_SECTOR, max_length=20)
objects = LocationManager()
def __unicode__(self):
location_tree = self.get_parent_name(self, [])
return ', '.join(location_tree)
def __str__(self):
return self.__unicode__()
def get_parent_name(self, location, names):
names.append(location.name)
if location.parent is None:
return names
return self.get_parent_name(location.parent, names)
class AgentManager(models.Manager):
def active(self, **kwargs):
return self.filter(active=True, **kwargs)
def with_listings(self, **kwargs):
return self.active(listing__isnull=False, **kwargs)
class Agent(models.Model):
first_name = models.CharField(max_length=30, verbose_name=_('First name'))
last_name = models.CharField(max_length=30, verbose_name=_('Last name'))
phone = models.CharField(max_length=15, verbose_name=_('Phone'), null=True, blank=True)
mobile = models.CharField(max_length=15, verbose_name=_('Cellphone'), null=True, blank=True)
location = models.ForeignKey(Location, verbose_name=_('Location'), null=True, blank=True)
address = models.CharField(max_length=200, verbose_name=_('Address'), null=True, blank=True)
image = ImageField(upload_to='agents/', default='', verbose_name=_('Picture'), null=True, blank=True)
user = models.OneToOneField(User, verbose_name=_('User'), null=True, blank=True)
active = models.BooleanField(default=False, verbose_name=_('Active'))
objects = AgentManager()
@property
def name(self):
return '%s %s' % (self.first_name, self.last_name)
@property
def email(self):
return self.user.email if self.user is not None else None
def __unicode__(self):
return self.name
class Meta:
verbose_name = _('Agent')
verbose_name_plural = _('Agents')
class ListingManager(models.Manager):
def active(self, **kwargs):
return self.filter(active=True, **kwargs)
def featured(self, **kwargs):
return self.active().filter(featured=True)
def rent(self, **kwargs):
return self.active().filter(offer__in=('buy-rent', 'rent'))
def sale(self, **kwargs):
return self.active().filter(offer__in=('buy-rent', 'buy'))
class Listing(models.Model):
title = models.CharField(max_length=100, verbose_name=_('Title'))
slug = models.SlugField(max_length=100, unique=True, blank=False, verbose_name=_('Slug'))
description = models.TextField(verbose_name=_('Description'), null=True, blank=True)
price = MoneyField(default=Money(0, USD), max_digits=12, decimal_places=2, verbose_name=_('Price'))
location = models.ForeignKey(Location, null=True, blank=True)
type = models.CharField(_('Listing Type'), max_length=30, choices=TYPES)
offer = models.CharField(max_length=10, choices=OFFERS, verbose_name=_('Offer'))
active = models.BooleanField(_('Active'), default=False)
featured = models.BooleanField(default=False, verbose_name=_('Featured'))
baths = models.PositiveIntegerField(_('Bathrooms'), default=0, null=True, blank=True)
beds = models.PositiveIntegerField(_('Bedrooms'), default=0, null=True, blank=True)
size = models.PositiveIntegerField(_('Size(m2)'), default=0, null=True, blank=True)
coords = models.CharField(max_length=255, default='19.000000,-70.400000', verbose_name=_('Coords'), null=True,
blank=True)
agent = models.ForeignKey(Agent, null=True, blank=True, verbose_name=_('Agent'))
contact = models.ForeignKey(Contact, null=True, blank=True)
notes = models.TextField(max_length=500, verbose_name=_('Private Notes'), null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True, verbose_name=_('Created'))
last_modified = models.DateTimeField(auto_now=True, verbose_name=_('Last Modified'))
objects = ListingManager()
@property
def main_image(self):
im = self.images.all()
if im.count():
return im[0]
return None
@property
def image_list(self):
return [{'title': image.name, 'url': image.absolute_url, 'order': image.order} for image in self.images.all()]
@property
def address(self):
return self.get_address()
def get_address(self):
if self.location is None:
return _('No location provided')
return self.location
def __unicode__(self):
return self.title
class Meta:
verbose_name = _('Listing')
verbose_name_plural = _('Listings')
ordering = ['-pk', ]
def save(self, **kwargs):
self._generate_valid_slug()
super(Listing, self).save(**kwargs)
def _generate_valid_slug(self):
if not self.is_valid_slug():
slug = slugify(self.title)
while Listing.objects.filter(slug=slug).exclude(id=self.id).exists():
slug_parts = slug.split('-')
if slug_parts[-1].isdigit():
slug_parts[-1] = '%s' % (int(slug_parts[-1]) + 1)
else:
slug_parts.append('2')
slug = '-'.join(slug_parts)
self.slug = slug
def is_valid_slug(self):
if self.slug is None or len(self.slug) < 10:
return False
match = re.match('[^\w\s-]', self.slug)
if not match:
return False
return self.slug == slugify(self.slug)
@property
def absolute_url(self):
return self.get_absolute_url()
def get_absolute_url(self):
return reverse('property_details', args=[self.slug])
def get_features(self):
attributes = []
for attribute in self.attributelisting_set.all():
attribute_name = _(attribute.attribute.name)
if attribute.attribute.validation == 'realestate.listing.utils.validation_simple':
attributes.append('{0}: {1}'.format(attribute_name, attribute.value))
elif attribute.attribute.validation == 'realestate.listing.utils.validation_yesno':
attributes.append(attribute_name)
else:
if attribute.attribute.validation == 'realestate.listing.utils.validation_integer':
attributes.append('{0} {1}'.format(attribute.value, attribute_name))
else:
attributes.append('{0:.2f} {1}'.format(Decimal(attribute.value), attribute_name))
return attributes
@property
def nearby(self):
return Listing.objects.active(location=self.location).exclude(id=self.id).order_by('?')
@property
def has_baths_or_beds(self):
return self.should_have_beds or self.should_have_baths
@property
def suggested(self):
qs = Listing.objects.active(type=self.type)
price = self.price
lh = price * .90
rh = price * 1.10
if self.has_baths_or_beds:
if self.should_have_baths:
qs = qs.filter(baths=self.baths)
if self.should_have_beds:
qs = qs.filter(beds=self.beds)
if qs.count() == 0:
qs = Listing.objects.active(type=self.type, price__range=(lh, rh))
else:
qs = qs.filter(price__range=(lh, rh))
return qs.exclude(id=self.id).order_by('?')
@property
def should_have_beds(self):
return self.type in ('house', 'penthouse', 'apartment', 'villa',)
@property
def should_have_baths(self):
return 'land' not in self.type
@property
def on_sale(self):
return Deal.objects.on_sale(listing__in=(self,)).exists()
@property
def code(self):
if self.agent is not None:
agent = self.agent
prefix = '{0}{1}'.format(agent.first_name[0], agent.last_name[0])
return '{0}{1:04}'.format(prefix, self.id).upper()
rent_or_sale = 'v' if self.offer in ('buy-rent', 'buy') else 'r'
return '{0}{1:04x}'.format(rent_or_sale, self.id).upper()
class Attribute(models.Model):
name = models.CharField(_('Attribute'), max_length=100)
validation = models.CharField(_('Value type'), choices=VALIDATIONS, max_length=100)
class Meta:
ordering = ('name',)
verbose_name = _('Attribute')
verbose_name_plural = _('Attributes')
def __unicode__(self):
return self.__str__()
def __str__(self):
return self.name
class AttributeListing(models.Model):
listing = models.ForeignKey(Listing)
attribute = models.ForeignKey(Attribute)
value = models.CharField(_('Value'), max_length=255)
order = models.SmallIntegerField(_('Order'), default=99)
class Meta:
verbose_name = _('Listing attribute')
verbose_name_plural = _('Listing attributes')
ordering = ['order', ]
def __unicode__(self):
return '%s: %s' % (self.attribute.name, self.value)
class ListingImage(models.Model):
listing = models.ForeignKey(Listing, related_name='images', verbose_name=_('Listing'))
name = models.CharField(_('Name'), max_length=60)
image = ImageField(_('Image'), upload_to='listing/')
added = models.DateTimeField(_('Added'), auto_now_add=True)
order = models.PositiveSmallIntegerField(_('Order'), default=99, null=True)
@property
def absolute_url(self):
try:
return self.image.url
except ValueError:
return ''
def get_filename(self):
return os.path.basename(self.image.path)
def __unicode__(self):
return self.name
    class Meta:
        verbose_name = _('Picture')
        verbose_name_plural = _('Pictures')
        ordering = ['order']
class DealManager(models.Manager):
def active(self, **kwargs):
return self.filter(active=True, **kwargs)
def on_sale(self, **kwargs):
now = timezone.now()
return self.active(start_date__lte=now, end_date__gte=now, **kwargs)
class Deal(models.Model):
listing = models.ForeignKey(Listing, verbose_name=_('Listing'))
price = MoneyField(_('Sale Price'), default=Money(0, USD), max_digits=12, decimal_places=2)
active = models.BooleanField(_('Active'), default=False)
start_date = models.DateTimeField(verbose_name=_('Activation date'))
end_date = models.DateTimeField(verbose_name=_('Deactivation date'))
objects = DealManager()
def __unicode__(self):
if self.listing.location is not None:
return '%s - %s' % (self.listing.title, self.listing.location.name)
return self.listing.title
class Meta:
verbose_name = _('Deal')
verbose_name_plural = _('Deals')
```
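The slug handling in `Listing._generate_valid_slug` is easiest to see in isolation: on a collision the loop appends or increments a numeric suffix until the slug is free. A pure-Python sketch, with a set standing in for the database lookup and a simplified `slugify`:
```python
import re

def simple_slugify(text):
    text = re.sub(r'[^\w\s-]', '', text).strip().lower()
    return re.sub(r'[\s_]+', '-', text)

def next_free_slug(title, existing):
    slug = simple_slugify(title)
    while slug in existing:
        parts = slug.split('-')
        if parts[-1].isdigit():
            parts[-1] = str(int(parts[-1]) + 1)  # bump the trailing number
        else:
            parts.append('2')                    # first collision gets "-2"
        slug = '-'.join(parts)
    return slug

taken = {'nice-family-house', 'nice-family-house-2'}
print(next_free_slug('Nice Family House', taken))  # nice-family-house-3
```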
#### File: realestate/listing/sitemap.py
```python
from django.contrib.sitemaps import Sitemap
from realestate.listing.models import Listing
class ListingSitemap(Sitemap):
changefreq = 'monthly'
priority = 0.5
def items(self):
return Listing.objects.active()
def lastmod(self, obj):
return obj.last_modified
```
#### File: realestate/listing/utils.py
```python
import string
from decimal import Decimal
from django.utils import six
from django.db.models import AutoField
def validation_simple(value, obj=None):
"""
Validates that at least one character has been entered.
    No change is made to the value.
    """
    if value is None or len(value) == 0:
        return False, value, u'The entered value must have one or more characters'
return True, value, ''
def validation_integer(value, obj=None):
"""
Validates that value is an integer number.
No change is made to the value
"""
try:
int(value)
return True, value, ''
except:
        return False, value, u'The entered value is not an integer'
def validation_yesno(value, obj=None):
"""
Validates that yes or no is entered.
Converts the yes or no to capitalized version
"""
if value is not None:
if six.PY3:
if str.upper(value) in ["YES", "NO"]:
return True, str.capitalize(value), ''
else:
if string.upper(value) in ["YES", "NO"]:
return True, string.capitalize(value), ''
    return False, value, u'The entered value must be YES or NO'
def validation_decimal(value, obj=None):
"""
Validates that the number can be converted to a decimal
"""
try:
Decimal(value)
return True, value, ''
except:
        return False, value, u'The entered value must be a decimal number'
def import_validator(validator):
if validator is None:
raise ImportError
try:
import_name, function_name = validator.rsplit('.', 1)
except ValueError:
# no dot; treat it as a global
func = globals().get(validator, None)
if not func:
# we use ImportError to keep error handling for callers simple
raise ImportError
return validator
else:
# The below __import__() call is from python docs, and is equivalent to:
#
# from import_name import function_name
#
import_module = __import__(import_name, globals(), locals(), [function_name])
return getattr(import_module, function_name)
def validate_attribute_value(attribute, value, obj):
"""
Helper function for forms that wish to validation a value for an
AttributeOption.
"""
return import_validator(attribute.validation)(value, obj)
def copy_model_instance(obj):
"""
Taken from https://djangosnippets.org/snippets/1040/
"""
initial = dict([
(f.name, getattr(obj, f.name)) for f in obj._meta.fields if
not isinstance(f, AutoField) and not f in obj._meta.parents.values()
])
return obj.__class__(**initial)
```
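`import_validator` resolves a validator given as a dotted path string. A minimal sketch of that dispatch mechanism, using a standard-library function as the stand-in target:
```python
def resolve_dotted(path):
    module_name, func_name = path.rsplit('.', 1)
    # equivalent to: from module_name import func_name
    module = __import__(module_name, globals(), locals(), [func_name])
    return getattr(module, func_name)

hexlify = resolve_dotted('binascii.hexlify')
print(hexlify(b'\x01\x02'))  # b'0102'
```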
#### File: management/commands/import_data.py
```python
from django.core.management.base import BaseCommand, CommandError
import os, json, requests, glob
class Command(BaseCommand):
help = 'imports external json data and images'
# def add_arguments(self, parser):
# parser.add_argument('import_dir', nargs='+', type=str)
def handle(self, *args, **options):
        print('importing images')
# get auth token first
r = requests.post("http://localhost:8000/api-token-auth/", data={'username': 'admin', 'password': '<PASSWORD>'})
assert r.status_code == 200
        print('got security token', r.json())
        token = r.json().get('token')
        print(token)
# use auth token to post data via api
for root, dirs, files in os.walk("./import_data"):
path = root.split('/')
if "data.json" in files:
with open(os.path.join(root, 'data.json')) as data_file:
data = json.load(data_file)
else:
continue
            print(os.path.join(root, 'data.json'))
headers = {'Authorization': 'Token %s' % token}
values = {
'title': data['property']['summary'],
'description': data['property']['fullDescription'],
'coords': "{}, {}".format(data['property']['latitude'], data['property']['longitude']),
'features': json.dumps(data['property']['features'])
}
files = [] # append here all jpg in "root" variable
for image in glob.glob(os.path.join(root, '*.jpg')):
files.append((os.path.basename(image), open(image, 'rb')))
r = requests.post("http://localhost:8000/api/listings/", headers=headers, files=files, data=values)
```
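A hedged sketch of the same two-step flow the command performs: fetch a DRF auth token, then POST JSON fields plus images as one multipart request. The URLs, credentials and field names are placeholders, not the project's real API contract:
```python
import os
import requests

def upload_listing(base_url, username, password, values, image_paths):
    r = requests.post(base_url + "/api-token-auth/",
                      data={"username": username, "password": password})
    r.raise_for_status()
    token = r.json()["token"]

    headers = {"Authorization": "Token %s" % token}
    # requests expects (field_name, (filename, fileobj)) tuples for files
    files = [("images", (os.path.basename(p), open(p, "rb")))
             for p in image_paths]
    return requests.post(base_url + "/api/listings/",
                         headers=headers, data=values, files=files)
```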
#### File: Realestate/tests/runtests.py
```python
import os
import sys
import django
from django.conf import settings
DEFAULT_SETTINGS = dict(
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'realestate',
'realestate.home',
'realestate.listing',
# Deps
'constance',
'sorl.thumbnail',
'widget_tweaks',
'rest_framework',
'rest_framework.authtoken',
'haystack',
],
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "test.db"
}
},
MEDIA_ROOT=os.path.join(os.path.dirname(__file__), 'media'),
MEDIA_URL='/media/',
STATIC_URL='/static/',
CONSTANCE_CONFIG={
'PROPERTIES_PER_PAGE': (16, 'Properties per page'),
'RECENTLY_ADDED': (6, 'Recently Added'),
'CONTACT_DEFAULT_EMAIL': ('<EMAIL>', 'Contact form email')
},
CONSTANCE_REDIS_CONNECTION_CLASS='tests.redis_mockup.Connection',
EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend',
ROOT_URLCONF='realestate.urls',
TEMPLATE_DIRS=(os.path.abspath(os.path.join(os.path.dirname(__file__), '../realestate/templates')), ),
TEMPLATE_CONTEXT_PROCESSORS=("django.core.context_processors.request",),
HAYSTACK_CONNECTIONS={
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'URL': 'http://127.0.0.1:9200/',
'INDEX_NAME': 'realestate',
},
},
MIDDLEWARE_CLASSES=[
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware'
]
)
def runtests(*test_args):
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
parent = os.path.dirname(os.path.abspath(__file__))
parent = os.path.join(parent, '../')
sys.path.insert(0, parent)
django.setup()
print(parent)
from django.test.runner import DiscoverRunner
runner_class = DiscoverRunner
test_args = ['realestate']
failures = runner_class(verbosity=1, interactive=True, failfast=False).run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
runtests()
``` |
{
"source": "jinrongc1986/events-log",
"score": 2
} |
#### File: jinrongc1986/events-log/external_plugin_deps.bzl
```python
load("//tools/bzl:maven_jar.bzl", "maven_jar")
def external_plugin_deps():
maven_jar(
name = "mockito",
artifact = "org.mockito:mockito-core:2.21.0",
sha1 = "cdd1d0d5b2edbd2a7040735ccf88318c031f458b",
deps = [
"@byte_buddy//jar",
"@byte_buddy_agent//jar",
"@objenesis//jar",
],
)
BYTE_BUDDY_VER = "1.8.15"
maven_jar(
name = "byte_buddy",
artifact = "net.bytebuddy:byte-buddy:" + BYTE_BUDDY_VER,
sha1 = "cb36fe3c70ead5fcd016856a7efff908402d86b8",
)
maven_jar(
name = "byte_buddy_agent",
artifact = "net.bytebuddy:byte-buddy-agent:" + BYTE_BUDDY_VER,
sha1 = "a2dbe3457401f65ad4022617fbb3fc0e5f427c7d",
)
maven_jar(
name = "objenesis",
artifact = "org.objenesis:objenesis:2.6",
sha1 = "639033469776fd37c08358c6b92a4761feb2af4b",
)
maven_jar(
name = "hikaricp",
artifact = "com.zaxxer:HikariCP:3.2.0",
sha1 = "6c66db1c636ee90beb4c65fe34abd8ba9396bca6",
)
``` |
{
"source": "jinrong-lu/dat129_ccac",
"score": 4
} |
#### File: dat129_ccac/week4/PittsCityProject.py
```python
import csv
def area_budgeted_amount():
file=open('PittsCityCapitalProject.csv', newline='')
reader = csv.DictReader(file)
area_total={'Administration/Sub-Award':0,
'Engineering and Construction':0,
'Facility Improvement':0,
'Neighborhood and Community Development':0,
'Public_Safety':0,
'Vehicles and Equipment':0}
for row in reader:
if row['area']=='Administration/Sub-Award':
area_total['Administration/Sub-Award']+=float(row['budgeted_amount'])
elif row['area'] == 'Engineering and Construction':
area_total['Engineering and Construction']+=float(row['budgeted_amount'])
elif row['area'] == 'Facility Improvement':
area_total['Facility Improvement']+=float(row['budgeted_amount'])
elif row['area'] == 'Neighborhood and Community Development':
area_total['Neighborhood and Community Development']+=float(row['budgeted_amount'])
elif row['area'] == 'Vehicles and Equipment':
area_total['Vehicles and Equipment'] += float(row['budgeted_amount'])
elif row['area'] == 'Public_Safety':
area_total['Public_Safety'] += float(row['budgeted_amount'])
for k in area_total:
        print('The budgeted_amount of ' + k + ' is: ' + str(area_total[k]))
return area_total
def computerpercent(budgeted_amountDict):
total=0
for k in budgeted_amountDict:
total+=budgeted_amountDict[k]
for k in budgeted_amountDict:
perc=budgeted_amountDict[k]/total
print("Percent of "+k+": "+"{0:.2%}".format(perc))
def main():
a=area_budgeted_amount()
computerpercent(a)
main()
```
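The same aggregation can be written without a hand-maintained if/elif chain by letting `collections.defaultdict` create totals for whatever areas appear in the file. A sketch using the same CSV file and column names as above:
```python
import csv
from collections import defaultdict

def area_totals(path='PittsCityCapitalProject.csv'):
    totals = defaultdict(float)
    with open(path, newline='') as f:
        for row in csv.DictReader(f):
            totals[row['area']] += float(row['budgeted_amount'])
    return totals

def print_percentages(totals):
    grand = sum(totals.values())
    for area, amount in totals.items():
        print('{}: {:,.2f} ({:.2%})'.format(area, amount, amount / grand))

print_percentages(area_totals())
```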
#### File: dat129_ccac/week 5/apipractice_week5.py
```python
def readFile():
apiURL='https://api.donorschoose.org/common/json_feed.html?snacks-for-better-testing/4653507/&APIKey=DONORSCHOOSE&showSynopsis=true&max=50'
#result = getAverage(apiURL)
#print(result)
trans(apiURL)
import requests, json
def getAverage(url):
rep=requests.get(url)
runningTotal = 0
avg = 0
if(int(rep.status_code)==200):
apiDict = json.loads(rep.text)
proposalList = apiDict['proposals']
for p in proposalList:
runningTotal +=float(p['totalPrice'])
avg=runningTotal/ len(proposalList)
return avg
import csv
def trans(url):
rep=requests.get(url)
apiDict=json.loads(rep.text)
csv_proposals=apiDict['proposals']
    # read the keyword column names once, one keyword per line
    with open('keywords.txt', 'r') as keyword_file:
        keywords = [line.strip() for line in keyword_file if line.strip()]
    with open('list.csv', 'w', newline='') as csvfile:
        csv_writer = csv.writer(csvfile)
        csv_writer.writerow(keywords)
    for p in csv_proposals:
        # collect this proposal's value for every keyword column
        csv_values = [p[key] for key in keywords]
        with open('list.csv', 'a+', newline='') as csvfile:
            csv_writer = csv.writer(csvfile)
            csv_writer.writerow(csv_values)
readFile()
``` |
{
"source": "jinrudals/wit",
"score": 3
} |
#### File: lib/wit/common.py
```python
import sys
from .witlogger import getLogger
log = getLogger()
def error(*args, **kwargs):
log.error(*args, **kwargs)
sys.exit(1)
def print_errors(errors):
if len(errors) > 0:
log.info("") # print newline
for err in errors:
log.info("--- ERROR ---")
log.info(err)
class WitUserError(Exception):
"""
Supertype of user-input errors that should be reported without stack traces
"""
pass
```
#### File: lib/wit/inspect.py
```python
import sys
from .common import print_errors
from .witlogger import getLogger
log = getLogger()
def inspect_tree(ws, args):
packages, errors = ws.resolve()
if args.tree:
tree = {}
for dep in ws.manifest.dependencies:
tree[dep.get_id()] = dep.crawl_dep_tree(ws.root, ws.repo_paths, packages)
for key in tree:
top_dep = tree[key]
x, _ = _deduplicate_tree(top_dep)
_print_generic_tree(x)
if args.dot:
_print_dot_tree(ws, packages)
print_errors(errors)
BOXED_DEPS = False
VERBOSE_GRAPH = False
def _deduplicate_tree(tree, seen=None):
tree = tree.copy()
seen = seen or []
tag = tree.pop('')
ident = tag[-8:]
out = {'': tag}
if ident in seen:
return out, seen
else:
seen.append(ident)
for key in tree:
out[key], seen = _deduplicate_tree(tree[key], seen)
return out, seen
def _print_dot_tree(ws, packages_dict):
packages = list(packages_dict.values())
log.output('digraph dependencies {')
log.output('root [label="[root]"]')
pkg_ids = []
for pkg in packages:
pkg_id = pkg.get_id()
pkg_ids.append(pkg_id)
log.output('{} [label="{}"]'.format(pkg_id, pkg.id()))
drawn_connections = []
def draw_connection(from_id, to_id, dotted=False):
if from_id == to_id:
return
pair = (from_id, to_id)
if pair not in drawn_connections:
log.output("{} -> {}{}".format(from_id, to_id, " [style=dotted]" if dotted else ""))
drawn_connections.append(pair)
def print_dep(pkg, dep):
pkg_id = pkg.get_id()
dep_id = dep.get_id()
dep.load(packages_dict, ws.repo_paths, ws.root, False)
if dep.package.repo is None:
log.error("Cannot generate graph with missing repo '{}'".format(dep.name))
sys.exit(1)
dep_pkg_id = dep.package.get_id()
if dep.id() != dep.package.id() or VERBOSE_GRAPH:
draw_connection(dep_id, dep_pkg_id, dotted=True)
log.output('{} [label="{}"]{}'.format(dep_id, dep.id(),
" [shape=box]" if BOXED_DEPS else ""))
draw_connection(pkg_id, dep_id)
else:
draw_connection(pkg_id, dep_pkg_id)
for dep in ws.manifest.dependencies:
print_dep(ws, dep)
for pkg in packages:
for dep in pkg.get_dependencies():
print_dep(pkg, dep)
log.output('}')
def _print_generic_tree(data):
tag = data.pop('')
print(tag)
return _recur_print_generic_tree(0, data, [])
def _recur_print_generic_tree(depth, data, done_cols):
def print_indent(depth):
for i in range(0, depth):
if i in done_cols:
print(" ", end="")
else:
print("│ ", end="")
done_cols_copy = done_cols[:]
keys = list(data.keys())
for i, key in enumerate(keys):
subdata = data[key]
subtag = subdata.pop('')
print_indent(depth)
if i == len(keys)-1:
print("└─", end="")
done_cols_copy.append(depth)
else:
print("├─", end="")
print(subtag)
_recur_print_generic_tree(depth+1, subdata, done_cols_copy)
```
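`_print_generic_tree` and `_deduplicate_tree` above both consume the same nested-dict shape: each node is a dict whose `''` key holds the display tag and whose remaining keys are child subtrees. An illustration with made-up package tags:
```python
demo_tree = {
    '': 'root-pkg::deadbeef',
    'liba': {'': 'liba::12345678'},
    'libb': {
        '': 'libb::87654321',
        'liba': {'': 'liba::12345678'},  # repeat that _deduplicate_tree would prune
    },
}

def count_nodes(tree):
    # one for this node plus all children; the '' key is the tag, not a child
    return 1 + sum(count_nodes(v) for k, v in tree.items() if k != '')

print(count_nodes(demo_tree))  # 4
```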
#### File: lib/wit/manifest.py
```python
from pathlib import Path
from .witlogger import getLogger
from .repo_entries import RepoEntries
log = getLogger()
# TODO
# Should this actually be shared between package manifests and workspace descriptions?
# Should we use different datastructures?
class Manifest:
"""
Common class for the description of package dependencies and a workspace
"""
def __init__(self, dependencies):
self.dependencies = dependencies
def get_dependency(self, name: str):
for d in self.dependencies:
if d.name == name:
return d
return None
def contains_dependency(self, name: str) -> bool:
return self.get_dependency(name) is not None
def add_dependency(self, dep):
resolved = dep.resolved()
log.debug("Adding to manifest: {}".format(resolved))
self.dependencies.append(resolved)
def replace_dependency(self, dep) -> None:
newdeps = []
found = False
for d in self.dependencies:
if d.name == dep.name:
resolved = dep.resolved()
log.debug("New replace dep: {}".format(resolved))
newdeps.append(resolved)
found = True
else:
newdeps.append(d)
assert found, \
"Trying to update '{}' but it doesn't exist in manifest!".format(dep.name)
self.dependencies = newdeps
def write(self, path):
contents = [d.to_repo_entry() for d in self.dependencies]
RepoEntries.write(path, contents)
@staticmethod
def read_manifest(path, safe=False):
if safe and not Path(path).exists():
return Manifest([])
entries = RepoEntries.read(path)
from .dependency import Dependency
deps = [Dependency.from_repo_entry(e) for e in entries]
return Manifest(deps)
if __name__ == '__main__':
import doctest
doctest.testmod()
```
#### File: lib/wit/repo_entries.py
```python
import json
import sys
from enum import Enum
from pathlib import Path
from typing import List
# The intent of Format, RepoEntry and List[RepoEntry] is that no other
# parts of the codebase knows that json is used as the on-disk format, or know
# any of the field names.
# Version numbers will be encoded in the format from '3' onwards.
class Format(Enum):
Lock = 1
Manifest = 2
@staticmethod
def from_path(path: Path):
if path.name == "wit-lock.json":
return Format.Lock
if path.name == "wit-workspace.json" or path.name == "wit-manifest.json":
return Format.Manifest
raise Exception("Unknown format for {}".format(str(path)))
class RepoEntry:
def __init__(self, checkout_path, revision, remote_url, message=None):
# The path to checkout at within the workspace.
# JSON field name is 'name'.
self.checkout_path = checkout_path
# Desired revision that exists in the history of the below remote.
# JSON field name is 'commit'
self.revision = revision
# Url (or local fs path) for git to clone/fetch/push.
# JSON field name is 'source'
self.remote_url = remote_url
# A comment to leave in any serialized artifacts.
# Optional. JSON field name is '//'
self.message = message
def __repr__(self):
return str(self.__dict__)
# OriginalEntry encodes the RepoEntry for both Lock and Manifest formats.
class OriginalEntry():
@staticmethod
def to_dict(entry: RepoEntry) -> dict:
d = {
"name": entry.checkout_path,
"commit": entry.revision,
"source": entry.remote_url,
}
if entry.message:
d["//"] = entry.message
return d
@staticmethod
def from_dict(data: dict) -> RepoEntry:
return RepoEntry(data["name"],
data["commit"],
data.get("source"), # 'repo path' cli option needs this optional
data.get("//")) # optional
# Utilities for List[RepoEntry]
class RepoEntries:
@staticmethod
def write(path: Path, entries: List[RepoEntry]):
fmt = Format.from_path(path)
if fmt is Format.Manifest:
manifest_data = [OriginalEntry.to_dict(e) for e in entries]
json_data = json.dumps(manifest_data, sort_keys=True, indent=4) + "\n"
if fmt is Format.Lock:
lock_data = dict((e.checkout_path, OriginalEntry.to_dict(e)) for e in entries)
json_data = json.dumps(lock_data, sort_keys=True, indent=4) + "\n"
path.write_text(json_data)
@staticmethod
def read(path: Path) -> List[RepoEntry]:
text = path.read_text()
# 'parse' has to be decoupled from 'read' as sometimes
# we read files directly from the git object store rather
# than the filesystem
return RepoEntries.parse(text, path, "")
@staticmethod
def parse(text: str, path: Path, rev: str) -> List[RepoEntry]:
try:
fromtext = json.loads(text)
except json.JSONDecodeError as e:
print("Failed to parse json in {}:{}: {}".format(path, rev, e.msg))
sys.exit(1)
entries = []
fmt = Format.from_path(path)
if fmt is Format.Manifest:
for entry in fromtext:
entries.append(OriginalEntry.from_dict(entry))
if fmt is Format.Lock:
for _, entry in fromtext.items():
entries.append(OriginalEntry.from_dict(entry))
# Check for duplicates
names = [entry.checkout_path for entry in entries]
if len(names) != len(set(names)):
dup = set([x for x in names if names.count(x) > 1])
print("Two repositories have same checkout path in {}:{}: {}".format(path, rev, dup))
sys.exit(1)
return entries
``` |
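The two on-disk shapes handled above, shown side by side with a made-up entry: a manifest is a JSON list of entry dicts, while a lock file is a JSON object keyed by checkout path.
```python
import json

entry = {
    "name": "my-dep",
    "commit": "0123abcd",
    "source": "git@example.com:my-dep.git",
}

manifest_text = json.dumps([entry], sort_keys=True, indent=4)             # wit-manifest.json style
lock_text = json.dumps({entry["name"]: entry}, sort_keys=True, indent=4)  # wit-lock.json style

print(manifest_text)
print(lock_text)
```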
{
"source": "jin/rules_eta",
"score": 2
} |
#### File: rules_eta/eta/defs.bzl
```python
def _eta_library_impl(ctx):
eta_inputs = [ctx.file._eta]
metrics_dir = ctx.actions.declare_directory("metrics")
headers_dir = ctx.actions.declare_directory("headers")
args = ctx.actions.args()
args.add_all(["-metricsdir", "metrics"])
args.add_all(["-o", ctx.outputs._jar.path])
args.add_all(["-hidir", headers_dir.path])
args.add_all(ctx.files.srcs)
# Regular compile action
ctx.actions.run(
arguments = [args],
executable = ctx.executable._eta,
inputs = ctx.files.srcs + eta_inputs,
mnemonic = "EtaCompile",
outputs = [
ctx.outputs._jar,
metrics_dir,
headers_dir,
],
progress_message = "Compiling %s using Eta" % ctx.attr.name,
use_default_shell_env = True, # TODO: Needed to pass `--action_env=HOME`. Try not to use default shell env for hermeticity
)
return [
DefaultInfo(
files = depset([
ctx.outputs._jar,
headers_dir,
]),
),
JavaInfo(
output_jar = ctx.outputs._jar,
compile_jar = ctx.outputs._jar, # TODO: ijar?
)
]
# Unused until the https://github.com/typelead/eta/commit/f37e972b6a6d2ad6140718afbf0a4eb2612f51d0
# is in a release.
# ETA_BINARY_PACKAGE_PREFIX = "binaries/cdnverify.eta-lang.org/eta-0.8.6.1/binaries/x86_64-osx"
eta_library = rule(
implementation = _eta_library_impl,
outputs = {
"_jar": "%{name}.jar",
},
attrs = {
"srcs": attr.label_list(allow_files = [".hs"]),
# TODO: Figure out provider situation
# "deps": attr.label_list(),
# TODO: Download eta compiler directly
"_eta": attr.label(
default = "@eta//:eta",
allow_single_file = True,
executable = True,
cfg = "host",
),
},
)
def eta_repositories(bin_path):
native.new_local_repository(
name = "eta",
path = bin_path,
build_file_content = """
package(default_visibility = ["//visibility:public"])
filegroup(
name = "bin",
srcs = glob(["*"]),
)
"""
)
``` |
{
"source": "jin/rules_scala-1",
"score": 2
} |
#### File: private/rules/scala_binary.bzl
```python
load(
"@io_bazel_rules_scala//scala/private:common_attributes.bzl",
"common_attrs",
"implicit_deps",
"launcher_template",
"resolve_deps",
)
load("@io_bazel_rules_scala//scala/private:common_outputs.bzl", "common_outputs")
load(
"@io_bazel_rules_scala//scala/private:rule_impls.bzl",
"collect_jars_from_common_ctx",
"declare_executable",
"get_scalac_provider",
"get_unused_dependency_checker_mode",
"scala_binary_common",
"write_executable",
"write_java_wrapper",
)
def _scala_binary_impl(ctx):
scalac_provider = get_scalac_provider(ctx)
unused_dependency_checker_mode = get_unused_dependency_checker_mode(ctx)
unused_dependency_checker_is_off = unused_dependency_checker_mode == "off"
jars = collect_jars_from_common_ctx(
ctx,
scalac_provider.default_classpath,
unused_dependency_checker_is_off = unused_dependency_checker_is_off,
)
(cjars, transitive_rjars) = (jars.compile_jars, jars.transitive_runtime_jars)
wrapper = write_java_wrapper(ctx, "", "")
executable = declare_executable(ctx)
out = scala_binary_common(
ctx,
executable,
cjars,
transitive_rjars,
jars.transitive_compile_jars,
jars.jars2labels,
wrapper,
unused_dependency_checker_ignored_targets = [
target.label
for target in scalac_provider.default_classpath +
ctx.attr.unused_dependency_checker_ignored_targets
],
unused_dependency_checker_mode = unused_dependency_checker_mode,
deps_providers = jars.deps_providers,
)
write_executable(
ctx = ctx,
executable = executable,
jvm_flags = ctx.attr.jvm_flags,
main_class = ctx.attr.main_class,
rjars = out.transitive_rjars,
use_jacoco = False,
wrapper = wrapper,
)
return out
_scala_binary_attrs = {
"main_class": attr.string(mandatory = True),
"classpath_resources": attr.label_list(allow_files = True),
"jvm_flags": attr.string_list(),
}
_scala_binary_attrs.update(launcher_template)
_scala_binary_attrs.update(implicit_deps)
_scala_binary_attrs.update(common_attrs)
_scala_binary_attrs.update(resolve_deps)
scala_binary = rule(
attrs = _scala_binary_attrs,
executable = True,
fragments = ["java"],
outputs = common_outputs,
toolchains = ["@io_bazel_rules_scala//scala:toolchain_type"],
implementation = _scala_binary_impl,
)
``` |
{
"source": "jin-s13/mmaction2",
"score": 3
} |
#### File: datasets/pipelines/formating.py
```python
from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from ..registry import PIPELINES
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
"""
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif isinstance(data, Sequence) and not mmcv.is_str(data):
return torch.tensor(data)
elif isinstance(data, int):
return torch.LongTensor([data])
elif isinstance(data, float):
return torch.FloatTensor([data])
else:
raise TypeError(f'type {type(data)} cannot be converted to tensor.')
@PIPELINES.register_module()
class ToTensor:
"""Convert some values in results dict to `torch.Tensor` type in data
loader pipeline.
Args:
keys (Sequence[str]): Required keys to be converted.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Performs the ToTensor formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
for key in self.keys:
results[key] = to_tensor(results[key])
return results
def __repr__(self):
return f'{self.__class__.__name__}(keys={self.keys})'
@PIPELINES.register_module()
class ToDataContainer:
"""Convert the data to DataContainer.
Args:
fields (Sequence[dict]): Required fields to be converted
with keys and attributes. E.g.
fields=(dict(key='gt_bbox', stack=False),).
"""
def __init__(self, fields):
self.fields = fields
def __call__(self, results):
"""Performs the ToDataContainer formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
for field in self.fields:
_field = field.copy()
key = _field.pop('key')
results[key] = DC(results[key], **_field)
return results
def __repr__(self):
return self.__class__.__name__ + f'(fields={self.fields})'
@PIPELINES.register_module()
class ImageToTensor:
"""Convert image type to `torch.Tensor` type.
Args:
keys (Sequence[str]): Required keys to be converted.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Performs the ImageToTensor formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
for key in self.keys:
results[key] = to_tensor(results[key].transpose(2, 0, 1))
return results
def __repr__(self):
return f'{self.__class__.__name__}(keys={self.keys})'
@PIPELINES.register_module()
class Transpose:
"""Transpose image channels to a given order.
Args:
keys (Sequence[str]): Required keys to be converted.
order (Sequence[int]): Image channel order.
"""
def __init__(self, keys, order):
self.keys = keys
self.order = order
def __call__(self, results):
"""Performs the Transpose formatting.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
for key in self.keys:
results[key] = results[key].transpose(self.order)
return results
def __repr__(self):
return (f'{self.__class__.__name__}('
f'keys={self.keys}, order={self.order})')
@PIPELINES.register_module()
class Collect:
"""Collect data from the loader relevant to the specific task.
    This keeps the items in ``keys`` as they are, and collects items in
    ``meta_keys`` into a meta item called ``meta_name``. This is usually
    the last stage of the data loader pipeline.
For example, when keys='imgs', meta_keys=('filename', 'label',
'original_shape'), meta_name='img_meta', the results will be a dict with
keys 'imgs' and 'img_meta', where 'img_meta' is a DataContainer of another
dict with keys 'filename', 'label', 'original_shape'.
Args:
keys (Sequence[str]): Required keys to be collected.
        meta_name (str): The name of the key that contains meta information.
This key is always populated. Default: "img_meta".
meta_keys (Sequence[str]): Keys that are collected under meta_name.
The contents of the ``meta_name`` dictionary depends on
``meta_keys``.
By default this includes:
- "filename": path to the image file
- "label": label of the image file
- "original_shape": original shape of the image as a tuple
(h, w, c)
- "img_shape": shape of the image input to the network as a tuple
(h, w, c). Note that images may be zero padded on the
bottom/right, if the batch tensor is larger than this shape.
- "pad_shape": image shape after padding
- "flip_direction": a str in ("horiziontal", "vertival") to
indicate if the image is fliped horizontally or vertically.
- "img_norm_cfg": a dict of normalization information:
- mean - per channel mean subtraction
- std - per channel std divisor
- to_rgb - bool indicating if bgr was converted to rgb
"""
def __init__(self,
keys,
meta_keys=('filename', 'label', 'original_shape', 'img_shape',
'pad_shape', 'flip_direction', 'img_norm_cfg'),
meta_name='img_meta'):
self.keys = keys
self.meta_keys = meta_keys
self.meta_name = meta_name
def __call__(self, results):
"""Performs the Collect formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
data = {}
for key in self.keys:
data[key] = results[key]
if len(self.meta_keys) != 0:
meta = {}
for key in self.meta_keys:
meta[key] = results[key]
data[self.meta_name] = DC(meta, cpu_only=True)
return data
def __repr__(self):
return (f'{self.__class__.__name__}('
f'keys={self.keys}, meta_keys={self.meta_keys})')
@PIPELINES.register_module()
class FormatShape:
"""Format final imgs shape to the given input_format.
Required keys are "imgs", "num_clips" and "clip_len", added or modified
keys are "imgs" and "input_shape".
Args:
input_format (str): Define the final imgs format.
"""
def __init__(self, input_format):
self.input_format = input_format
if self.input_format not in ['NCTHW', 'NCHW', 'NCHW_Flow', 'NPTCHW']:
raise ValueError(
f'The input format {self.input_format} is invalid.')
def __call__(self, results):
"""Performs the FormatShape formating.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
imgs = results['imgs']
# [M x H x W x C]
# M = 1 * N_crops * N_clips * L
if self.input_format == 'NCTHW':
num_clips = results['num_clips']
clip_len = results['clip_len']
imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:])
# N_crops x N_clips x L x H x W x C
imgs = np.transpose(imgs, (0, 1, 5, 2, 3, 4))
# N_crops x N_clips x C x L x H x W
imgs = imgs.reshape((-1, ) + imgs.shape[2:])
# M' x C x L x H x W
# M' = N_crops x N_clips
elif self.input_format == 'NCHW':
imgs = np.transpose(imgs, (0, 3, 1, 2))
# M x C x H x W
elif self.input_format == 'NCHW_Flow':
num_clips = results['num_clips']
clip_len = results['clip_len']
imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:])
# N_crops x N_clips x L x H x W x C
imgs = np.transpose(imgs, (0, 1, 2, 5, 3, 4))
# N_crops x N_clips x L x C x H x W
imgs = imgs.reshape((-1, imgs.shape[2] * imgs.shape[3]) +
imgs.shape[4:])
# M' x C' x H x W
# M' = N_crops x N_clips
# C' = L x C
elif self.input_format == 'NPTCHW':
num_proposals = results['num_proposals']
num_clips = results['num_clips']
clip_len = results['clip_len']
imgs = imgs.reshape((num_proposals, num_clips * clip_len) +
imgs.shape[1:])
# P x M x H x W x C
# M = N_clips x L
imgs = np.transpose(imgs, (0, 1, 4, 2, 3))
# P x M x C x H x W
results['imgs'] = imgs
results['input_shape'] = imgs.shape
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f"(input_format='{self.input_format}')"
return repr_str
@PIPELINES.register_module()
class FormatAudioShape:
"""Format final audio shape to the given input_format.
Required keys are "imgs", "num_clips" and "clip_len", added or modified
keys are "imgs" and "input_shape".
Args:
input_format (str): Define the final imgs format.
"""
def __init__(self, input_format):
self.input_format = input_format
if self.input_format not in ['NCTF']:
raise ValueError(
f'The input format {self.input_format} is invalid.')
def __call__(self, results):
"""Performs the FormatShape formatting.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
audios = results['audios']
# clip x sample x freq -> clip x channel x sample x freq
clip, sample, freq = audios.shape
audios = audios.reshape(clip, 1, sample, freq)
results['audios'] = audios
results['input_shape'] = audios.shape
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f"(input_format='{self.input_format}')"
return repr_str
```
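A numpy-only walk-through of the `'NCTHW'` branch of `FormatShape`: the flat batch of frames `[M x H x W x C]`, with `M = N_crops * N_clips * L`, is regrouped into `[M' x C x L x H x W]` with `M' = N_crops * N_clips`. The sizes below are arbitrary toy values.
```python
import numpy as np

n_crops, num_clips, clip_len = 2, 3, 4
h, w, c = 8, 8, 3
imgs = np.zeros((n_crops * num_clips * clip_len, h, w, c))

x = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:])  # crops, clips, L, H, W, C
x = np.transpose(x, (0, 1, 5, 2, 3, 4))                       # crops, clips, C, L, H, W
x = x.reshape((-1,) + x.shape[2:])                            # crops*clips, C, L, H, W

print(x.shape)  # (6, 3, 4, 8, 8)
```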
#### File: models/heads/ssn_head.py
```python
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from ..registry import HEADS
def parse_stage_config(stage_cfg):
"""Parse config of STPP for three stages.
Args:
stage_cfg (int | tuple[int]):
Config of structured temporal pyramid pooling.
Returns:
tuple[tuple[int], int]:
Config of structured temporal pyramid pooling and
            total number of parts (number of multipliers).
"""
if isinstance(stage_cfg, int):
return (stage_cfg, ), stage_cfg
elif isinstance(stage_cfg, tuple):
return stage_cfg, sum(stage_cfg)
else:
raise ValueError(f'Incorrect STPP config {stage_cfg}')
class STPPTrain(nn.Module):
"""Structured temporal pyramid pooling for SSN at training.
Args:
stpp_stage (tuple): Config of structured temporal pyramid pooling.
Default: (1, (1, 2), 1).
num_segments_list (tuple): Number of segments to be sampled
in three stages. Default: (2, 5, 2).
"""
def __init__(self, stpp_stage=(1, (1, 2), 1), num_segments_list=(2, 5, 2)):
super().__init__()
starting_part, starting_multiplier = parse_stage_config(stpp_stage[0])
course_part, course_multiplier = parse_stage_config(stpp_stage[1])
ending_part, ending_multiplier = parse_stage_config(stpp_stage[2])
self.num_multipliers = (
starting_multiplier + course_multiplier + ending_multiplier)
self.stpp_stages = (starting_part, course_part, ending_part)
self.multiplier_list = (starting_multiplier, course_multiplier,
ending_multiplier)
self.num_segments_list = num_segments_list
def _extract_stage_feature(self, stage_feat, stage_parts, num_multipliers,
scale_factors, num_samples):
"""Extract stage feature based on structured temporal pyramid pooling.
Args:
stage_feat (torch.Tensor): Stage features to be STPP.
stage_parts (tuple): Config of STPP.
num_multipliers (int): Total number of parts in the stage.
scale_factors (list): Ratios of the effective sampling lengths
to augmented lengths.
num_samples (int): Number of samples.
Returns:
torch.Tensor: Features of the stage.
"""
stage_stpp_feat = []
stage_len = stage_feat.size(1)
for stage_part in stage_parts:
ticks = torch.arange(0, stage_len + 1e-5,
stage_len / stage_part).int()
for i in range(stage_part):
part_feat = stage_feat[:, ticks[i]:ticks[i + 1], :].mean(
dim=1) / num_multipliers
if scale_factors is not None:
part_feat = (
part_feat * scale_factors.view(num_samples, 1))
stage_stpp_feat.append(part_feat)
return stage_stpp_feat
def forward(self, x, scale_factors):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
scale_factors (list): Ratios of the effective sampling lengths
to augmented lengths.
Returns:
tuple[torch.Tensor, torch.Tensor]:
Features for predicting activity scores and
completeness scores.
"""
x0 = self.num_segments_list[0]
x1 = x0 + self.num_segments_list[1]
num_segments = x1 + self.num_segments_list[2]
feat_dim = x.size(1)
x = x.view(-1, num_segments, feat_dim)
num_samples = x.size(0)
scale_factors = scale_factors.view(-1, 2)
stage_stpp_feats = []
stage_stpp_feats.extend(
self._extract_stage_feature(x[:, :x0, :], self.stpp_stages[0],
self.multiplier_list[0],
scale_factors[:, 0], num_samples))
stage_stpp_feats.extend(
self._extract_stage_feature(x[:, x0:x1, :], self.stpp_stages[1],
self.multiplier_list[1], None,
num_samples))
stage_stpp_feats.extend(
self._extract_stage_feature(x[:, x1:, :], self.stpp_stages[2],
self.multiplier_list[2],
scale_factors[:, 1], num_samples))
stpp_feat = torch.cat(stage_stpp_feats, dim=1)
course_feat = x[:, x0:x1, :].mean(dim=1)
return course_feat, stpp_feat
class STPPTest(nn.Module):
"""Structured temporal pyramid pooling for SSN at testing.
Args:
num_classes (int): Number of classes to be classified.
use_regression (bool): Whether to perform regression or not.
Default: True.
stpp_stage (tuple): Config of structured temporal pyramid pooling.
Default: (1, (1, 2), 1).
"""
def __init__(self,
num_classes,
use_regression=True,
stpp_stage=(1, (1, 2), 1)):
super().__init__()
self.activity_score_len = num_classes + 1
self.complete_score_len = num_classes
self.reg_score_len = num_classes * 2
self.use_regression = use_regression
starting_parts, starting_multiplier = parse_stage_config(stpp_stage[0])
course_parts, course_multiplier = parse_stage_config(stpp_stage[1])
ending_parts, ending_multiplier = parse_stage_config(stpp_stage[2])
self.num_multipliers = (
starting_multiplier + course_multiplier + ending_multiplier)
if self.use_regression:
self.feat_dim = (
self.activity_score_len + self.num_multipliers *
(self.complete_score_len + self.reg_score_len))
else:
self.feat_dim = (
self.activity_score_len +
self.num_multipliers * self.complete_score_len)
self.stpp_stage = (starting_parts, course_parts, ending_parts)
self.activity_slice = slice(0, self.activity_score_len)
self.complete_slice = slice(
self.activity_slice.stop, self.activity_slice.stop +
self.complete_score_len * self.num_multipliers)
self.reg_slice = slice(
self.complete_slice.stop, self.complete_slice.stop +
self.reg_score_len * self.num_multipliers)
def _pyramids_pooling(self, out_scores, index, raw_scores, ticks,
scale_factors, score_len, stpp_stage):
"""Perform pyramids pooling.
Args:
out_scores (torch.Tensor): Scores to be returned.
index (int): Index of output scores.
raw_scores (torch.Tensor): Raw scores before STPP.
ticks (list): Ticks of raw scores.
scale_factors (list): Ratios of the effective sampling lengths
to augmented lengths.
score_len (int): Length of the score.
stpp_stage (tuple): Config of STPP.
"""
offset = 0
for stage_idx, stage_cfg in enumerate(stpp_stage):
if stage_idx == 0:
scale_factor = scale_factors[0]
elif stage_idx == len(stpp_stage) - 1:
scale_factor = scale_factors[1]
else:
scale_factor = 1.0
sum_parts = sum(stage_cfg)
tick_left = ticks[stage_idx]
tick_right = float(max(ticks[stage_idx] + 1, ticks[stage_idx + 1]))
if tick_right <= 0 or tick_left >= raw_scores.size(0):
offset += sum_parts
continue
for num_parts in stage_cfg:
part_ticks = torch.arange(tick_left, tick_right + 1e-5,
(tick_right - tick_left) /
num_parts).int()
for i in range(num_parts):
part_tick_left = part_ticks[i]
part_tick_right = part_ticks[i + 1]
if part_tick_right - part_tick_left >= 1:
raw_score = raw_scores[part_tick_left:part_tick_right,
offset *
score_len:(offset + 1) *
score_len]
raw_scale_score = raw_score.mean(dim=0) * scale_factor
out_scores[index, :] += raw_scale_score.detach().cpu()
offset += 1
return out_scores
def forward(self, x, proposal_ticks, scale_factors):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
proposal_ticks (list): Ticks of proposals to be STPP.
scale_factors (list): Ratios of the effective sampling lengths
to augmented lengths.
Returns:
tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
out_activity_scores (torch.Tensor): Activity scores
out_complete_scores (torch.Tensor): Completeness scores.
out_reg_scores (torch.Tensor): Regression scores.
"""
assert x.size(1) == self.feat_dim
num_ticks = proposal_ticks.size(0)
out_activity_scores = torch.zeros((num_ticks, self.activity_score_len),
dtype=x.dtype)
raw_activity_scores = x[:, self.activity_slice]
out_complete_scores = torch.zeros((num_ticks, self.complete_score_len),
dtype=x.dtype)
raw_complete_scores = x[:, self.complete_slice]
if self.use_regression:
out_reg_scores = torch.zeros((num_ticks, self.reg_score_len),
dtype=x.dtype)
raw_reg_scores = x[:, self.reg_slice]
else:
out_reg_scores = None
raw_reg_scores = None
for i in range(num_ticks):
ticks = proposal_ticks[i]
out_activity_scores[i, :] = raw_activity_scores[
ticks[1]:max(ticks[1] + 1, ticks[2]), :].mean(dim=0)
out_complete_scores = self._pyramids_pooling(
out_complete_scores, i, raw_complete_scores, ticks,
scale_factors[i], self.complete_score_len, self.stpp_stage)
if self.use_regression:
out_reg_scores = self._pyramids_pooling(
out_reg_scores, i, raw_reg_scores, ticks, scale_factors[i],
self.reg_score_len, self.stpp_stage)
return out_activity_scores, out_complete_scores, out_reg_scores
@HEADS.register_module()
class SSNHead(nn.Module):
"""The classification head for SSN.
Args:
dropout_ratio (float): Probability of dropout layer. Default: 0.8.
in_channels (int): Number of channels for input data. Default: 1024.
num_classes (int): Number of classes to be classified. Default: 20.
consensus (dict): Config of segmental consensus.
use_regression (bool): Whether to perform regression or not.
Default: True.
init_std (float): Std value for Initiation. Default: 0.001.
"""
def __init__(self,
dropout_ratio=0.8,
in_channels=1024,
num_classes=20,
consensus=dict(
type='STPPTrain',
standalong_classifier=True,
stpp_cfg=(1, 1, 1),
num_seg=(2, 5, 2)),
use_regression=True,
init_std=0.001):
super().__init__()
self.dropout_ratio = dropout_ratio
self.num_classes = num_classes
self.use_regression = use_regression
self.init_std = init_std
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
# Based on this copy, the model will utilize different
# structured temporal pyramid pooling at training and testing.
# Warning: this copy cannot be removed.
consensus_ = consensus.copy()
consensus_type = consensus_.pop('type')
if consensus_type == 'STPPTrain':
self.consensus = STPPTrain(**consensus_)
elif consensus_type == 'STPPTest':
consensus_['num_classes'] = self.num_classes
self.consensus = STPPTest(**consensus_)
self.in_channels_activity = in_channels
self.in_channels_complete = (
self.consensus.num_multipliers * in_channels)
self.activity_fc = nn.Linear(in_channels, num_classes + 1)
self.completeness_fc = nn.Linear(self.in_channels_complete,
num_classes)
if self.use_regression:
self.regressor_fc = nn.Linear(self.in_channels_complete,
num_classes * 2)
def init_weights(self):
"""Initiate the parameters from scratch."""
normal_init(self.activity_fc, std=self.init_std)
normal_init(self.completeness_fc, std=self.init_std)
if self.use_regression:
normal_init(self.regressor_fc, std=self.init_std)
def prepare_test_fc(self, stpp_feat_multiplier):
"""Reorganize the shape of fully connected layer at testing, in order
to improve testing efficiency.
Args:
stpp_feat_multiplier (int): Total number of parts.
Returns:
bool: Whether the shape transformation is ready for testing.
"""
in_features = self.activity_fc.in_features
out_features = (
self.activity_fc.out_features +
self.completeness_fc.out_features * stpp_feat_multiplier)
if self.use_regression:
out_features += (
self.regressor_fc.out_features * stpp_feat_multiplier)
self.test_fc = nn.Linear(in_features, out_features)
# Fetch weight and bias of the reorganized fc.
complete_weight = self.completeness_fc.weight.data.view(
self.completeness_fc.out_features, stpp_feat_multiplier,
in_features).transpose(0, 1).contiguous().view(-1, in_features)
complete_bias = self.completeness_fc.bias.data.view(1, -1).expand(
stpp_feat_multiplier, self.completeness_fc.out_features
).contiguous().view(-1) / stpp_feat_multiplier
weight = torch.cat((self.activity_fc.weight.data, complete_weight))
bias = torch.cat((self.activity_fc.bias.data, complete_bias))
if self.use_regression:
reg_weight = self.regressor_fc.weight.data.view(
self.regressor_fc.out_features, stpp_feat_multiplier,
in_features).transpose(0,
1).contiguous().view(-1, in_features)
reg_bias = self.regressor_fc.bias.data.view(1, -1).expand(
stpp_feat_multiplier, self.regressor_fc.out_features
).contiguous().view(-1) / stpp_feat_multiplier
weight = torch.cat((weight, reg_weight))
bias = torch.cat((bias, reg_bias))
self.test_fc.weight.data = weight
self.test_fc.bias.data = bias
return True
def forward(self, x, test_mode=False):
"""Defines the computation performed at every call."""
if not test_mode:
x, proposal_scale_factor = x
activity_feat, completeness_feat = self.consensus(
x, proposal_scale_factor)
if self.dropout is not None:
activity_feat = self.dropout(activity_feat)
completeness_feat = self.dropout(completeness_feat)
activity_scores = self.activity_fc(activity_feat)
complete_scores = self.completeness_fc(completeness_feat)
if self.use_regression:
bbox_preds = self.regressor_fc(completeness_feat)
bbox_preds = bbox_preds.view(-1,
self.completeness_fc.out_features,
2)
else:
bbox_preds = None
return activity_scores, complete_scores, bbox_preds
else:
x, proposal_tick_list, scale_factor_list = x
test_scores = self.test_fc(x)
(activity_scores, completeness_scores,
bbox_preds) = self.consensus(test_scores, proposal_tick_list,
scale_factor_list)
return (test_scores, activity_scores, completeness_scores,
bbox_preds)
```
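The pooling performed by `STPPTrain._extract_stage_feature` reduces to cutting a stage of segment features into contiguous chunks and mean-pooling each chunk. A plain-numpy sketch with toy shapes:
```python
import numpy as np

def stage_pool(stage_feat, parts):
    # stage_feat: [T x D]; returns `parts` mean-pooled vectors of size D
    T = stage_feat.shape[0]
    ticks = np.arange(0, T + 1e-5, T / parts).astype(int)
    return [stage_feat[ticks[i]:ticks[i + 1]].mean(axis=0) for i in range(parts)]

feat = np.arange(12, dtype=float).reshape(6, 2)  # 6 segments, 2-dim features
pooled = stage_pool(feat, 2)
print([p.tolist() for p in pooled])  # [[2.0, 3.0], [8.0, 9.0]]
```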
#### File: tests/test_data/test_ava_dataset.py
```python
import os.path as osp
import mmcv
import numpy as np
from numpy.testing import assert_array_equal
from mmaction.datasets import AVADataset
def check_keys_contain(result_keys, target_keys):
"""Check if all elements in target_keys is in result_keys."""
return set(target_keys).issubset(set(result_keys))
class TestAVADataset(object):
@classmethod
def setup_class(cls):
cls.data_prefix = osp.join(
osp.dirname(osp.dirname(__file__)), 'data', 'test_ava_dataset')
cls.ann_file = osp.join(cls.data_prefix, 'ava_sample.csv')
cls.exclude_file = osp.join(cls.data_prefix,
'ava_excluded_timestamps_sample.csv')
cls.proposal_file = osp.join(cls.data_prefix,
'ava_proposals_sample.pkl')
cls.pipeline = [
dict(type='SampleAVAFrames', clip_len=32, frame_interval=2)
]
cls.proposal = mmcv.load(cls.proposal_file)
def test_ava_dataset(self):
target_keys = [
'frame_dir', 'video_id', 'timestamp', 'img_key', 'shot_info',
'fps', 'ann'
]
ann_keys = ['labels', 'entity_boxes', 'entity_ids']
pkl_keys = ['0f39OWEqJ24,0902', '0f39OWEqJ24,0903', <KEY>']
ava_dataset = AVADataset(
self.ann_file,
self.exclude_file,
self.pipeline,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
ava_infos = ava_dataset.video_infos
assert check_keys_contain(ava_dataset.proposals.keys(), pkl_keys)
assert check_keys_contain(ava_infos[0].keys(), target_keys)
assert check_keys_contain(ava_infos[0]['ann'].keys(), ann_keys)
assert len(ava_infos) == 1
assert ava_infos[0]['frame_dir'] == osp.join(self.data_prefix,
'0f39OWEqJ24')
assert ava_infos[0]['video_id'] == '0f39OWEqJ24'
assert ava_infos[0]['timestamp'] == 902
assert ava_infos[0]['img_key'] == '0f39OWEqJ24,0902'
assert ava_infos[0]['shot_info'] == (0, 26880)
assert ava_infos[0]['fps'] == 30
assert len(ava_infos[0]['ann']) == 3
target_labels = np.array([12, 17, 79] + [
-1,
] * 78)
target_labels = target_labels[None, ...]
assert_array_equal(ava_infos[0]['ann']['labels'], target_labels)
assert_array_equal(ava_infos[0]['ann']['entity_boxes'],
np.array([[0.031, 0.162, 0.67, 0.995]]))
assert_array_equal(ava_infos[0]['ann']['entity_ids'], np.array([0]))
ava_dataset = AVADataset(
self.ann_file,
None,
self.pipeline,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
ava_infos = ava_dataset.video_infos
assert len(ava_infos) == 3
ava_dataset = AVADataset(
self.ann_file,
None,
self.pipeline,
test_mode=True,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
ava_infos = ava_dataset.video_infos
assert len(ava_infos) == 3
ava_dataset = AVADataset(
self.ann_file,
None,
self.pipeline,
test_mode=True,
data_prefix=self.data_prefix,
proposal_file=None)
assert ava_dataset.proposals is None
def test_ava_pipeline(self):
target_keys = [
'frame_dir', 'video_id', 'timestamp', 'img_key', 'shot_info',
'fps', 'ann', 'filename_tmpl', 'modality', 'start_index',
'timestamp_start', 'timestamp_end', 'proposals', 'frame_inds',
'clip_len', 'frame_interval'
]
ann_keys = ['labels', 'entity_boxes', 'entity_ids']
ava_dataset = AVADataset(
self.ann_file,
self.exclude_file,
self.pipeline,
data_prefix=self.data_prefix,
proposal_file=self.proposal_file)
result = ava_dataset[0]
assert check_keys_contain(result.keys(), target_keys)
assert check_keys_contain(result['ann'].keys(), ann_keys)
assert result['filename_tmpl'] == 'img_{:05}.jpg'
assert result['modality'] == 'RGB'
assert result['start_index'] == 1
assert result['timestamp_start'] == 902
assert result['timestamp_end'] == 1798
assert_array_equal(result['proposals'],
np.array([[0.011, 0.157, 0.655, 0.983, 0.998163]]))
assert result['clip_len'] == 32
assert result['frame_interval'] == 2
assert len(result['frame_inds']) == 32
```
#### File: tests/test_data/test_compose.py
```python
import numpy as np
import pytest
from mmaction.datasets.pipelines import Compose, ImageToTensor
def check_keys_equal(result_keys, target_keys):
"""Check if all elements in target_keys is in result_keys."""
return set(target_keys) == set(result_keys)
def test_compose():
with pytest.raises(TypeError):
# transform must be callable or a dict
Compose('LoadImage')
target_keys = ['img', 'img_meta']
# test Compose given a data pipeline
img = np.random.randn(256, 256, 3)
results = dict(img=img, abandoned_key=None, img_name='test_image.png')
test_pipeline = [
dict(type='Collect', keys=['img'], meta_keys=['img_name']),
dict(type='ImageToTensor', keys=['img'])
]
compose = Compose(test_pipeline)
compose_results = compose(results)
assert check_keys_equal(compose_results.keys(), target_keys)
assert check_keys_equal(compose_results['img_meta'].data.keys(),
['img_name'])
# test Compose when forward data is None
results = None
image_to_tensor = ImageToTensor(keys=[])
test_pipeline = [image_to_tensor]
compose = Compose(test_pipeline)
compose_results = compose(results)
assert compose_results is None
assert repr(compose) == compose.__class__.__name__ + \
f'(\n {image_to_tensor}\n)'
```
#### File: mmaction2/tests/test_gradcam.py
```python
import os.path as osp
import mmcv
import numpy as np
import pytest
import torch
from mmaction.models import build_recognizer
from mmaction.utils.gradcam_utils import GradCAM
def _get_cfg(fname):
"""Grab configs necessary to create a recognizer.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
repo_dpath = osp.dirname(osp.dirname(__file__))
config_dpath = osp.join(repo_dpath, 'configs/recognition')
config_fpath = osp.join(config_dpath, fname)
if not osp.exists(config_dpath):
raise Exception('Cannot find config path')
config = mmcv.Config.fromfile(config_fpath)
return config
def _get_target_shapes(input_shape, num_classes=400, model_type='2D'):
if model_type not in ['2D', '3D']:
raise ValueError(f'Data type {model_type} is not available')
preds_target_shape = (input_shape[0], num_classes)
if model_type == '3D':
# input shape (batch_size, num_crops*num_clips, C, clip_len, H, W)
# target shape (batch_size*num_crops*num_clips, clip_len, H, W, C)
blended_imgs_target_shape = (input_shape[0] * input_shape[1],
input_shape[3], input_shape[4],
input_shape[5], input_shape[2])
else:
# input shape (batch_size, num_segments, C, H, W)
# target shape (batch_size, num_segments, H, W, C)
blended_imgs_target_shape = (input_shape[0], input_shape[1],
input_shape[3], input_shape[4],
input_shape[2])
return blended_imgs_target_shape, preds_target_shape
def _generate_gradcam_inputs(input_shape=(1, 3, 3, 224, 224), model_type='2D'):
"""Create a superset of inputs needed to run gradcam.
Args:
input_shape (tuple[int]): input batch dimensions.
Default: (1, 3, 3, 224, 224).
model_type (str): Model type for data generation, from {'2D', '3D'}.
Default: '2D'.
Returns:
dict: model inputs, including two keys, ``imgs`` and ``label``.
"""
imgs = np.random.random(input_shape)
if model_type in ['2D', '3D']:
gt_labels = torch.LongTensor([2] * input_shape[0])
else:
raise ValueError(f'Data type {model_type} is not available')
inputs = {
'imgs': torch.FloatTensor(imgs),
'label': gt_labels,
}
return inputs
def _do_test_2D_models(recognizer,
target_layer_name,
input_shape,
num_classes=400,
device='cpu'):
demo_inputs = _generate_gradcam_inputs(input_shape)
demo_inputs['imgs'] = demo_inputs['imgs'].to(device)
demo_inputs['label'] = demo_inputs['label'].to(device)
recognizer = recognizer.to(device)
gradcam = GradCAM(recognizer, target_layer_name)
blended_imgs_target_shape, preds_target_shape = _get_target_shapes(
input_shape, num_classes=num_classes, model_type='2D')
blended_imgs, preds = gradcam(demo_inputs)
assert blended_imgs.size() == blended_imgs_target_shape
assert preds.size() == preds_target_shape
blended_imgs, preds = gradcam(demo_inputs, True)
assert blended_imgs.size() == blended_imgs_target_shape
assert preds.size() == preds_target_shape
def _do_test_3D_models(recognizer,
target_layer_name,
input_shape,
num_classes=400):
blended_imgs_target_shape, preds_target_shape = _get_target_shapes(
input_shape, num_classes=num_classes, model_type='3D')
demo_inputs = _generate_gradcam_inputs(input_shape, '3D')
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
recognizer = recognizer.cuda()
demo_inputs['imgs'] = demo_inputs['imgs'].cuda()
demo_inputs['label'] = demo_inputs['label'].cuda()
gradcam = GradCAM(recognizer, target_layer_name)
blended_imgs, preds = gradcam(demo_inputs)
assert blended_imgs.size() == blended_imgs_target_shape
assert preds.size() == preds_target_shape
blended_imgs, preds = gradcam(demo_inputs, True)
assert blended_imgs.size() == blended_imgs_target_shape
assert preds.size() == preds_target_shape
else:
gradcam = GradCAM(recognizer, target_layer_name)
blended_imgs, preds = gradcam(demo_inputs)
assert blended_imgs.size() == blended_imgs_target_shape
assert preds.size() == preds_target_shape
blended_imgs, preds = gradcam(demo_inputs, True)
assert blended_imgs.size() == blended_imgs_target_shape
assert preds.size() == preds_target_shape
def test_tsn():
config = _get_cfg('tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 25, 3, 32, 32)
target_layer_name = 'backbone/layer4/1/relu'
_do_test_2D_models(recognizer, target_layer_name, input_shape)
def test_i3d():
config = _get_cfg('i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py')
config.model['backbone']['pretrained2d'] = False
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = [1, 1, 3, 32, 32, 32]
target_layer_name = 'backbone/layer4/1/relu'
_do_test_3D_models(recognizer, target_layer_name, input_shape)
def test_r2plus1d():
config = _get_cfg('r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py')
config.model['backbone']['pretrained2d'] = False
config.model['backbone']['pretrained'] = None
config.model['backbone']['norm_cfg'] = dict(type='BN3d')
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 3, 3, 8, 32, 32)
target_layer_name = 'backbone/layer4/1/relu'
_do_test_3D_models(recognizer, target_layer_name, input_shape)
def test_slowfast():
config = _get_cfg('slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py')
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 1, 3, 32, 32, 32)
target_layer_name = 'backbone/slow_path/layer4/1/relu'
_do_test_3D_models(recognizer, target_layer_name, input_shape)
def test_tsm():
config = _get_cfg('tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py')
config.model['backbone']['pretrained'] = None
target_layer_name = 'backbone/layer4/1/relu'
# base config
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 8, 3, 32, 32)
_do_test_2D_models(recognizer, target_layer_name, input_shape)
# test twice sample + 3 crops, 2*3*8=48
test_cfg = dict(average_clips='prob')
recognizer = build_recognizer(config.model, test_cfg=test_cfg)
recognizer.cfg = config
input_shape = (1, 48, 3, 32, 32)
_do_test_2D_models(recognizer, target_layer_name, input_shape)
def test_csn():
config = _get_cfg(
'csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py')
config.model['backbone']['pretrained2d'] = False
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 1, 3, 32, 32, 32)
target_layer_name = 'backbone/layer4/1/relu'
_do_test_3D_models(recognizer, target_layer_name, input_shape)
def test_tpn():
target_layer_name = 'backbone/layer4/1/relu'
config = _get_cfg('tpn/tpn_tsm_r50_1x1x8_150e_sthv1_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 8, 3, 32, 32)
_do_test_2D_models(recognizer, target_layer_name, input_shape, 174)
config = _get_cfg('tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 3, 3, 8, 32, 32)
_do_test_3D_models(recognizer, target_layer_name, input_shape)
def test_c3d():
config = _get_cfg('c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 1, 3, 16, 112, 112)
target_layer_name = 'backbone/conv5a/activate'
_do_test_3D_models(recognizer, target_layer_name, input_shape, 101)
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
def test_tin():
config = _get_cfg('tin/tin_tsm_finetune_r50_1x1x8_50e_kinetics400_rgb.py')
config.model['backbone']['pretrained'] = None
target_layer_name = 'backbone/layer4/1/relu'
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 8, 3, 64, 64)
_do_test_2D_models(
recognizer, target_layer_name, input_shape, device='cuda:0')
def test_x3d():
config = _get_cfg('x3d/x3d_s_13x6x1_facebook_kinetics400_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model, test_cfg=config.test_cfg)
recognizer.cfg = config
input_shape = (1, 1, 3, 13, 32, 32)
target_layer_name = 'backbone/layer4/1/relu'
_do_test_3D_models(recognizer, target_layer_name, input_shape)
``` |
{
"source": "Jinsan-Dev/BaroDetector",
"score": 3
} |
#### File: Data/Preprocessed-data/feature_extraction.py
```python
import os
import numpy as np
import statistics as stat
import csv
import sys
from scipy.stats import kurtosis
def getZeroCrossingRate(arr):
np_array = np.array(arr)
return float("{0:.4f}".format((((np_array[:-1] * np_array[1:]) < 0).sum()) / len(arr)))
def getMeanCrossingRate(arr):
return getZeroCrossingRate(np.array(arr) - np.mean(arr))
def getRateOfChange(arr,first,last):
np_array = np.array(arr)
return abs(np_array[last]-np_array[first])/window_size # to catch that a "passing" event has occurred
#return (np_array[1:] / np_array[:-1] - 1).sum()
def getKurtosis(arr):
np_array = np.array(arr)
kur = kurtosis(np_array,fisher=True)
return kur
def getIrange(arr):
np_array = np.array(arr)
return abs(np.percentile(np_array,75) - np.percentile(np_array,25))
def power(list):
return [x**2 for x in list]
def getRootMeanSquare(arr):
np_array = np.array(arr)
np_array = power(np_array)
return np.sqrt(np.sum(np_array)/float(len(np_array)))
def getRootSumSquare(arr):
np_array = np.array(arr)
np_array = power(np_array)
return np.sqrt(np.sum(np_array))
def getLabel(arr):
np_array = np.array(arr)
if "Non" not in np_array:
return LABEL_PASSING
else:
return "Non"
window_size = 40
overlap = 5
LABEL_PASSING = "passing"
roc = []
mcr = []
std = []
iran = []
kur = []
rms = []
rss = []
absDiff = []
label = []
if __name__=="__main__":
for root, dirs, files in os.walk("./"):
for file_name in files:
if os.path.splitext(file_name)[-1] == '.csv': # Depends on file type
with open(file_name, 'r',encoding = 'ISO-8859-1') as f:
reader = csv.reader(f)
diff = []
window_arr_pressure = []
window_arr_label = []
for txt in reader:
#vals = line[:-1].split(",") # 맨 끝의 \n 제외한 것들을 , 기준으로 나눔
window_arr_pressure.append(float(txt[0]))
diff.append(txt[2])
if str(txt[1]) == LABEL_PASSING:
window_arr_label.append(LABEL_PASSING)
else:
window_arr_label.append("Non")
for index, line in enumerate(window_arr_pressure):
if index+window_size < len(window_arr_pressure):
roc.append(float(getRateOfChange(window_arr_pressure,index,index+window_size))) # Rate of change
mcr.append(float(getMeanCrossingRate(window_arr_pressure[index:index+window_size]))) # MCR over the current window of window_size samples
std.append(float(stat.stdev(window_arr_pressure[index:index+window_size]))) # STD over the current window of window_size samples
iran.append(float(getIrange(window_arr_pressure[index:index+window_size]))) # interquartile range
kur.append(float(getKurtosis(window_arr_pressure[index:index+window_size])))
rms.append(float(getRootMeanSquare(window_arr_pressure[index:index+window_size])))
rss.append(float(getRootSumSquare(window_arr_pressure[index:index+window_size])))
absDiff.append(diff[index])
label.append(getLabel(window_arr_label[index:index+window_size])) # each label
#arff file write
with open('./arff_files/'+'result.arff','w',newline='') as f: # make arff file format
f.write('''@RELATION pressure
@attribute roc numeric
@attribute mcr numeric
@attribute std numeric
@attribute iran numeric
@attribute kurtosis numeric
@attribute rss numeric
@attribute rms numeric
@attribute absDiff numeric
@attribute label {passing, Non}
@data
''')
for index, line in enumerate(roc):
#f.write(str(iran[index])+ "," +label[index]+"\n")
f.write(str(roc[index])+","+str(mcr[index])+","+str(std[index]) + "," + str(iran[index]) + "," + str(kur[index]) + "," + str(rss[index]) + "," +str(rms[index]) + "," +str(absDiff[index]) + "," +label[index]+"\n")
``` |
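As a quick illustration of the window features computed above, the sketch below evaluates a few of them on a synthetic signal; the signal, window length, and values are made up for demonstration and are not part of the original script.
```python
import numpy as np
from scipy.stats import kurtosis

# Synthetic "pressure" window: a sine wave with a slow drift (illustrative only).
window = np.sin(np.linspace(0, 4 * np.pi, 40)) + 0.01 * np.arange(40)

# Zero-crossing rate: fraction of adjacent sample pairs that change sign.
zcr = ((window[:-1] * window[1:]) < 0).sum() / len(window)
# Mean-crossing rate: zero-crossing rate of the mean-removed signal.
centered = window - window.mean()
mcr = ((centered[:-1] * centered[1:]) < 0).sum() / len(window)
# Interquartile range, root-mean-square, and kurtosis of the window.
iqr = abs(np.percentile(window, 75) - np.percentile(window, 25))
rms = np.sqrt(np.sum(window ** 2) / len(window))
kur = kurtosis(window, fisher=True)

print(zcr, mcr, iqr, rms, kur)
```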
{
"source": "jinsanity07git/tmip-emat",
"score": 3
} |
#### File: tmip-emat/docs/sphinx-jupyter-widgets-cleanup.py
```python
import argparse, os
parser = argparse.ArgumentParser()
parser.add_argument('outdir', type=str, help='sphinx output directory')
args = parser.parse_args()
import re
duplicate_tag = '''(<script src="https://unpkg.com/@jupyter-widgets/html-manager@\^[0-9]*\.[0-9]*\.[0-9]*/dist/embed-amd.js"></script>)'''
bad1 = re.compile(duplicate_tag)
bad2 = re.compile(duplicate_tag+"(.*)"+duplicate_tag)
def dedupe_jupyter_widgets_manager(filename):
with open(filename, 'rt') as html_in:
content = html_in.read()
n = len(bad1.findall(content))
if n>1:
content_1 = bad1.sub("", content, count=n-1)
print(f"FIXING [{n}]:",filename)
with open(filename, 'wt') as html_out:
html_out.write(content_1)
else:
print(f"PASSED [{n}]:",filename)
def fixing_walker(filename):
directory = os.path.dirname(os.path.abspath(filename))
for dirpath, dirnames, filenames in os.walk(directory):
for f in filenames:
if f[-5:]==".html":
this_file = os.path.join(dirpath, f)
dedupe_jupyter_widgets_manager(this_file)
fixing_walker(args.outdir)
```
#### File: explore_2/components/two_dim_figure.py
```python
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from ....viz import colors
def empty_two_dim_figure(
use_gl=True,
marker_opacity=(1.0,1.0),
):
"""
Args:
use_gl (bool, default True):
Use the WebGL versions of plots.
marker_opacity (tuple, default (1.0,1.0)):
The opacity of (unselected, selected) markers in
the scatter plot.
Returns:
figure
"""
Scatter = go.Scattergl if use_gl else go.Scatter
scattergraph = Scatter(
x=None,
y=None,
mode='markers',
marker=dict(
opacity=marker_opacity[0],
color=None,
colorscale=[[0, colors.DEFAULT_BASE_COLOR], [1, colors.DEFAULT_HIGHLIGHT_COLOR]],
cmin=0,
cmax=1,
),
name='Cases',
)
x_hist = go.Histogram(
x=None,
# name='x density',
marker=dict(
color=colors.DEFAULT_BASE_COLOR,
# opacity=0.7,
),
yaxis='y2',
bingroup='xxx',
)
y_hist = go.Histogram(
y=None,
# name='y density',
marker=dict(
color=colors.DEFAULT_BASE_COLOR,
# opacity=0.7,
),
xaxis='x2',
bingroup='yyy',
)
x_hist_s = go.Histogram(
x=None,
marker=dict(
color=colors.DEFAULT_HIGHLIGHT_COLOR,
# opacity=0.7,
),
yaxis='y2',
bingroup='xxx',
)
y_hist_s = go.Histogram(
y=None,
marker=dict(
color=colors.DEFAULT_HIGHLIGHT_COLOR,
# opacity=0.7,
),
xaxis='x2',
bingroup='yyy',
)
fig = go.Figure()
scattergraph = fig.add_trace(scattergraph).data[-1]
x_hist = fig.add_trace(x_hist).data[-1]
y_hist = fig.add_trace(y_hist).data[-1]
x_hist_s = fig.add_trace(x_hist_s).data[-1]
y_hist_s = fig.add_trace(y_hist_s).data[-1]
fig.layout = dict(
xaxis=dict(
domain=[0, 0.85],
showgrid=True,
# title=self._df.data.columns[0],
),
yaxis=dict(
domain=[0, 0.85],
showgrid=True,
# title=self._df.data.columns[-1],
),
xaxis2=dict(
domain=[0.85, 1],
showgrid=True,
zeroline=True,
zerolinecolor='#FFF',
zerolinewidth=4,
),
yaxis2=dict(
domain=[0.85, 1],
showgrid=True,
zeroline=True,
zerolinecolor='#FFF',
zerolinewidth=4,
),
barmode="overlay",
showlegend=False,
margin=dict(l=10, r=10, t=10, b=10),
dragmode="lasso",
)
return fig
```
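A hedged usage sketch for the figure factory above. The import path is only a guess based on the relative imports shown (the module sits several package levels below the top-level package), so adjust it to wherever `two_dim_figure.py` actually lives in your installation.
```python
import numpy as np
# Import path is a guess based on the file layout; adjust as needed.
from emat.analysis.explore_2.components.two_dim_figure import empty_two_dim_figure

fig = empty_two_dim_figure(use_gl=False, marker_opacity=(0.5, 1.0))

# Fill the (initially empty) traces with some random data.
x = np.random.randn(200)
y = 0.5 * x + np.random.randn(200)
fig.data[0].x, fig.data[0].y = x, y   # scatter of cases
fig.data[1].x = x                     # marginal histogram of x
fig.data[2].y = y                     # marginal histogram of y
fig.show()
```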
#### File: emat/interactive/multitoggle.py
```python
from ipywidgets import Box, widget_selection, ToggleButton, Layout, Label, Widget
import traitlets
class MultiToggleButtons(Box):
description = traitlets.Unicode()
value = traitlets.Tuple()
options = traitlets.Union([traitlets.List(), traitlets.Dict()])
style = traitlets.Dict()
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._selection_obj = widget_selection._MultipleSelection()
traitlets.link((self, 'options'), (self._selection_obj, 'options'))
traitlets.link((self, 'value'), (self._selection_obj, 'value'))
@observer(self, 'options')
def _(*_):
self.buttons = [ToggleButton(description=label,
layout=Layout(
margin='1',
width='auto'
))
for label in self._selection_obj._options_labels]
if self.description:
self.label = Label(self.description, layout=Layout(width=self.style.get('description_width', '100px')))
else:
self.label = Label(self.description, layout=Layout(width=self.style.get('description_width', '0px')))
self.children = [self.label]+self.buttons
@observer(self.buttons, 'value')
def _(*_):
self.value = tuple(value
for btn, value in zip(self.buttons, self._selection_obj._options_values)
if btn.value)
self.add_class('btn-group')
def reset(self):
opts = self.options
self.options = []
self.options = opts
def set_value(self, x):
for b, opt in zip(self.buttons, self.options):
b.value = (opt in x)
def set_all_on(self):
for b, opt in zip(self.buttons, self.options):
b.value = True
def set_all_off(self):
for b, opt in zip(self.buttons, self.options):
b.value = False
def observer(widgets, trait_name):
def wrapper(func):
if isinstance(widgets, Widget):
widgets.observe(func, trait_name)
else:
for w in widgets:
w.observe(func, trait_name)
func()
return wrapper
```
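A hedged sketch of how the widget above might be used in a Jupyter session; the import path follows the `emat/interactive/multitoggle.py` location of the file, and the exact rendering depends on the ipywidgets version in use.
```python
# Intended for a Jupyter notebook cell (ipywidgets rendering).
from emat.interactive.multitoggle import MultiToggleButtons

toggles = MultiToggleButtons(description='Measures', options=['speed', 'delay', 'cost'])
toggles.set_value(['speed', 'cost'])   # switch on a subset of the buttons
print(toggles.value)                   # -> ('speed', 'cost')
toggles                                # display the button group
```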
#### File: emat/interactive/prototype_data.py
```python
import numpy, pandas
import ipywidgets
from ipywidgets import interactive
import matplotlib.pyplot as plt
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from .lazy import lazy
from .prototype_logging import logger
from ..database import SQLiteDB, Database
from .prototype_pane import ExplorerComponent
class ExplorerData(ExplorerComponent):
def __init__(
self,
parent,
):
super().__init__(parent_widget=parent)
self._all_performance_measures = None
self._all_strategy_names = None
self._all_risk_factor_names = None
def load_from_database(
self,
performance_measures=None,
risk_factor_names=None,
strategy_names=None,
feature_names=None,
design_name = None,
):
if not isinstance(self.db, Database):
raise ValueError('db not ready')
if design_name is None:
design_name = self.design_name
self.X = self.db.read_experiment_parameters(self.scope.name, design_name)
self.Y = self.db.read_experiment_measures(self.scope.name, design_name)
if feature_names is not None:
performance_measures = [i for i in feature_names if i in self.all_performance_measures_]
risk_factor_names = [i for i in feature_names if i in self.all_risk_factors_]
strategy_names = [i for i in feature_names if i in self.all_strategy_names_]
if performance_measures is None:
self.performance_measures = self.all_performance_measures
else:
self.performance_measures = list(performance_measures)
if strategy_names is None:
self.strategy_names = self.all_strategy_names
else:
self.strategy_names = list(strategy_names)
if risk_factor_names is None:
self.risk_factor_names = self.all_risk_factors
else:
self.risk_factor_names = list(risk_factor_names)
self.X = self.X.loc[:, self.risk_factor_names+self.strategy_names]
try:
self.Y = self.Y.loc[:, performance_measures]
except KeyError:
logger.debug(":: The columns of Y include:")
for col in self.Y.columns:
logger.debug(f":: * {col}")
raise
Y_millions = self.Y.min(axis=0) > 1e6
Y_thousands = (self.Y.min(axis=0) > 1e3) & (~Y_millions)
self.Y.loc[:, Y_millions] /= 1e6
self.Y.loc[:, Y_thousands] /= 1e3
Y_millions.loc[:] = False
Y_thousands.loc[:] = False
self.joint_data = pandas.concat([self.X, self.Y], axis=1)
# self.risk_factor_names = [i for i in self.X.columns if i not in self.strategy_names]
# self.performance_measure_names = self.Y.columns
# def workbench_format(self):
# try:
# self.X
# self.Y
# except AttributeError:
# # must load from the database if not already in memory
# self.load_from_database()
#
# experiments = self.X.copy()
# for strategy in self.strategy_names:
# # We presently assume all strategies are categorical (namely, binary)
# # this could be relaxed in the future
# # but it will always be important to explicitly transform datatypes here
# # for categorical inputs (strategies OR risks)
# experiments[strategy] = (experiments[strategy]>0).astype(str)
# y = {
# k:self.Y[k].values
# for k in self.Y.columns
# }
# return experiments.to_records(), y
# def singleton(self, qry):
# try:
# return self.engine.execute(qry).fetchone()[0]
# except TypeError:
# return
#
#
# def data_X(self):
# X = pandas.read_sql_query(f"""
# SELECT
# sampleID, varID, value
# FROM
# perfMeasInput
# ORDER BY
# sampleID, varID
# """, self.engine).pivot(index='sampleID', columns='varID', values='value')
# X.columns = [
# (self.singleton(f"SELECT riskVarDesc FROM riskVar WHERE riskVarID={j}")
# or self.singleton(f"SELECT strategyDesc FROM strategy WHERE strategyID={j}")
# or j)
# for j in X.columns
# ]
# return X
#
#
# def data_Y(self):
# Y = pandas.read_sql_query(f"""
# SELECT
# sampleID, perfMeasID, estimate
# FROM
# perfMeasRes
# ORDER BY
# sampleID, perfMeasID
# """, self.engine).pivot(index='sampleID', columns='perfMeasID', values='estimate')
# Y.columns = [
# (self.singleton(f"SELECT perfMeasDesc FROM perfMeas WHERE perfMeasID={j}")
# or j)
# for j in Y.columns
# ]
# Y = numpy.exp(Y)
# return Y
#
@lazy
def all_risk_factors(self):
assert isinstance(self.db, Database)
return self.db.read_uncertainties(self.scope.name)
@lazy
def all_strategy_names(self):
assert isinstance(self.db, Database)
return self.db.read_levers(self.scope.name)
@lazy
def all_performance_measures(self):
assert isinstance(self.db, Database)
return self.db.read_measures(self.scope.name)
@lazy
def all_risk_factors_(self):
return set(self.all_risk_factors)
@lazy
def all_strategy_names_(self):
return set(self.all_strategy_names)
@lazy
def all_performance_measures_(self):
return set(self.all_performance_measures)
# def query(self, query):
# "An arbitrary database query. Be careful!"
# return pandas.read_sql_query(query, self.engine)
#
# def table_schema(self, tablename):
# return self.singleton(f"SELECT sql FROM sqlite_master WHERE type='table' AND name='{tablename}'")
```
#### File: emat/interactive/prototype_scope.py
```python
import os
import sqlite3
from ipywidgets import (
VBox, HBox, Label, Layout, interactive_output, Dropdown, Output,
Button, ToggleButtons, FloatRangeSlider, Accordion, Text
)
from .prototype_pane import ExplorerPane
from IPython.display import display, HTML
from ..database import Database
import __main__
class ExplorerScopeManager(ExplorerPane):
def __init__(self, parent_widget=None, proposed_db_name='db'):
super().__init__(parent_widget=parent_widget)
if isinstance(proposed_db_name, Database):
self.filenamer = Text(
value=proposed_db_name.get_db_info(),
placeholder='db info',
description='DB:',
disabled=True
)
self.db = proposed_db_name
else:
self.filenamer = Text(
value=proposed_db_name,
placeholder='enter object name',
description='DB:',
disabled=False
)
self.scope_picker = Dropdown(
options=[
' ',
],
value=' ',
description='Scope:',
disabled=False,
)
self.design_picker = Dropdown(
options=[
' ',
],
value=' ',
description='Design:',
disabled=False,
)
self.filenamer.observe(self.check_db_status)
self.scope_picker.observe(self.update_current_scope_from_db)
self.design_picker.observe(self.update_current_design_from_db)
self.make_stack(
self.filenamer,
self.scope_picker,
self.design_picker,
)
self.next_button.on_click(lambda x: self._parent_widget.load_selection_library())
with self.header:
display(HTML("<h1>Scope Manager</h1>"))
self.check_db_status(None)
def check_db_status(self, action_content):
main_object_name = self.filenamer.value
self.footer.clear_output()
with self.footer:
if main_object_name in __main__.__dict__:
db = getattr(__main__, main_object_name)
if isinstance(db, Database):
print(f"√ '{main_object_name}' is Database")
self.db = db
else:
print(f"X '{main_object_name}' not Database")
self.db = None
elif self.db:
print(f"√ {self.db.get_db_info()}")
else:
print(f"X '{main_object_name}' not a known object")
self.db = None
self._update_scope_name_choices()
self._update_design_name_choices()
self.update_current_scope_from_db(action_content)
if self.scope is not None:
print(f"√ Loaded Scope: {self.scope.name}")
else:
print(f"X No Scope")
def _update_scope_name_choices(self):
if self.db is None:
self.scope_picker.options = [' ']
else:
scope_names = self.db.read_scope_names()
if self.scope_picker.options != scope_names:
self.scope_picker.options = scope_names
if len(scope_names) == 1:
self.update_current_scope_from_db(None)
def _update_design_name_choices(self):
if self.scope is None:
self.design_picker.options = [' ']
else:
design_names = self.db.read_design_names(self.scope.name)
if self.design_picker.options != design_names:
self.design_picker.options = design_names
if len(design_names)==1:
self.update_current_design_from_db(None)
def update_current_scope_from_db(self, action_content):
scope_name = self.scope_picker.value
if self.db is not None:
if self.scope is not None:
if self.scope.name == scope_name:
return
self.scope = self.db.read_scope(scope_name)
else:
self.scope = None
def update_current_design_from_db(self, action_content):
self.design_name = self.design_picker.value
def check_file_status(self, action_content):
f = self.filenamer.value
self.footer.clear_output()
with self.footer:
if os.path.exists(f):
print("√ File Exists")
fa = os.path.abspath(f)
checker = None
try:
checker = sqlite3.connect(
f'file:{f}?mode=ro',
uri=True
)
except Exception as err:
print("X ERROR")
print(str(err))
else:
try:
checker.cursor().execute("SELECT strategyDesc FROM strategy LIMIT 1")
except Exception as err:
print("X ERROR")
print(str(err))
else:
print("√ File has 'strategy' table")
try:
checker.cursor().execute("SELECT riskVarDesc FROM riskVar LIMIT 1")
except Exception as err:
print("X ERROR")
print(str(err))
else:
print("√ File has 'riskVar' table")
try:
checker.cursor().execute("SELECT perfMeasDesc FROM perfMeas LIMIT 1")
except Exception as err:
print("X ERROR")
print(str(err))
else:
print("√ File has 'perfMeas' table")
try:
checker.cursor().execute("SELECT perfMeasID FROM perfMeasRes LIMIT 1")
except Exception as err:
print("X ERROR")
print(str(err))
else:
print("√ File has 'perfMeasRes' table")
try:
checker.cursor().execute("SELECT sampleID, varID, value FROM perfMeasInput LIMIT 1")
except Exception as err:
print("X ERROR")
print(str(err))
else:
print("√ File has 'perfMeasInput' table")
finally:
if checker is not None:
checker.close()
else:
print("X File Does Not Exist")
```
#### File: emat/learn/chaining.py
```python
from sklearn.multioutput import RegressorChain as _RegressorChain
from .ensemble import VotingRegressor
from .frameable import FrameableMixin
class RegressorChain(
_RegressorChain,
FrameableMixin,
):
def fit(self, X, Y):
self._pre_fit(X, Y)
return super().fit(X,Y)
def predict(self, X):
Y = super().predict(X)
Y = self._post_predict(X, Y)
return Y
def EnsembleRegressorChain(
base_estimator,
n_chains,
):
ensemble = []
for n in range(n_chains):
e = RegressorChain(
base_estimator=base_estimator,
order='random',
random_state=n,
)
ensemble.append((f'chain_{n}',e))
return VotingRegressor(
ensemble
)
```
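A hedged usage sketch of the chained regressor above on toy data; the import path mirrors the `emat/learn/chaining.py` file location, and the DataFrame round-trip relies on the `FrameableMixin` hooks shown in the class.
```python
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from emat.learn.chaining import RegressorChain  # path inferred from file location

X = pd.DataFrame(np.random.rand(100, 3), columns=['a', 'b', 'c'])
Y = pd.DataFrame({'y1': X['a'] + X['b'], 'y2': X['b'] * X['c']})

# Each output is predicted in (random) sequence, feeding earlier predictions
# forward as extra inputs; EnsembleRegressorChain averages several such chains.
chain = RegressorChain(base_estimator=LinearRegression(), order='random', random_state=0)
chain.fit(X, Y)
print(chain.predict(X)[:5])   # predictions come back frame-like with Y's labels
```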
#### File: emat/learn/preprocessing.py
```python
import pandas
from sklearn.preprocessing import PolynomialFeatures as _PolynomialFeatures
class PolynomialFeatures(_PolynomialFeatures):
"""
Generate polynomial and interaction features, preserving DataFrame labels.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
order : str in {'C', 'F'}, default 'C'
Order of output array in the dense case. 'F' order is faster to
compute, but may slow down subsequent estimators.
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
"""
def transform(self, X):
try:
y = super().transform(X)
except:
print(X.shape)
print(self.n_input_features_)
raise
if isinstance(X, pandas.DataFrame):
return pandas.DataFrame(
data=y,
index=X.index,
columns=self.get_feature_names(X.columns),
)
return y
```
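A hedged sketch of the label-preserving transform above; the import path follows the `emat/learn/preprocessing.py` file location, and the exact feature-name strings depend on the (older) scikit-learn API that `get_feature_names` comes from.
```python
import pandas as pd
from emat.learn.preprocessing import PolynomialFeatures  # path inferred from file location

X = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [0.5, 0.25, 0.125]})
poly = PolynomialFeatures(degree=2, include_bias=False)
XP = poly.fit_transform(X)

# Unlike the plain sklearn transformer, the result is a DataFrame whose
# columns name each polynomial term, e.g. ['a', 'b', 'a^2', 'a b', 'b^2'].
print(XP.columns.tolist())
```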
#### File: emat/learn/splits.py
```python
import numpy as np
import warnings
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.model_selection._split import _RepeatedSplits
from sklearn.utils.validation import column_or_1d
from sklearn.utils.validation import check_array
from sklearn.utils.multiclass import type_of_target
class ExogenouslyStratifiedKFold(StratifiedKFold):
"""Exogenously Stratified K-Folds cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples in each class of an exogenously defined factor.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : int, RandomState instance or None, optional, default=None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when ``shuffle`` == True.
Examples
--------
>>> from emat.learn.splits import ExogenouslyStratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = ExogenouslyStratifiedKFold(n_splits=2)
>>> skf.get_n_splits(X, y)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
ExogenouslyStratifiedKFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in skf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size ``trunc(n_samples / n_splits)``, the last one has
the complementary.
See also
--------
RepeatedExogenouslyStratifiedKFold: Repeats Exogenously Stratified K-Fold n times.
"""
def __init__(self, exo_data=None, n_splits=3, shuffle=False, random_state=None):
super().__init__(n_splits, shuffle, random_state)
self.exo_data = exo_data
def _make_test_folds(self, X, y=None):
rng = self.random_state
if self.exo_data is not None:
if y.shape[0] != self.exo_data.shape[0]:
raise ValueError(f"bad shape of exo_data, y.shape={y.shape}, self.exo_data.shape={self.exo_data.shape}")
y = self.exo_data
y = np.asarray(y)
type_of_target_y = type_of_target(y)
allowed_target_types = ('binary', 'multiclass')
if type_of_target_y not in allowed_target_types:
raise ValueError(
'Supported target types are: {}. Got {!r} instead.'.format(
allowed_target_types, type_of_target_y))
y = column_or_1d(y)
n_samples = y.shape[0]
unique_y, y_inversed = np.unique(y, return_inverse=True)
y_counts = np.bincount(y_inversed)
min_groups = np.min(y_counts)
if np.all(self.n_splits > y_counts):
raise ValueError("n_splits=%d cannot be greater than the"
" number of members in each class."
% (self.n_splits))
if self.n_splits > min_groups:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of members in any class cannot"
" be less than n_splits=%d."
% (min_groups, self.n_splits)), Warning)
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each class so as to respect the balance of
# classes
# NOTE: Passing the data corresponding to ith class say X[y==class_i]
# will break when the data is not 100% stratifiable for all classes.
# So we pass np.zeroes(max(c, n_splits)) as data to the KFold
per_cls_cvs = [
KFold(self.n_splits, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_splits)))
for count in y_counts]
test_folds = np.zeros(n_samples, dtype=int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[y == cls]
# the test split can be too big because we used
# KFold(...).split(X[:max(c, n_splits)]) when data is not 100%
# stratifiable for all the classes
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[y == cls] = cls_test_folds
return test_folds
def _iter_test_masks(self, X, y=None, groups=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_splits):
yield test_folds == i
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting ``random_state``
to an integer.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super().split(X, y, groups)
class RepeatedExogenouslyStratifiedKFold(_RepeatedSplits):
"""Repeated Stratified K-Fold cross validator.
Repeats Stratified K-Fold n times with different randomization in each
repetition.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : None, int or RandomState, default=None
Random state to be used to generate random state for each
repetition.
Examples
--------
>>> from emat.learn.splits import RepeatedExogenouslyStratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rskf = RepeatedExogenouslyStratifiedKFold(n_splits=2, n_repeats=2,
... random_state=36851234)
>>> for train_index, test_index in rskf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
...
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
See also
--------
RepeatedKFold: Repeats K-Fold n times.
"""
def __init__(self, exo_data=None, n_splits=5, n_repeats=10, random_state=None):
super().__init__(
ExogenouslyStratifiedKFold,
n_repeats,
random_state,
n_splits=n_splits,
exo_data=exo_data,
)
```
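A hedged sketch of stratifying folds by an exogenous factor rather than by the (possibly continuous) target itself; the import path follows the `emat/learn/splits.py` file location and assumes the NumPy/scikit-learn versions this module was written against.
```python
import numpy as np
from emat.learn.splits import ExogenouslyStratifiedKFold  # path inferred from file location

X = np.random.rand(12, 2)
y = np.random.rand(12)                             # continuous target
region = np.repeat(['north', 'south', 'east'], 4)  # exogenous stratification factor

cv = ExogenouslyStratifiedKFold(exo_data=region, n_splits=2)
for train_idx, test_idx in cv.split(X, y):
    # Each fold keeps the share of 'north'/'south'/'east' cases roughly constant.
    print(len(train_idx), len(test_idx))
```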
#### File: model/core_python/core_python_examples.py
```python
import numpy as np
class Dummy():
def __init__(self):
'''nothing to do here'''
def calc_pm1(self, exp_vars):
return self.__sum_exp_vars(exp_vars)
def calc_pm2(self, exp_vars):
return self.__sum_exp_vars(exp_vars) * 2
def calc_pm10(self, exp_vars):
return self.__sum_exp_vars(exp_vars) * 10
def calc_pm100(self, exp_vars):
return self.__sum_exp_vars(exp_vars) * 100
def __sum_exp_vars(self,ev):
return ev['exp_var 1'] + ev['exp_var 2']
def __call__(self, **kwargs):
return dict(
pm_1=self.calc_pm1(kwargs),
pm_2=self.calc_pm2(kwargs),
pm_10=self.calc_pm10(kwargs),
pm_100=self.calc_pm100(kwargs),
)
def NoisyDummy(**kwargs):
lever1 = kwargs.get('lever1', 0)
lever2 = kwargs.get('lever2', 0)
uncertain1 = kwargs.get('uncertain1', 3)
uncertain2 = np.exp(kwargs.get('uncertain2', -0.7))
uncertain3 = np.exp(kwargs.get('uncertain3', 0.7))
certain4 = kwargs.get('certain4', 3)
noise_amplitude = kwargs.get('noise_amplitude', 2.0)
noise_frequency = kwargs.get('noise_frequency', 5.0)
pm_1 = (
- uncertain2 * lever1 * lever1
+ (uncertain1 + certain4) * (lever1 + lever2)
+ noise_amplitude * np.sin(noise_frequency * lever1)
)
pm_2 = np.minimum(
1.11e+111 * uncertain1,
np.exp(
uncertain3 * lever1 * (lever1 + lever2)
+ uncertain1 * lever1
+ noise_amplitude * np.cos(noise_frequency * lever2)
)
)
pm_3 = (
noise_amplitude * np.cos(noise_frequency * lever1)
+ noise_amplitude * np.sin(noise_frequency * lever2)
+ certain4
)
pm_4 = np.exp(
uncertain1 + certain4
)
return {'pm_1':pm_1, 'pm_2': pm_2, 'pm_3': pm_3, 'pm_4':pm_4}
def pmt(rate, nper, pv, fv=0, when='end'):
"""
Compute the payment against loan principal plus interest.
Given:
* a present value, `pv` (e.g., an amount borrowed)
* a future value, `fv` (e.g., 0)
* an interest `rate` compounded once per period, of which
there are
* `nper` total
* and (optional) specification of whether payment is made
at the beginning (`when` = {'begin', 1}) or the end
(`when` = {'end', 0}) of each period
Return:
the (fixed) periodic payment.
Parameters
----------
rate : array_like
Rate of interest (per period)
nper : array_like
Number of compounding periods
pv : array_like
Present value
fv : array_like, optional
Future value (default = 0)
when : {{'begin', 1}, {'end', 0}}, {string, int}
When payments are due ('begin' (1) or 'end' (0))
Returns
-------
out : ndarray
Payment against loan plus interest. If all input is scalar, returns a
scalar float. If any input is array_like, returns payment for each
input element. If multiple inputs are array_like, they all must have
the same shape.
Notes
-----
This function is replicated from the numpy_financial package, under the same
LICENSE as the TMIP-EMAT repository.
Copyright (c) 2005-2019, NumPy Developers. All rights reserved.
"""
_when_to_num = {'end': 0, 'begin': 1,
'e': 0, 'b': 1,
0: 0, 1: 1,
'beginning': 1,
'start': 1,
'finish': 0}
def _convert_when(when):
# Test to see if when has already been converted to ndarray
# This will happen if one function calls another, for example ppmt
if isinstance(when, np.ndarray):
return when
try:
return _when_to_num[when]
except (KeyError, TypeError):
return [_when_to_num[x] for x in when]
when = _convert_when(when)
(rate, nper, pv, fv, when) = map(np.array, [rate, nper, pv, fv, when])
temp = (1 + rate)**nper
mask = (rate == 0)
masked_rate = np.where(mask, 1, rate)
fact = np.where(mask != 0, nper,
(1 + masked_rate*when)*(temp - 1)/masked_rate)
return -(fv + pv*temp) / fact
def Road_Capacity_Investment(
# constant
free_flow_time=60,
initial_capacity=100,
# uncertainty
alpha=0.15,
beta=4.0,
input_flow=100,
value_of_time=0.01,
unit_cost_expansion=1,
interest_rate=0.03,
yield_curve=0.01,
# policy
expand_capacity=10,
amortization_period=30,
interest_rate_lock=False,
debt_type='GO Bond',
lane_width=10,
**kwargs,
):
"""
A fictitious example model for road capacity investment.
This model simulates a capacity expansion investment on a single
network link. The link volume-delay function is governed by the
`BPR function <https://en.wikipedia.org/wiki/Route_assignment#Frank-Wolfe_algorithm>`_.
This model is a bit contrived, because it is designed to explicitly demonstrate
a wide variety of EMAT features in a transportation planning model that is as simple
as possible. For example, the policy levers are structured so that there is one
of each dtype (float, int, bool, and categorical).
Args:
free_flow_time (float, default 60): The free flow travel time on the link.
initial_capacity (float, default 100): The pre-expansion capacity on the link.
alpha (float, default 0.15): Alpha parameter to the BPR volume-delay function.
beta (float, default 4.0): Beta parameter to the BPR volume-delay function.
input_flow (float, default 100): The future input flow on the link.
value_of_time (float, default 0.01): The value of a unit of travel time savings
per unit of flow on the link.
unit_cost_expansion (float, default 1): The present marginal cost of adding one
unit of capacity to the link (assumes no economies of scale on expansion cost)
interest_rate (float, default 0.03): The interest rate actually incurred for
revenue bonds amortized over 15 years. The interest rate for general obligation
bonds is assumed to be 0.0025 less than this value.
yield_curve (float, default 0.01): The marginal increase in the interest_rate if
the amortization period is 50 years instead of 15. The yield curve is assumed
to be linearly projected to all other possible amortization periods
expand_capacity (float, default 10): The amount of capacity expansion actually
constructed.
amortization_period (int, default 30): The time period over which the construction
costs are amortized.
interest_rate_lock (bool, default False): Whether interest rates are locked at
the assumed current rate of 0.03 / 0.01 or allowed to float.
debt_type ('GO Bond', 'Rev Bond', 'Paygo'): Type of financing. General obligation
bonds are assumed to have a lower interest rate than revenue bonds, but
may be politically less desirable. Pay-as-you-go financing incurs no actual
interest costs, but requires actually having the funds available.
lane_width (float, default 10): The width of lanes on the roadway. This parameter
is intentionally wacky, causing massive congestion for any value other than 10,
to demonstrate what might happen with broken model inputs.
Returns:
dict:
no_build_travel_time
The average travel time on the link if no
capacity expansion was constructed.
build_travel_time
The average travel time on the link after expansion.
time_savings
The average travel time savings as a result of the
expansion.
value_of_time_savings
The total value of the travel time savings,
accounting for the time savings per traveler, the total flow, and
the value of time.
present_cost_expansion
The present cost of building the expansion
cost_of_capacity_expansion
The annual payment to finance the expansion,
when amortized.
net_benefits
The value of the time savings minus the annual payment.
"""
debt_type = debt_type.lower()
assert debt_type in ('go bond', 'paygo', 'rev bond')
average_travel_time0 = free_flow_time * (1 + alpha*(input_flow/initial_capacity)**beta)
capacity = initial_capacity + expand_capacity
average_travel_time1 = free_flow_time * (1 + alpha*(input_flow/capacity)**beta)
oops = np.absolute(lane_width-10)
average_travel_time1 += (oops*1000)**0.5 + np.sin(input_flow)*oops*2
travel_time_savings = average_travel_time0 - average_travel_time1
value_of_time_savings = value_of_time * travel_time_savings * input_flow
present_cost_of_capacity_expansion = float(unit_cost_expansion * expand_capacity)
if interest_rate_lock:
interest_rate = 0.03
yield_curve = 0.01
if (debt_type == 'go bond'):
interest_rate -= 0.0025
elif (debt_type == 'paygo'):
interest_rate = 0
effective_interest_rate = interest_rate + yield_curve * (amortization_period-15) / 35
cost_of_capacity_expansion = pmt(
effective_interest_rate,
amortization_period,
present_cost_of_capacity_expansion,
)
return dict(
no_build_travel_time=average_travel_time0,
build_travel_time=average_travel_time1,
time_savings=travel_time_savings,
value_of_time_savings=value_of_time_savings,
present_cost_expansion=present_cost_of_capacity_expansion,
cost_of_capacity_expansion=-cost_of_capacity_expansion,
net_benefits = value_of_time_savings + cost_of_capacity_expansion,
)
def _Road_Capacity_Investment_CmdLine():
"""
This is a demo for calling a core model function on the command line.
"""
import argparse, pandas, os, sys, warnings
parser = argparse.ArgumentParser()
parser.add_argument('--levers', type=str, default='levers.yml', help='Levers Yaml File')
parser.add_argument('--uncs', type=str, default="uncs.yml", help='Uncertainties Yaml File')
parser.add_argument('--no-random-crashes', action='store_true', help='disable random crashes')
args = parser.parse_args()
import logging
logger = logging.getLogger('emat.RoadTest')
file_handler = logging.FileHandler("emat-road-test.log")
file_handler.setLevel(10)
LOG_FORMAT = '[%(asctime)s] %(name)s.%(levelname)s: %(message)s'
file_handler.setFormatter(logging.Formatter(LOG_FORMAT))
console_handler = logging.StreamHandler(stream=sys.stdout)
console_handler.setLevel(20)
console_handler.setFormatter(logging.Formatter(LOG_FORMAT))
logger.addHandler(console_handler)
logger.addHandler(file_handler)
logger.setLevel(10)
logger.info("running emat-road-test-demo")
logger.debug(str(args))
logger.debug(str(os.getcwd()))
import yaml
if os.path.exists(args.levers):
with open(args.levers, 'rt') as f:
levers = yaml.safe_load(f)
else:
levers = {'mandatory_unused_lever':42}
if os.path.exists(args.uncs):
with open(args.uncs, 'rt') as f:
uncs = yaml.safe_load(f)
else:
uncs = {}
if 'mandatory_unused_lever' not in levers:
raise ValueError("missing 'mandatory_unused_lever'")
if levers['mandatory_unused_lever'] != 42:
raise ValueError("incorrect value for 'mandatory_unused_lever', must be 42")
if 'unit_cost_expansion' in uncs:
raise ValueError("cannot give 'unit_cost_expansion', use 'labor_unit_cost_expansion' and 'materials_unit_cost_expansion'")
if uncs.get('labor_unit_cost_expansion', 0) <= uncs.get('materials_unit_cost_expansion', 0):
raise ValueError("'labor_unit_cost_expansion' cannot be less than or equal 'materials_unit_cost_expansion'")
if uncs.get('labor_unit_cost_expansion', 0) > uncs.get('materials_unit_cost_expansion', 0)*2:
raise ValueError("'labor_unit_cost_expansion' cannot be more than double 'materials_unit_cost_expansion'")
unit_cost_expansion = uncs.pop('labor_unit_cost_expansion', 0) + uncs.pop('materials_unit_cost_expansion', 0)
uncs['unit_cost_expansion'] = unit_cost_expansion
# (pseudo)random crash
if not args.no_random_crashes:
if 'expand_capacity' in levers and levers['expand_capacity'] > 90 and not os.path.exists('prevent_random_crash.txt'):
with open('prevent_random_crash.txt', 'wt') as f:
f.write("this file will prevent random crashes in `emat-road-test-demo`")
logger.error("Random crash, ha ha!")
sys.exit(-9)
try:
for k,v in levers.items():
logger.debug(f"lever: {k} = {v}")
for k,v in uncs.items():
logger.debug(f"uncertainty: {k} = {v}")
result = Road_Capacity_Investment(**levers, **uncs)
for k,v in result.items():
logger.debug(f"result: {k} = {v}")
result1 = {str(k):float(result[k]) for k in ['no_build_travel_time','build_travel_time','time_savings']}
result2 = pandas.DataFrame({
'value_of_time_savings': [np.exp(result['value_of_time_savings']/1000), np.nan],
'present_cost_expansion': [np.nan, result['present_cost_expansion']],
'cost_of_capacity_expansion': [np.exp(result['cost_of_capacity_expansion']/1000), np.nan],
'net_benefits': [np.nan,result['net_benefits']],
}, index=['exp','plain'])
with open('output.yaml', 'wt') as f:
yaml.safe_dump(result1, f)
result2.to_csv('output.csv.gz')
logger.info("emat-road-test-demo completed without errors")
except:
logger.exception("unintentional crash")
sys.exit(-8)
```
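A hedged worked example for the two functions above; the numbers are illustrative, and the import path is inferred from the file location under the package.
```python
# Import path inferred from the file location; adjust to your installation.
from emat.model.core_python.core_python_examples import pmt, Road_Capacity_Investment

# Fixed payment on a 1000-unit present value at 3% per period over 30 periods:
# pmt(0.03, 30, 1000) is roughly -51.02 (negative because it is an outflow).
payment = pmt(0.03, 30, 1000)

# One run of the toy BPR-based capacity model, expanding capacity by 20 units
# and leaving every other input at its default value.
result = Road_Capacity_Investment(expand_capacity=20)
print(payment, result['time_savings'], result['net_benefits'])
```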
#### File: emat/multitarget/anisotropic.py
```python
import pandas, numpy
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from .frameable import FrameableMixin
class AnisotropicGaussianProcessRegressor(
GaussianProcessRegressor,
FrameableMixin,
):
def __init__(
self,
kernel_generator=None,
alpha=1e-10,
optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0,
normalize_y=False,
standardize_before_fit=True,
copy_X_train=True,
random_state=None,
):
self.kernel_generator = kernel_generator
self.standardize_before_fit = standardize_before_fit
super().__init__(
kernel=None,
alpha=alpha,
optimizer=optimizer,
n_restarts_optimizer=n_restarts_optimizer,
normalize_y=normalize_y,
copy_X_train=copy_X_train,
random_state=random_state,
)
def fit(self, X, y):
"""Fit Gaussian process regression model.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples, [n_output_dims])
Target values
Returns
-------
self : returns an instance of self.
"""
if self.kernel_generator is None:
if self.standardize_before_fit:
kernel_generator = lambda dims: RBF([1.0] * dims)
else:
kernel_generator = lambda dims: C() * RBF([1.0] * dims)
else:
kernel_generator = self.kernel_generator
self.kernel = kernel_generator(X.shape[1])
self._pre_fit(X, y)
if self.standardize_before_fit:
y = numpy.copy(y)
self.standardize_Y = y.std(axis=0, ddof=0)
y /= self.standardize_Y
else:
self.standardize_Y = None
return super().fit(X, y)
def predict(self, X, return_std=False, return_cov=False):
"""
Predict using the Gaussian process regression model
We can also predict based on an unfitted model by using the GP prior.
In addition to the mean of the predictive distribution, also its
standard deviation (return_std=True) or covariance (return_cov=True).
Note that at most one of the two can be requested.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Query points where the GP is evaluated
return_std : bool, default: False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default: False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean
Returns
-------
y_mean : array, shape = (n_samples, [n_output_dims])
Mean of predictive distribution a query points
y_std : array, shape = (n_samples,), optional
Standard deviation of predictive distribution at query points.
Only returned when return_std is True.
y_cov : array, shape = (n_samples, n_samples), optional
Covariance of joint predictive distribution a query points.
Only returned when return_cov is True.
"""
if return_cov:
y_hat, y_cov = super().predict(X, return_std=return_std, return_cov=return_cov)
y_std = None
elif return_std:
y_hat, y_std = super().predict(X, return_std=return_std, return_cov=return_cov)
y_cov = None
else:
y_hat = super().predict(X, return_std=return_std, return_cov=return_cov)
y_std = None
y_cov = None
if self.standardize_Y is not None:
try:
y_hat *= self.standardize_Y[None, :]
except IndexError:
y_hat *= self.standardize_Y
if y_std is not None:
try:
y_std *= self.standardize_Y[None, :]
except IndexError:
y_std *= self.standardize_Y
if y_cov is not None:
raise NotImplementedError()
y_hat = self._post_predict(X, y_hat)
if y_std is not None:
y_std = self._post_predict(X, y_std)
if y_std is not None:
return y_hat, y_std
if y_cov is not None:
return y_hat, y_cov
return y_hat
```
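A hedged usage sketch of the anisotropic GP regressor above; the import path follows the `emat/multitarget/anisotropic.py` file location, and the frame handling depends on the `FrameableMixin` hooks referenced in the class.
```python
import numpy as np
import pandas as pd
from emat.multitarget.anisotropic import AnisotropicGaussianProcessRegressor  # path inferred

X = pd.DataFrame(np.random.rand(50, 2), columns=['speed', 'volume'])
Y = pd.DataFrame({'delay': 2.0 * X['speed'] + X['volume'] ** 2})

# With the default settings the outputs are standardized before fitting and an
# RBF kernel with one length scale per input dimension is generated.
gpr = AnisotropicGaussianProcessRegressor(n_restarts_optimizer=2)
gpr.fit(X, Y)
print(gpr.kernel_)          # fitted anisotropic length scales
print(gpr.predict(X)[:5])
```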
#### File: emat/multitarget/boosting.py
```python
from sklearn.base import RegressorMixin, BaseEstimator, clone
from .frameable import FrameableMixin
class BoostedRegressor(BaseEstimator, RegressorMixin, FrameableMixin):
def __init__(
self,
estimators,
):
self.estimators = estimators
def fit(self, X, Y, sample_weight=None):
if sample_weight is not None:
raise NotImplementedError
self._pre_fit(X, Y)
self.estimators_ = []
Y_ = Y
for n,e in enumerate(self.estimators):
e_ = clone(e)
e_.fit(X, Y_)
self.estimators_.append(e_)
if n+1 < len(self.estimators):
Y_ = Y_ - e_.predict(X)
def predict(self, X):
Yhat = self.estimators_[0].predict(X)
for e_ in self.estimators_[1:]:
Yhat += e_.predict(X)
Yhat = self._post_predict(X, Yhat)
return Yhat
```
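A hedged sketch of the residual-boosting pattern above: each estimator in the list is fit to what the previous estimators have not yet explained, and predictions are summed. The import path follows `emat/multitarget/boosting.py`.
```python
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from emat.multitarget.boosting import BoostedRegressor  # path inferred from file location

X = pd.DataFrame(np.random.rand(200, 2), columns=['a', 'b'])
y = (np.sin(6 * X['a']) + X['b']).to_numpy()

# The linear model captures the broad trend; the forest is fit to its residuals.
model = BoostedRegressor([LinearRegression(), RandomForestRegressor(n_estimators=20, random_state=0)])
model.fit(X, y)
print(model.predict(X)[:5])
```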
#### File: emat/scope/names.py
```python
class ShortnameMixin:
"""
Adds a shortname attribute, which falls back to name if not set.
"""
@property
def shortname(self):
"""Str: An abbreviated name, or the full name if not otherwise defined."""
if not hasattr(self, '_shortname') or self._shortname is None:
return self.name
return self._shortname
@shortname.setter
def shortname(self, value):
if value is None:
self._shortname = None
else:
self._shortname = str(value)
if self._shortname == self.name:
self._shortname = None
@shortname.deleter
def shortname(self):
self._shortname = None
@property
def shortname_if_any(self):
"""Str: The abbreviated name, or None."""
if not hasattr(self, '_shortname') or self._shortname is None:
return None
return self._shortname
class TaggableMixin:
"""
Adds a `tags` attribute.
"""
@property
def tags(self):
"""Set: A set of tags attached to this object."""
tags = getattr(self, '_tags', None)
if not tags:
tags = set()
return tags
def add_tag(self, tag):
if not hasattr(self, '_tags'):
self._tags = {tag}
else:
self._tags.add(tag)
def remove_tag(self, tag):
if hasattr(self, '_tags'):
self._tags.remove(tag)
```
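A short sketch (not in the source) of how these mixins behave when mixed into a class; the `Thing` host class below is hypothetical:
```python
# Hypothetical host class combining the two mixins defined above.
from emat.scope.names import ShortnameMixin, TaggableMixin

class Thing(ShortnameMixin, TaggableMixin):
    def __init__(self, name, shortname=None):
        self.name = name
        self.shortname = shortname   # goes through the property setter

t = Thing("Regional Transit Ridership", shortname="Ridership")
assert t.shortname == "Ridership"
t.shortname = None                   # falls back to the full name
assert t.shortname == t.name
assert t.shortname_if_any is None
t.add_tag("output")
t.add_tag("transit")
assert t.tags == {"output", "transit"}
```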
#### File: emat/scope/parameter.py
```python
import numbers
from typing import Collection, Any, Mapping
import numpy
from ..workbench.em_framework import parameters as workbench_param
from scipy import stats
from scipy.stats._distn_infrastructure import rv_frozen
from ..util import distributions, DistributionTypeError, DistributionFreezeError
from ..util import make_rv_frozen, rv_frozen_as_dict
from .names import ShortnameMixin, TaggableMixin
def standardize_parameter_type(original_type):
"""Standardize parameter type descriptions
Args:
original_type (str): The original type
Returns:
        str: The standardized type name
"""
original_type = original_type.lower()
if 'unc' in original_type:
return 'uncertainty'
if 'lev' in original_type:
return 'lever'
elif 'con' in original_type or 'fix' in original_type:
return 'constant'
raise ValueError('cannot decipher parameter ptype')
def standardize_data_type(original_type):
dtype = str(original_type).lower()
dtype = {
'float': 'real',
'float32': 'real',
'float64': 'real',
'floating': 'real',
'double': 'real',
'integer': 'int',
'int32': 'int',
'int64': 'int',
'long': 'int',
'boolean': 'bool',
'category': 'cat',
'categorical': 'cat',
}.get(dtype, dtype)
return dtype
def _get_bounds_from_dist(dist):
ppf_zero = 0
try:
if isinstance(dist.dist, stats.rv_discrete):
# ppf at actual zero for rv_discrete gives lower bound - 1
# due to a quirk in the scipy.stats implementation
# so we use the smallest positive float instead
ppf_zero = 5e-324
except AttributeError:
pass
lower_bound = dist.ppf(ppf_zero)
upper_bound = dist.ppf(1.0)
return lower_bound, upper_bound
def _get_lower_bound_from_dist(dist):
ppf_zero = 0
try:
if isinstance(dist.dist, stats.rv_discrete):
# ppf at actual zero for rv_discrete gives lower bound - 1
# due to a quirk in the scipy.stats implementation
# so we use the smallest positive float instead
ppf_zero = 5e-324
except AttributeError:
pass
lower_bound = dist.ppf(ppf_zero)
return lower_bound
def _get_upper_bound_from_dist(dist):
upper_bound = dist.ppf(1.0)
return upper_bound
def make_parameter(
name,
ptype='constant',
desc='missing description',
min=None,
max=None,
dist=None,
default=None,
corr=None,
address=None,
dtype='infer',
values=None,
resolution=None,
shortname=None,
abbrev=None,
):
"""
Factory method to build a Parameter or Constant for a model.
This function will create an object of the appropriate (sub)class
for the parameter or constant.
Args:
name (str): A name for this parameter. The name must be a `str`
and ideally a valid Python identifier (i.e., begins with
a letter or underscore, contains only letters, numerals, and
underscores).
ptype (str, default 'constant'): The type for this parameter, one
of {'constant', 'uncertainty', 'lever'}.
min (numeric, optional): The minimum value for this parameter.
max (numeric, optional): The maximum value for this parameter.
dist (str or Mapping or rv_frozen, optional): A definition of a distribution
to use for this parameter. Can be specified just as the name of the distribution
when that distribution is parameterized only by the min and max
(e.g., 'uniform'). If the distribution requires other parameters,
this argument should be a Mapping, with keys including 'name' for
the name of the distribution, as well as giving one or more
named distributional parameters as appropriate. Or, just pass
a rv_frozen object directly (see scipy.stats). The distribution
defined here is of primary use for uncertainty parameters, as the
features of defined uncertainty distributions can be used to
derive probability distributions on outputs. However, distributions
can also be used for policy lever parameters to guide the development
of appropriate experimental designs.
default (Any, optional): A default value for this parameter. The default
value is used as the actual value for constant parameters. It is also
used during univariate sensitivity testing as the value for this
parameter when other parameters are being evaluated at non-default
values.
corr (dict, optional): A correlation definition that relates this parameter
to others. Only applicable for uncertainty parameters.
address (Any, optional): The address to use to access this parameter in
the model. This is an implementation-specific detail. For example,
in an Excel-based model, the address could be a sheet and cell reference
given as a string.
dtype (str, default 'infer'): A dtype for this parameter, one
of {'cat', 'int', 'real', 'bool'} or some sub-class variant or specialization
thereof (e.g., int64).
values (Collection, optional): A collection of possible values, relevant only
for categorical parameters.
resolution (Collection, optional): A collection of possible particular values,
used to set the possible values considered when sampling with factorial-based
designs.
shortname (str, optional):
A shorter name, especially useful when the name of this parameter
is a long strings that may not display neatly in figures.
abbrev (Mapping, optional):
A set of abbreviations used for values, especially useful when the names of
values are long strings that may not display neatly in figures.
Returns:
Parameter or Constant
"""
# Convert dist to a Mapping if it is just a string
if isinstance(dist, str):
dist = {'name': dist}
# Default correlation is an empty list.
corr = corr if corr is not None else []
# Standardize the dtype to a lowercase string of
# correct type
dtype = standardize_data_type(dtype)
if dtype == 'infer':
if values is not None:
if set(values) == {True, False}:
dtype = 'bool'
else:
dtype = 'cat'
elif max is True and min is False:
dtype = 'bool'
elif isinstance(min, numbers.Integral) and isinstance(max, numbers.Integral):
dtype = 'int'
elif isinstance(min, numbers.Real) and isinstance(max, numbers.Real):
dtype = 'real'
elif dist is not None and isinstance(dist, Mapping):
try:
make_rv_frozen(**dist, discrete=True)
except DistributionTypeError:
dtype = 'real'
else:
dtype = 'int'
else:
raise ValueError(f'cannot infer dtype for {name}, give it explicitly')
if dtype not in ('cat', 'int', 'real', 'bool'):
message = f"invalid dtype {dtype} for parameter {name}, must be 'cat', 'int', 'real', or 'bool'"
if 'unc' in dtype:
message += "\n(to set this parameter as an uncertainty, set `ptype` to 'uncertainty', not `dtype`)"
elif 'lev' in dtype:
message += "\n(to set this parameter as a lever, set `ptype` to 'lever', not `dtype`)"
elif 'con' in dtype:
message += "\n(to set this parameter as a constant, set `ptype` to 'constant', not `dtype`)"
raise ValueError(message)
# Data checks
if dist is not None and not isinstance(dist, Mapping) and not isinstance(dist, rv_frozen):
raise TypeError(f'dist must be a dict or rv_frozen for {name}, not {type(dist)}')
if dist is None:
dist_ = {}
rv_gen = None
elif isinstance(dist, rv_frozen):
dist_ = {'name': dist.dist.name}
dist_.update(dist.kwds)
rv_gen = dist
else:
dist_ = dist
rv_gen = None
dist_for_maker = dist_.copy()
if min is not None:
dist_for_maker['min'] = min
if max is not None:
dist_for_maker['max'] = max
if dtype=='bool':
dist_for_maker['min'] = 0
dist_for_maker['max'] = 1
elif dtype=='cat':
dist_for_maker['min'] = 0
dist_for_maker['max'] = (len(values)-1) if values is not None else 0
ptype = standardize_parameter_type(ptype)
    if ptype == 'constant':
if dist_.get('name') is None:
dist_['name'] = 'constant'
if dist_.get('name') != 'constant':
raise ValueError(f'constant cannot have non-constant distribution for {name}')
rv_gen_tentative = None
else:
# If inferred dtype is int but distribution is discrete, promote to real
try:
rv_gen_tentative = rv_gen or make_rv_frozen(**dist_for_maker, discrete=True)
except DistributionTypeError:
if dtype == 'int':
dtype = 'real'
rv_gen_tentative = rv_gen or make_rv_frozen(**dist_for_maker, discrete=False)
except DistributionFreezeError as err:
raise DistributionFreezeError(f'on freeze for {name}') from err
if dtype == 'bool':
if min is None:
min = False
if min != False:
raise ValueError(f'min of bool must be False for {name}')
if max is None:
max = True
if max != True:
raise ValueError(f'max of bool must be True for {name}')
values = [False, True]
if dtype in {'int', 'real'}:
if dist_.get('name') == 'constant':
if min is None and default is not None:
min = default
if max is None and default is not None:
max = default
if rv_gen_tentative is not None:
min, max = _get_bounds_from_dist(rv_gen_tentative)
if min is None:
raise ValueError(f'min of {dtype} is required for {name}')
if max is None:
raise ValueError(f'max of {dtype} is required for {name}')
# Create the Parameter
if ptype == 'constant':
if dtype == 'cat':
p = Constant(name, default, desc=desc, address=address, dtype='cat')
else:
p = Constant(name, default, desc=desc, address=address)
elif dtype == 'cat':
p = CategoricalParameter(
name,
values,
default=default,
desc=desc,
address=address,
ptype=ptype,
corr=corr,
abbrev=abbrev,
shortname=shortname,
)
elif dtype == 'int':
rv_gen = rv_gen or make_rv_frozen(**dist_for_maker, discrete=True)
if rv_gen is None:
raise ValueError(f'failed to make {name} ({ptype}) from {dist_}')
# p = Constant(name, default, desc=desc, address=address)
else:
p = IntegerParameter(
name,
lower_bound=min,
upper_bound=max,
resolution=resolution,
default=default,
dist=rv_gen,
dist_def=dist_,
desc=desc,
address=address,
ptype=ptype,
corr=corr,
abbrev=abbrev,
shortname=shortname,
)
elif dtype == 'real':
rv_gen = rv_gen or make_rv_frozen(**dist_for_maker)
if rv_gen is None:
raise ValueError(f'failed to make {name} ({ptype}) from {dist_}')
# p = Constant(name, default, desc=desc, address=address)
else:
p = RealParameter(
name,
lower_bound=min,
upper_bound=max,
resolution=resolution,
default=default,
dist=rv_gen,
dist_def=dist_,
desc=desc,
address=address,
ptype=ptype,
corr=corr,
abbrev=abbrev,
shortname=shortname,
)
elif dtype == 'bool':
rv_gen = rv_gen or make_rv_frozen(**dist_for_maker, discrete=True)
if rv_gen is None:
raise ValueError(f'failed to make {name} ({ptype}) from {dist_}')
# p = Constant(name, default, desc=desc, address=address)
else:
p = BooleanParameter(
name,
default=default,
dist=rv_gen,
dist_def=dist_,
desc=desc,
address=address,
ptype=ptype,
corr=corr,
abbrev=abbrev,
shortname=shortname,
)
else:
raise ValueError(f"invalid dtype {dtype}")
return p
class Constant(workbench_param.Constant):
ptype = 'constant'
"""str: Parameter type, for compatibility with Parameter."""
def __init__(self, name, value, desc="", address=None, dtype=None):
if value is None:
raise ValueError("Constant.value cannot be None")
workbench_param.Constant.__init__(
self,
name,
value,
)
self.desc = desc
"""str: Human readable description of this constant, for reference only"""
self.address = address
"""
Any: The address to use to access this parameter in the model.
This is an implementation-specific detail. For example,
in an Excel-based model, the address could be a sheet and cell reference
given as a string.
"""
if dtype is None:
dtype = numpy.asarray(value).dtype
dtype = standardize_data_type(dtype)
self.dtype = dtype
"""str: The dtype for the value, as a string."""
@property
def default(self):
"""Read-only alias for value"""
return self.value
@property
def values(self):
"""list: The value as a one-item list"""
return [self.value,]
def __eq__(self, other):
try:
if self.address != other.address:
return False
if self.dtype != other.dtype:
return False
except AttributeError:
return False
return super().__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
class Parameter(workbench_param.Parameter, ShortnameMixin, TaggableMixin):
dtype = None
def __init__(
self,
name,
dist,
*,
lower_bound=None,
upper_bound=None,
resolution=None,
default=None,
variable_name=None,
pff=False,
desc="",
address=None,
ptype=None,
corr=None,
dist_def=None,
shortname=None,
abbrev=None,
tags=None,
):
# The default constructor for ema_workbench parameters uses no distribution
# But for EMAT, we want to always define a distribution explicitly
# for clarity.
if dist is None and (lower_bound is None or upper_bound is None):
raise ValueError("must give lower_bound and upper_bound, or dist")
if dist is None:
from scipy.stats import uniform
dist = uniform(lower_bound, upper_bound-lower_bound)
if isinstance(dist, str):
dist = {'name':dist}
if isinstance(dist, Mapping):
dist = dict(**dist)
if lower_bound is not None:
dist['min'] = lower_bound
if upper_bound is not None:
dist['max'] = upper_bound
dist = make_rv_frozen(**dist)
# We extract and set the lower and upper bounds here,
# in order to use the default constructor from the workbench.
ppf_zero = 0
if isinstance(dist.dist, stats.rv_discrete): # @UndefinedVariable
# ppf at actual zero for rv_discrete gives lower bound - 1
# due to a quirk in the scipy.stats implementation
# so we use the smallest positive float instead
ppf_zero = 5e-324
lower_bound = dist.ppf(ppf_zero)
upper_bound = dist.ppf(1.0)
if self.dtype == 'int':
lower_bound = int(lower_bound)
upper_bound = int(upper_bound)
workbench_param.Parameter.__init__(
self,
name=name,
lower_bound=lower_bound,
upper_bound=upper_bound,
resolution=resolution,
default=default,
variable_name=variable_name,
pff=pff,
)
self.dist = dist
self.desc = desc
"""str: Human readable description of this parameter, for reference only"""
self.address = address
"""
Any: The address to use to access this parameter in the model.
This is an implementation-specific detail. For example,
in an Excel-based model, the address could be a sheet and cell reference
given as a string.
"""
self.ptype = standardize_parameter_type(ptype) if ptype is not None else None
"""str: Parameter type, one of {'constant', 'uncertainty', 'lever'}"""
self.corr = corr if corr is not None else []
"""Dict: A correlation definition. Key give names of other parameters, values give correlation."""
self.dist_def = dict(dist_def) if dist_def is not None else {}
"""Dict: The arguments that define the underlying distribution."""
self._shortname = shortname
self.abbrev = abbrev or {}
"""Dict: Abbreviations used for long attribute names in figures."""
if tags:
if isinstance(tags, str):
tags = [tags]
for tag in tags:
self.add_tag(tag)
@property
def min(self):
return self.lower_bound
@property
def max(self):
return self.upper_bound
def __eq__(self, other):
try:
if type(self) != type(other):
return False
if self.address != other.address:
return False
if self.dtype != other.dtype:
return False
if self.ptype != other.ptype:
return False
if self.corr != other.corr:
return False
if self.distdef != other.distdef:
print(f"distdef not equal: {self.distdef} != {other.distdef}")
return False
except AttributeError:
return False
if not isinstance(self, other.__class__):
return False
self_keys = set(self.__dict__.keys())
other_keys = set(other.__dict__.keys())
if self_keys - other_keys:
return False
else:
for key in self_keys:
if key == 'dist_def':
continue
if key != 'dist':
if getattr(self, key) != getattr(other, key):
return False
else:
# name, parameters
self_dist = getattr(self, key)
other_dist = getattr(other, key)
if self_dist.dist.name != other_dist.dist.name:
return False
if self_dist.args != other_dist.args:
return False
else:
return True
@property
def distdef(self):
result = rv_frozen_as_dict(self.dist, self.min, self.max)
return result
@property
def dist_description(self):
result = self.dist_def
if isinstance(result, str):
result = {'name':result}
if result.get('name') == 'pert':
if 'gamma' in result:
return f"Pert Distribution (min={self.min}, peak={result.get('peak')}, max={self.max}, gamma={result.get('gamma')})"
return f"Pert Distribution (min={self.min}, peak={result.get('peak')}, max={self.max})"
if result.get('name') == 'uniform' or len(result)==0:
return f"Uniform Distribution (min={self.min}, max={self.max})"
if result.get('name') == 'triangle':
            return f"Triangular Distribution (min={self.min}, peak={result.get('peak')}, max={self.max})"
return str(result)
def __repr__(self):
return f"<emat.{self.__class__.__name__} '{self.name}'>"
def _hash_it(self, ha=None):
from ..util.hasher import hash_it
return hash_it(
self.name,
self.dist_def,
self.resolution,
self.address,
self.dtype,
self.pff,
tuple(self.variable_name),
self.shortname,
self.ptype,
self.corr,
ha=ha,
)
def get_abbrev(self, name):
"""Get an abbreviated name if available."""
try:
return self.abbrev.get(name, name)
except AttributeError:
return name
def set_abbrev(self, values=None, **kwargs):
if values is None:
values = {}
try:
self.abbrev.update(values, **kwargs)
except AttributeError:
self.abbrev = {}
self.abbrev.update(values, **kwargs)
class RealParameter(Parameter, workbench_param.RealParameter):
dtype = 'real'
def __init__(self, name, *, lower_bound=None, upper_bound=None, resolution=None,
default=None, variable_name=None, pff=False, dist=None, dist_def=None,
desc="", address=None, ptype=None, corr=None, shortname=None, abbrev=None):
if dist is None and (lower_bound is None or upper_bound is None):
raise ValueError("must give lower_bound and upper_bound, or dist")
if dist is None:
from scipy.stats import uniform
dist = uniform(lower_bound, upper_bound-lower_bound)
Parameter.__init__(
self,
name,
dist=dist,
resolution=resolution,
default=default,
variable_name=variable_name,
pff=pff,
desc=desc,
address=address,
ptype=ptype,
corr=corr,
dist_def=dist_def,
shortname=shortname,
abbrev=abbrev,
)
@property
def min(self):
return float(super().lower_bound)
@property
def max(self):
return float(super().upper_bound)
class IntegerParameter(Parameter, workbench_param.IntegerParameter):
dtype = 'int'
def __init__(self, name, *, lower_bound=None, upper_bound=None, resolution=None,
default=None, variable_name=None, pff=False, dist=None, dist_def=None,
desc="", address=None, ptype=None, corr=None, shortname=None, abbrev=None):
if dist is None and (lower_bound is None or upper_bound is None):
raise ValueError("must give lower_bound and upper_bound, or dist")
if dist is None:
from scipy.stats import randint
dist = randint(lower_bound, upper_bound+1)
Parameter.__init__(
self,
name,
dist=dist,
resolution=resolution,
default=default, variable_name=variable_name, pff=pff,
desc=desc, address=address, ptype=ptype, corr=corr,
dist_def=dist_def,
shortname=shortname,
abbrev=abbrev,
)
if self.resolution is not None:
for entry in self.resolution:
if not isinstance(entry, numbers.Integral):
raise ValueError(('all entries in resolution should be '
'integers'))
@property
def min(self):
return int(super().lower_bound)
@property
def max(self):
return int(super().upper_bound)
class BooleanParameter(Parameter, workbench_param.BooleanParameter):
dtype = 'bool'
def __init__(self, name, *, lower_bound=None, upper_bound=None, resolution=None,
default=None, variable_name=None, pff=False, dist=None, dist_def=None,
desc="", address=None, ptype=None, corr=None, shortname=None, abbrev=None):
Parameter.__init__(
self,
name,
dist=dist,
resolution=resolution,
default=default, variable_name=variable_name, pff=pff,
desc=desc, address=address, ptype=ptype, corr=corr,
dist_def=dist_def,
shortname=shortname,
abbrev=abbrev,
)
cats = [workbench_param.create_category(cat) for cat in [False, True]]
self._categories = workbench_param.NamedObjectMap(workbench_param.Category)
self.categories = cats
self.resolution = [i for i in range(len(self.categories))]
self.multivalue = False
@property
def values(self):
"""List: The possible discrete values."""
return [False, True]
@property
def min(self):
return False
@property
def max(self):
return True
class CategoricalParameter(Parameter, workbench_param.CategoricalParameter):
dtype = 'cat'
def __init__(self, name, categories, *, default=None, variable_name=None,
pff=False, multivalue=False,
desc="", address=None, ptype=None, corr=None,
dist=None, singleton_ok=False, shortname=None, abbrev=None):
lower_bound = 0
upper_bound = len(categories) - 1
from scipy.stats import randint
dist = randint(lower_bound, upper_bound+1)
if upper_bound == 0 and not singleton_ok:
raise ValueError('there should be more than 1 category')
Parameter.__init__(
self,
name,
dist=dist,
resolution=None,
default=default, variable_name=variable_name, pff=pff,
desc=desc, address=address, ptype=ptype, corr=corr,
shortname=shortname,
abbrev=abbrev,
)
cats = [workbench_param.create_category(cat) for cat in categories]
self._categories = workbench_param.NamedObjectMap(workbench_param.Category)
self.categories = cats
self.resolution = [i for i in range(len(self.categories))]
self.multivalue = multivalue
@property
def values(self):
"""List: The possible discrete values."""
return list(i.value for i in self.categories)
@property
def min(self):
"""None: Categorical parameters are not characterized by a lower bound."""
return None
@property
def max(self):
"""None: Categorical parameters are not characterized by an upper bound."""
return None
@property
def distdef(self):
"""None: Categorical parameters distribution is not implemented."""
return None
@property
def dist_description(self):
return "Uniform Distribution"
#############
class OLD_Parameter:
"""
Definitions for a particular input for a model.
Args:
name (str): A name for this parameter. The name must be a `str`
and ideally a valid Python identifier (i.e., begins with
a letter or underscore, contains only letters, numerals, and
underscores).
ptype (str, default 'constant'): The type for this parameter, one
of {'constant', 'uncertainty', 'lever'}.
min (numeric, optional): The minimum value for this parameter.
max (numeric, optional): The maximum value for this parameter.
dist (str or Mapping, optional): A definition of a distribution
to use for this parameter, which is only relevant for uncertainty
parameters. Can be specified just as the name of the distribution
when that distribution is parameterized only by the min and max
(e.g., 'uniform'). If the distribution requires other parameters,
this argument should be a Mapping, with keys including 'name' for
the name of the distribution, as well as giving one or more
named distributional parameters as appropriate.
default (Any, optional): A default value for this parameter. The default
value is used as the actual value for constant parameters. It is also
used during univariate sensitivity testing as the value for this
parameter when other parameters are being evaluated at non-default
values.
corr (dict, optional): A correlation definition that relates this parameter
to others. Only applicable for uncertainty parameters.
address (Any, optional): The address to use to access this parameter in
the model. This is an implementation-specific detail. For example,
in an Excel-based model, the address could be a sheet and cell reference
given as a string.
dtype (str, default 'infer'): A dtype for this parameter, one
of {'cat', 'int', 'real', 'bool'} or some sub-class variant or specialization
thereof (e.g., int64).
values (Collection, optional): A collection of possible values, relevant only
for categorical parameters.
resolution (Collection, optional): A collection of possible particular values,
used to set the possible values considered when sampling with factorial-based
designs.
"""
def __init__(
self,
name,
ptype='constant',
desc='missing description',
min=None,
max=None,
dist=None,
default=None,
corr=None,
address=None,
dtype='infer',
values=None,
resolution=None,
):
self.name = name
"""str: Parameter name, used to identify parameter."""
self.ptype = standardize_parameter_type(ptype)
"""str: Parameter type, one of {'constant', 'uncertainty', 'lever'}"""
self.desc = desc
"""str: Human readable description of this parameter, for reference only"""
self.min = min
"""numeric: Lower bound for this parameter, or None"""
self.max = max
"""numeric: Upper bound for this parameter, or None"""
if isinstance(dist, str):
dist = {'name': dist}
self.dist = dist
"""Dict: definition of a distribution to use for this parameter"""
self.default = default
"""A default value for this parameter, used for constants or in univariate sensitivity testing"""
self.corr = corr if corr is not None else []
self.address = address
self.dtype = standardize_data_type(dtype)
self.values = values
self.resolution = resolution
if self.dtype == 'infer':
if self.values is not None:
if set(self.values) == {True, False}:
self.dtype = 'bool'
else:
self.dtype = 'cat'
elif self.max is True:
self.dtype = 'bool'
elif isinstance(self.max, numbers.Integral):
self.dtype = 'int'
elif isinstance(self.max, numbers.Real):
self.dtype = 'real'
else:
self.dtype = 'bool'
if self.dtype not in ('cat','int','real','bool'):
raise ValueError(f"invalid dtype {self.dtype}")
# Data checks
if self.dist is not None and not isinstance(self.dist, Mapping):
raise TypeError(f'dist must be a dict for {self.name}, not {type(self.dist)}')
        if self.ptype == 'constant':
if self.dist is None:
self.dist = {'name': 'constant'}
if self.dist.get('name') != 'constant':
raise ValueError(f'constant cannot have non-constant distribution for {self.name}')
if self.dtype == 'bool':
if self.min is None:
self.min = False
if self.min != False:
raise ValueError(f'min of bool must be False for {self.name}')
if self.max is None:
self.max = True
if self.max != True:
raise ValueError(f'max of bool must be True for {self.name}')
self.values = [False, True]
if self.dtype in {'int','real'}:
if self.dist is not None and self.dist.get('name') == 'constant':
if self.min is None and self.default is not None:
self.min = self.default
if self.max is None and self.default is not None:
self.max = self.default
if self.min is None:
raise ValueError(f'min of {self.dtype} is required for {self.name}')
if self.max is None:
raise ValueError(f'max of {self.dtype} is required for {self.name}')
def get_parameter(self):
"""Get an ema_workbench.Parameter from this emat.Parameter.
This method returns an
:class:`ema_workbench.Parameter <ema_workbench.em_framework.parameters.Parameter>`
of the correct type for this :class:`emat.Parameter`. This will be one of:
* :class:`Constant <ema_workbench.em_framework.parameters.Constant>`
* :class:`CategoricalParameter <ema_workbench.em_framework.parameters.CategoricalParameter>`
* :class:`IntegerParameter <ema_workbench.em_framework.parameters.IntegerParameter>`
* :class:`RealParameter <ema_workbench.em_framework.parameters.RealParameter>`
* :class:`BooleanParameter <ema_workbench.em_framework.parameters.BooleanParameter>`
"""
if self.ptype == 'constant':
return Constant(self.name, self.default)
elif self.dtype == 'cat':
return CategoricalParameter(self.name, self.values, default=self.default,)
elif self.dtype == 'int':
return IntegerParameter(
self.name, self.min, self.max, resolution=self.resolution,
default=self.default,
)
elif self.dtype == 'real':
if self.dist is not None and len(self.dist) > 0:
_d = self.dist.copy()
distname = _d.pop('name', 'uniform')
if distname == 'uniform':
_d['loc'] = self.min
_d['scale'] = self.max - self.min
elif distname == 'triangle':
_d['lower_bound'] = self.min
_d['upper_bound'] = self.max
elif distname == 'pert':
_d['lower_bound'] = self.min
_d['upper_bound'] = self.max
dist_gen = getattr(distributions, distname)
dist = dist_gen(**_d)
else:
dist = None
return RealParameter(
self.name, self.min, self.max, resolution=self.resolution,
default=self.default,
dist=dist,
)
elif self.dtype == 'bool':
return BooleanParameter(self.name, default=self.default)
else:
raise ValueError(f"invalid dtype {self.dtype}")
def __repr__(self):
classname = self.__class__.__name__
name = self.name
rep = f"{classname}('{name}', dtype={self.dtype}, ptype='{self.ptype}'"
rep += ')'
return rep
def __eq__(self, other):
keys = {'name', 'ptype', 'desc', 'dtype', 'default',
'min', 'max', 'dist', 'corr', 'values',
'address', 'resolution'}
for k in keys:
if getattr(self, k) != getattr(other, k):
return False
return True
```
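A brief sketch (not part of the source) of the `make_parameter` factory defined above; the import path is an assumption, and the classes noted in the comments are what the dtype-inference rules in the function are expected to produce:
```python
# Hypothetical usage sketch of the factory above; import path is an assumption.
from emat.scope.parameter import make_parameter

# Integer bounds, no explicit dist: dtype inferred as 'int' (expected IntegerParameter).
lanes = make_parameter("added_lanes", ptype="lever", min=0, max=4, default=0)

# Float bounds: dtype inferred as 'real', uniform by default (expected RealParameter).
fare = make_parameter("fare", ptype="lever", min=0.50, max=5.00, default=1.25)

# An explicit PERT distribution on a continuous uncertainty.
growth = make_parameter(
    "employment_growth", ptype="uncertainty",
    min=-0.02, max=0.05, default=0.01,
    dist={"name": "pert", "peak": 0.02},
)

# A collection of values: dtype inferred as 'cat' (expected CategoricalParameter).
land_use = make_parameter(
    "land_use", ptype="uncertainty",
    values=["base", "compact", "sprawl"], default="base",
)
```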
#### File: emat/util/constraints.py
```python
import pandas
from ..workbench import Constraint
def batch_contraint_check(
constraints,
parameter_frame,
outcome_frame=None,
aggregate=True,
scope=None,
only_parameters=False,
):
"""
Batch check of constraints
Args:
constraints (Collection[Constraint]):
A collection of Constraints to evaluate.
parameter_frame (pandas.DataFrame, optional):
The parameters (uncertainties and levers) for a
batch of experiments. If scope is given, this can be
split automatically into parameter_frame and
outcome_frame.
outcome_frame (pandas.DataFrame, optional):
The outcomes (performance measures) for a
batch of experiments. If both this and `parameter_frame`
are given, they must have the same indexes. If not
given but the scope is given, the outcome_frame
is created from `parameter_frame`.
aggregate (bool, default True):
Return a single boolean series that indicates whether
all of the constraints are satisfied. Otherwise,
a pandas.DataFrame is returned with a column for every
constraint.
scope (Scope, optional):
A Scope used to identify parameters and outcomes.
only_parameters (bool, default False):
Only check constraints based exclusively on parameters.
Returns:
        pandas.Series: If aggregate is True
        pandas.DataFrame: If aggregate is False
Raises:
KeyError:
If a constraint in constraints calls for a parameter or
outcome name that is not present in parameter_frame or
outcome_frame, respectively.
"""
if scope is not None and outcome_frame is None:
_p, _o = [], []
for col in parameter_frame.columns:
if col in scope.get_parameter_names():
_p.append(col)
else:
_o.append(col)
parameter_frame, outcome_frame = parameter_frame[_p], parameter_frame[_o]
if parameter_frame is None and outcome_frame is not None:
parameter_frame = pandas.DataFrame(index=outcome_frame.index, columns=[])
if parameter_frame is not None and outcome_frame is None:
outcome_frame = pandas.DataFrame(index=parameter_frame.index, columns=[])
assert len(parameter_frame) == len(outcome_frame)
results = pandas.DataFrame(
data=True,
index=parameter_frame.index,
columns=[c.name for c in constraints],
dtype=bool,
)
if len(parameter_frame):
for c in constraints:
assert isinstance(c, Constraint)
if only_parameters and c.outcome_names:
continue
constraint_data = pandas.concat([
parameter_frame[c.parameter_names],
outcome_frame[c.outcome_names],
], axis=1)
results[c.name] = (constraint_data.apply(c.process, axis=1) == 0)
if aggregate:
return results.all(axis=1)
else:
return results
```
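A sketch (not from the source) of `batch_contraint_check`; the `Constraint` call signature follows the workbench convention (a constraint function returns 0 when satisfied and a positive distance otherwise) and is an assumption here:
```python
# Hypothetical sketch; Constraint signature and import paths are assumptions.
import pandas
from emat.workbench import Constraint
from emat.util.constraints import batch_contraint_check

params = pandas.DataFrame({"capacity": [100, 150, 200]})
outcomes = pandas.DataFrame({"delay": [12.0, 8.0, 3.0]})

constraints = [
    Constraint("enough_capacity", parameter_names=["capacity"],
               function=lambda c: max(0, 120 - c)),      # 0 when capacity >= 120
    Constraint("low_delay", outcome_names=["delay"],
               function=lambda d: max(0, d - 10.0)),     # 0 when delay <= 10
]

ok = batch_contraint_check(constraints, params, outcomes)   # one bool per experiment
detail = batch_contraint_check(constraints, params, outcomes, aggregate=False)  # one column per constraint
```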
#### File: util/filez/spooling.py
```python
import os
import time
import shutil
from .timing import creation_date, append_date_to_filename
def filename_split(filename):
pathlocation, basefile = os.path.split(filename)
basefile_list = basefile.split(".")
if len(basefile_list) > 1:
basename = ".".join(basefile_list[:-1])
extension = "." + basefile_list[-1]
else:
basename = basefile_list[0]
extension = ""
return (pathlocation, basename, extension)
def filename_fuse(pathlocation, basename, extension):
x = os.path.join(pathlocation, basename)
if extension != "": x += "." + extension
return x
def next_filename(
filename,
format="{basename:s}.{number:03d}{extension:s}",
suffix=None,
plus=0,
allow_natural=False,
demand_natural=False,
):
"""Finds the next file name in this stack that does not yet exist.
Parameters
----------
filename : str or None
The base file name to use for this stack. New files would have a number
appended after the basename but before the dot extension. For example,
if the filename is "/tmp/boo.txt", the first file created will be named
"/tmp/boo.001.txt". If None, then a temporary file is created instead.
Other Parameters
----------------
suffix : str, optional
If given, use this file extension instead of any extension given in the filename
argument. The usual use case for this parameter is when filename is None,
and a temporary file of a particular kind is desired.
format : str, optional
If given, use this format string to generate new stack file names in a
different format.
plus : int, optional
If given, increase the returned filenumber by this amount more than what
is needed to generate a new file. This can be useful with pytables, which can
create pseudo-files that don't appear on disk but should all have unique names.
allow_natural : bool
If true, this function will return the unedited `filename` parameter
if that file does not already exist. Otherwise will always have a
number appended to the name.
demand_natural : bool
If true, this function will just throw a FileExistsError instead of spooling
if the file already exists.
"""
if filename is not None:
filename = os.path.expanduser(filename)
if demand_natural and os.path.exists(filename):
raise FileExistsError(filename)
if allow_natural and not os.path.exists(filename):
return filename
pathlocation, basename, extension = filename_split(filename)
if suffix is not None:
extension = "." + suffix
fn = lambda n: os.path.join(pathlocation, format.format(basename=basename, extension=extension, number=n))
n = 1
while os.path.exists(fn(n)):
n += 1
return fn(n + plus)
def archive_existing_file(
filename,
archive_path=None,
tag='now',
):
"""
Archive a file.
Parameters
----------
filename : Path-like
Source file.
archive_path : Path-like, optional
Destination for the archival copy. If not given, the file name is modified
in-place and the file is not moved.
tag : {'now', 'creation'}, default 'now'
Appends a tag to the existing file based on the current time, or
the time that the file being archived was created.
"""
if tag=='now':
epoch = time.time()
elif tag=='creation':
epoch = creation_date(filename)
else:
raise ValueError('supported tags are [now, creation]')
if archive_path is None:
archive_path = os.path.dirname(filename)
filebasename = os.path.basename(filename)
if not os.path.exists(filename):
raise FileNotFoundError(filename)
if not os.path.exists(archive_path):
os.makedirs(archive_path)
# send existing file to archive
new_name = next_filename(
append_date_to_filename(os.path.join(archive_path, filebasename), epoch),
allow_natural=True
)
shutil.move(filename, new_name)
```
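A brief sketch (not in the source) of the spooling helpers; paths are illustrative and the import path is assumed from the file location:
```python
# Hypothetical usage; file paths are illustrative only.
from emat.util.filez.spooling import filename_split, next_filename, archive_existing_file

filename_split("/tmp/results/boo.txt")          # -> ('/tmp/results', 'boo', '.txt')

# Returns /tmp/results/boo.001.txt if no spooled file exists yet,
# then keeps counting upward as files accumulate.
fn = next_filename("/tmp/results/boo.txt")

# Return the name unchanged when the file itself does not exist yet.
fn = next_filename("/tmp/results/boo.txt", allow_natural=True)

# Move an existing file aside, appending a date tag (has filesystem side effects):
# archive_existing_file("/tmp/results/boo.txt", archive_path="/tmp/results/archive")
```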
#### File: emat/util/json_encoder.py
```python
import json
import numpy as np
class NumpyEncoder(json.JSONEncoder):
""" Custom encoder for numpy data types """
def default(self, obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8,
np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
return float(obj)
elif isinstance(obj, (np.complex_, np.complex64, np.complex128)):
return {'real': obj.real, 'imag': obj.imag}
elif isinstance(obj, (np.ndarray,)):
return obj.tolist()
elif isinstance(obj, (np.bool_)):
return bool(obj)
elif isinstance(obj, (np.void)):
return None
return json.JSONEncoder.default(self, obj)
def dumps(*args, **kwargs):
return json.dumps(*args, **kwargs, cls=NumpyEncoder)
```
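A quick sketch (not from the source) of the encoder handling numpy scalars and arrays that the standard-library encoder rejects; it assumes a NumPy version in which the aliases used above (`np.float_`, `np.complex_`) still exist:
```python
# Hypothetical usage of the NumpyEncoder wrapper defined above.
import numpy as np
from emat.util.json_encoder import dumps

payload = {
    "count": np.int64(42),
    "score": np.float32(0.25),
    "flag": np.bool_(True),
    "values": np.arange(3),
}
print(dumps(payload))
# {"count": 42, "score": 0.25, "flag": true, "values": [0, 1, 2]}
```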
#### File: emat/util/rendering.py
```python
import re
import ast
from ..util.loggers import get_module_logger
_logger = get_module_logger(__name__)
from .xmle import Show, Elem
_png = re.compile(r"png(\(([^\)]*)\))?")
_svg = re.compile(r"svg(\(([^\)]*)\))?")
def _parse_paren(s, format):
for part in s.split(","):
k, v = part.split("=", 1)
k = k.strip()
v = v.strip()
try:
k = ast.literal_eval(k)
except ValueError:
pass
try:
v = ast.literal_eval(v)
except ValueError:
pass
format[k] = v
def render_plotly(figure, format):
"""
Convert a plotly figure to a static image.
Args:
figure (plotly.graph_objs.Figure):
The source figure to convert
format (str or dict):
A string or dictionary that contains the
output formatting instructions. Any format
accepted by the plotly `to_image` method
can be given as a dictionary, which is passed
as keyword arguments to that function.
Alternatively, give a string that contains
a format type, optionally called with other
keyword arguments. Currently only *svg* and
*png* are implemented using the string
approach. If no implemented format is
available, the original plotly Figure is
returned.
Returns:
xmle.Elem or plotly.graph_objs.Figure
Examples:
>>> from plotly.graph_objects import Figure
>>> render_plotly(Figure(), {'format':'png','width':300,'height':500})
...
>>> render_plotly(Figure(), "svg")
...
>>> render_plotly(Figure(), "png(width=500,height=500)")
...
"""
if isinstance(format, str):
is_png = _png.search(format)
if is_png:
format = dict(format="png")
if is_png.group(2):
_parse_paren(is_png.group(2), format)
if isinstance(format, str):
is_svg = _svg.search(format)
if is_svg:
format = dict(format="svg")
if is_svg.group(2):
_parse_paren(is_svg.group(2), format)
_logger.debug(f"render format is {format}")
    if isinstance(format, dict):
        fallback = format.pop('fallback', False)
        try:
return Show(figure.to_image(**format))
except:
if fallback:
return figure
else:
import traceback
err_txt = traceback.format_exc()
return Elem('pre', text=str(err_txt))
else:
return figure
```
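A small illustration (not in the source) of how a string such as `png(width=500, height=300)` is decomposed into keyword arguments for plotly's `to_image`; the private helpers are imported here only for demonstration, and the import path is an assumption:
```python
# Illustration of the string-format parsing above.
from emat.util.rendering import _png, _parse_paren

fmt = {"format": "png"}
match = _png.search("png(width=500, height=300)")
if match and match.group(2):
    _parse_paren(match.group(2), fmt)
print(fmt)   # {'format': 'png', 'width': 500, 'height': 300}
```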
#### File: emat/util/seq_grouping.py
```python
from itertools import groupby
from operator import itemgetter
def seq_int_grouper(data):
groupings = []
for k, g in groupby(enumerate(data), (lambda ix : ix[0] - ix[1])):
agg = list(map(itemgetter(1), g))
if len(agg)==1:
groupings.append(str(agg[0]))
else:
groupings.append(f"{agg[0]}-{agg[-1]}")
return ",".join(groupings)
def seq_int_group_expander(seq):
seq = seq.split(",")
data = []
for agg in seq:
if "-" in agg:
first, last = agg.split("-")
data.extend(range(int(first), int(last)+1))
else:
data.append(int(agg))
return data
```
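A round-trip sketch (not in the source) of the two helpers:
```python
# Round-trip illustration of the sequence grouping helpers above.
from emat.util.seq_grouping import seq_int_grouper, seq_int_group_expander

data = [1, 2, 3, 7, 8, 10]
encoded = seq_int_grouper(data)            # '1-3,7-8,10'
decoded = seq_int_group_expander(encoded)  # [1, 2, 3, 7, 8, 10]
assert decoded == data
```
Note that the compact form assumes non-negative integers in ascending runs; a negative value would collide with the `-` range delimiter when expanding.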
#### File: util/xmle/styles.py
```python
body_font = 'font-family: "Book-Antiqua", "Palatino", serif;'
signature_font = 'font-size:70%; font-weight:100; font-style:italic; font-family: Roboto, Helvetica, sans-serif;'
signature_name_font = 'font-weight:400; font-style:normal; font-family: "Roboto Slab", Roboto, Helvetica, sans-serif;'
def load_css(filename):
import os
css = None
if filename is None or not isinstance(filename, str):
return None
if os.path.exists(filename):
with open(filename, 'r') as f:
css = f.read()
return css
f0 = "{}.css".format(filename)
if os.path.exists(f0):
with open(f0, 'r') as f:
css = f.read()
return css
try:
import appdirs
except ImportError:
pass
else:
f1 = os.path.join(appdirs.user_config_dir('Larch'), filename)
if os.path.exists(f1):
with open(f1, 'r') as f:
css = f.read()
return css
f2 = "{}.css".format(f1)
if os.path.exists(f2):
with open(f2, 'r') as f:
css = f.read()
return css
if '{' in filename and '}' in filename:
return filename
return css
_default_css_jupyter = """
@import url('https://fonts.googleapis.com/css?family=Roboto:400,700,500italic,100italic|Roboto+Mono:300,400,700|Roboto+Slab:200,900|EB+Garamond:400,400i');
.error_report {
color:red; font-family:monospace;
}
div.output_wrapper {""" + body_font + """}
div.output_wrapper table,
div.jp-OutputArea-output table
{
border-collapse:collapse;
}
div.output_wrapper table, div.output_wrapper th, div.output_wrapper td,
div.jp-OutputArea-output table, div.jp-OutputArea-output th, div.jp-OutputArea-output td
{
border: 1px solid #999999;
font-family:"Roboto Mono", monospace;
font-size:9pt;
font-weight:400;
}
div.output_wrapper th, div.output_wrapper td,
div.jp-OutputArea-output th, div.jp-OutputArea-output td
{
padding:2px; text-align:left;
}
div.output_wrapper td.parameter_category,
div.jp-OutputArea-output td.parameter_category
{
font-family:"Roboto", monospace;
font-weight:500;
background-color: #f4f4f4;
font-style: italic;
}
div.output_wrapper th,
div.jp-OutputArea-output th
{
font-family:"Roboto", monospace;
font-weight:700;
}
div.output_wrapper table.dicta,
div.jp-OutputArea-output table.dicta
{
border-left: 2px solid black; margin-bottom:2px; border-top:0; border-right:0
}
div.output_wrapper th.dicta, div.output_wrapper td.dicta,
div.jp-OutputArea-output th.dicta, div.jp-OutputArea-output td.dicta
{
padding-top:0px; text-align:left; border:0;
}
div.output_wrapper div.LinearFunc,
div.jp-OutputArea-output div.LinearFunc
{
font-family:"Roboto Mono", monospace;
font-size:100%;
font-weight:400;
}
.larch_signature {""" + signature_font + """ }
.larch_name_signature {""" + signature_name_font + """}
.larch_head_tag {font-size:150%; font-weight:900; font-family:"Roboto Slab", Verdana;}
.larch_head_tag_ver {font-size:80%; font-weight:200; font-family:"Roboto Slab", Verdana;}
.larch_head_tag_pth {font-size:40%; font-weight:200; font-family:"Roboto Slab", Verdana; padding-left:5px;}
.larch_head_tag_more {font-size:50%; font-weight:300; font-family:"Roboto Mono", monospace; line-height:130%;}
div.output_wrapper a.parameter_reference,
div.jp-OutputArea-output a.parameter_reference
{
font-style: italic; text-decoration: none
}
div.output_wrapper .strut2,
div.jp-OutputArea-output .strut2
{
min-width:1in
}
div.output_wrapper .histogram_cell,
div.jp-OutputArea-output .histogram_cell
{
padding-top:1; padding-bottom:1; vertical-align:center;
}
div.output_wrapper .raw_log pre,
div.jp-OutputArea-output .raw_log pre
{
font-family:"Roboto Mono", monospace;
font-weight:300;
font-size:70%;
}
.dicta pre
{
margin:0;
}
div.output_wrapper caption,
div.jp-OutputArea-output caption
{
caption-side: bottom;
text-align: left;
font-family: Roboto;
font-style: italic;
font-weight: 100;
font-size: 80%;
}
table.running_parameter_update caption
{
caption-side: top;
text-align: left;
font-family: Roboto;
font-style: italic;
font-weight: 500;
font-size: 100%;
}
table.dictionary
{
border:0px hidden !important; border-collapse: collapse !important;
}
div.blurb {
margin-top: 15px;
max-width: 6.5in;
}
h2.figure_head {
padding-left: .25in;
}
h3.figure_head {
padding-left: .5in;
}
div.jp-RenderedMarkdown h1 {font-weight: 900; border-bottom:2px black solid; padding-bottom:4px}
div.jp-RenderedMarkdown h2 {font-weight: 850; border-bottom:0.5px black solid; padding-bottom:4px}
div.jp-RenderedMarkdown h3 {font-weight: 800; font-style:italic}
div.jp-RenderedMarkdown p {
max-width:600px;
font-size:150%;
font-family: EB Garamond;
}
div.jp-RenderedMarkdown p code {
font-size: 75%;
}
"""
# from ..display import display_html, HTML
# from .xhtml import tooltipped_style, floating_table_head, _tooltipped_style_css
#
# css = HTML("<style>{}\n\n{}</style>".format(_default_css_jupyter,tooltipped_style().tostring()))
#
# def stylesheet():
# display_html(css)
def default_css():
return """
@import url(https://fonts.googleapis.com/css?family=Roboto:400,700,500italic,100italic|Roboto+Mono:300,400,700);
.error_report {color:red; font-family:monospace;}
body {""" + body_font + """}
div.larch_title {
font-family: "Book-Antiqua", "Palatino", serif;
font-size:200%;
font-weight:900;
font-style:normal;
color: #444444;
}
table {border-collapse:collapse;}
table, th, td {
border: 1px solid #999999;
font-family:"Roboto Mono", monospace;
font-size:90%;
font-weight:400;
}
th, td { padding:2px; }
td.parameter_category {
font-family:"Roboto", monospace;
font-weight:500;
background-color: #f4f4f4;
font-style: italic;
}
th {
font-family:"Roboto", monospace;
font-weight:700;
}
.larch_signature {""" + signature_font + """}
.larch_name_signature {""" + signature_name_font + """}
a.parameter_reference {font-style: italic; text-decoration: none}
.strut2 {min-width:1in}
.histogram_cell { padding-top:1; padding-bottom:1; vertical-align:center; }
.dicta pre {
margin:0;
font-family:"Roboto Mono", monospace;
font-weight:300;
font-size:70%;
}
.raw_log pre {
font-family:"Roboto Mono", monospace;
font-weight:300;
font-size:70%;
}
caption {
caption-side: bottom;
text-align: left;
font-family: Roboto;
font-style: italic;
font-weight: 100;
font-size: 80%;
}
table.dictionary { border:0px hidden !important; border-collapse: collapse !important; }
div.blurb {
margin-top: 15px;
max-width: 6.5in;
}
div.note {
font-size:90%;
padding-left:1em;
padding-right:1em;
border: 1px solid #999999;
border-radius: 4px;
}
p.admonition-title {
font-weight: 700;
}
.tooltipped {
position: relative;
display: inline-block;
}
.tooltipped .tooltiptext {
visibility: hidden;
width: 180px;
background-color: black;
color: #fff;
text-align: center;
border-radius: 6px;
padding: 5px 0;
position: absolute;
z-index: 1;
top: -5px;
left: 110%;
}
.tooltipped .tooltiptext::after {
content: "";
position: absolute;
top: 50%;
right: 100%;
margin-top: -5px;
border-width: 5px;
border-style: solid;
border-color: transparent black transparent transparent;
}
.tooltipped:hover .tooltiptext {
visibility: visible;
}
"""
```
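A small sketch (not from the source) of `load_css`'s fallback order; the import path is assumed from the file location:
```python
# Hypothetical illustration of the CSS loading fallbacks above.
from emat.util.xmle.styles import load_css

# Tries "my_styles", then "my_styles.css", then the Larch config directory;
# returns None if nothing is found.
css = load_css("my_styles")

# A literal stylesheet (contains '{' and '}') is returned as-is.
raw = load_css("div {color:red}")
assert raw == "div {color:red}"
```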
#### File: emat/viz/table.py
```python
import plotly.graph_objs as go
from .widget import FigureWidget
def table_figure(
df,
title=None,
header_color='#C2D4FF',
cell_color='#F5F8FF',
):
trace = go.Table(
header=dict(values=list(df.columns),
fill = dict(color=header_color),
align = ['left'] * len(df.columns)),
cells=dict(values=[df[c] for c in df.columns],
fill = dict(color=cell_color),
align = ['left'] * len(df.columns)),
)
data = [trace]
return FigureWidget(
data=data,
layout=dict(
title=title,
),
metadata=df,
)
```
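A short sketch (not in the source) of `table_figure`; it assumes plotly's widget support is available (e.g. in a Jupyter session):
```python
# Hypothetical usage of table_figure defined above.
import pandas
from emat.viz.table import table_figure

df = pandas.DataFrame({"scenario": ["base", "high growth"], "vmt": [102.5, 131.2]})
fig = table_figure(df, title="Example results")
fig                       # display in a notebook cell
fig.metadata.equals(df)   # the source DataFrame rides along as metadata -> True
```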
#### File: emat/viz/widget.py
```python
from plotly.graph_objs import FigureWidget as _FigureWidget
class FigureWidget(_FigureWidget):
"""FigureWidget with metadata."""
def __init__(self, *args, metadata=None, **kwargs):
super().__init__(*args, **kwargs)
self._metadata = metadata if metadata is not None else {}
@property
def metadata(self):
return self._metadata
@metadata.setter
def metadata(self, x):
self._metadata = x
```
#### File: workbench/analysis/feature_scoring.py
```python
from operator import itemgetter
import math
import numpy as np
import pandas as pd
from sklearn.ensemble import (ExtraTreesClassifier, ExtraTreesRegressor,
RandomForestClassifier, RandomForestRegressor)
from sklearn.feature_selection import (f_regression, f_classif, chi2)
from .scenario_discovery_util import RuleInductionType
from ..util import get_module_logger
# Created on Jul 9, 2014
#
# .. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
#
# TODO:: look at
# http://scikit-learn.org/stable/auto_examples/linear_model/plot_sparse_recovery.html#example-linear-model-plot-sparse-recovery-py
__all__ = ['F_REGRESSION', 'F_CLASSIFICATION', 'CHI2',
'get_univariate_feature_scores', 'get_rf_feature_scores',
'get_ex_feature_scores', 'get_feature_scores_all']
_logger = get_module_logger(__name__)
F_REGRESSION = f_regression
F_CLASSIFICATION = f_classif
CHI2 = chi2
def _prepare_experiments(experiments):
'''
transform the experiments structured array into a numpy array.
Parameters
----------
experiments :DataFrame
Returns
-------
ndarray, list
'''
try:
experiments = experiments.drop('scenario', axis=1)
except KeyError:
pass
x = experiments.copy()
x_nominal = x.select_dtypes(exclude=np.number)
x_nominal_columns = x_nominal.columns.values
for column in x_nominal_columns:
if np.unique(x[column]).shape == (1,):
x = x.drop(column, axis=1)
            _logger.info(("{} dropped from analysis because it "
                          "contains only a single category").format(column))
else:
x[column] = x[column].astype('category').cat.codes
return x.values, x.columns.tolist()
def _prepare_outcomes(outcomes, classify):
'''
transform the outcomes dict into a vector with either the class allocation
or the value.
Parameters
----------
outcomes : dict
the outcomes dict
classify : callable or str
a classify function or variable analogous to PRIM
Returns
-------
1d ndarray
the return from classify
bool
data is categorical (True) or continuous (False)
Raises
--------
TypeError
if classify is neither a StringType nor a callable
KeyError
if classify is a string which is not a key in the outcomes dict.
'''
if isinstance(classify, str):
try:
y = outcomes[classify]
except KeyError as e:
raise e
categorical = False
elif callable(classify):
y = classify(outcomes)
categorical = True
else:
raise TypeError("unknown type for classify")
return y, categorical
def get_univariate_feature_scores(x, y, score_func=F_CLASSIFICATION):
'''
calculate feature scores using univariate statistical tests. In case of
categorical data, chi square or the Anova F value is used. In case of
continuous data the Anova F value is used.
Parameters
----------
x : structured array
y : 1D nd.array
score_func : {F_CLASSIFICATION, F_REGRESSION, CHI2}
the score function to use, one of f_regression (regression), or
f_classification or chi2 (classification).
Returns
-------
pandas DataFrame
sorted in descending order of tuples with uncertainty and feature
scores (i.e. p values in this case).
'''
x, uncs = _prepare_experiments(x)
pvalues = score_func(x, y)[1]
pvalues = np.asarray(pvalues)
pvalues = zip(uncs, pvalues)
pvalues = list(pvalues)
pvalues.sort(key=itemgetter(1))
pvalues = pd.DataFrame(pvalues)
pvalues = pvalues.set_index(0)
return pvalues
def get_rf_feature_scores(x, y, mode=RuleInductionType.CLASSIFICATION,
nr_trees=250,
max_features='auto', max_depth=None,
min_samples_split=2, min_samples_leaf=1,
bootstrap=True, oob_score=True, random_state=None):
'''
Get feature scores using a random forest
Parameters
----------
x : structured array
y : 1D nd.array
mode : {RuleInductionType.CLASSIFICATION, RuleInductionType.REGRESSION}
nr_trees : int, optional
nr. of trees in forest (default=250)
max_features : int, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
max_depth : int, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
    min_samples_split : int, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
min_samples_leaf : int, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
bootstrap : bool, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
oob_score : bool, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
random_state : int, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
Returns
-------
pandas DataFrame
sorted in descending order of tuples with uncertainty and feature
scores
object
either RandomForestClassifier or RandomForestRegressor
'''
x, uncs = _prepare_experiments(x)
if mode == RuleInductionType.CLASSIFICATION:
rfc = RandomForestClassifier
criterion = 'gini'
elif mode == RuleInductionType.REGRESSION:
rfc = RandomForestRegressor
criterion = 'mse'
else:
raise ValueError('{} not valid for mode'.format(mode))
forest = rfc(n_estimators=nr_trees,
criterion=criterion,
max_features=max_features,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
bootstrap=bootstrap,
oob_score=oob_score,
random_state=random_state)
forest.fit(x, y)
importances = forest.feature_importances_
importances = zip(uncs, importances)
importances = list(importances)
importances.sort(key=itemgetter(1), reverse=True)
importances = pd.DataFrame(importances)
importances = importances.set_index(0)
return importances, forest
def get_ex_feature_scores(x, y, mode=RuleInductionType.CLASSIFICATION,
nr_trees=100, max_features=None, max_depth=None,
min_samples_split=2, min_samples_leaf=None,
min_weight_fraction_leaf=0, max_leaf_nodes=None,
bootstrap=True, oob_score=True, random_state=None):
'''
Get feature scores using extra trees
Parameters
----------
x : structured array
y : 1D nd.array
mode : {RuleInductionType.CLASSIFICATION, RuleInductionType.REGRESSION}
nr_trees : int, optional
        nr. of trees in forest (default=100)
max_features : int, float, string or None, optional
by default, it will use number of featers/3, following
Jaxa-Rozen & Kwakkel (2018) doi: 10.1016/j.envsoft.2018.06.011
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
max_depth : int, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
min_samples_split : int, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
min_samples_leaf : int, optional
defaults to 1 for N=1000 or lower, from there on
proportional to sqrt of N
(see discussion in Jaxa-Rozen & Kwakkel (2018) doi: 10.1016/j.envsoft.2018.06.011)
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
min_weight_fraction_leaf : float, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
max_leaf_nodes: int or None, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
bootstrap : bool, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
oob_score : bool, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
random_state : int, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
Returns
-------
pandas DataFrame
sorted in descending order of tuples with uncertainty and feature
scores
object
either ExtraTreesClassifier or ExtraTreesRegressor
'''
x, uncs = _prepare_experiments(x)
    # Defaults follow Jaxa-Rozen & Kwakkel (2018), doi: 10.1016/j.envsoft.2018.06.011:
    # max_features defaults to (number of variables) / 3;
    # min_samples_leaf defaults to 1 for N <= 1000 and then grows in proportion
    # to sqrt(N), i.e. sqrt(N) / sqrt(1000), with a minimum of 1.
if max_features is None:
max_features = int(round(x.shape[1] / 3))
if min_samples_leaf is None:
min_samples_leaf = max(1,
int(round(math.sqrt(x.shape[0]) / math.sqrt(1000))))
if mode == RuleInductionType.CLASSIFICATION:
etc = ExtraTreesClassifier
criterion = 'gini'
elif mode == RuleInductionType.REGRESSION:
etc = ExtraTreesRegressor
criterion = 'mse'
else:
raise ValueError('{} not valid for mode'.format(mode))
extra_trees = etc(n_estimators=nr_trees,
criterion=criterion,
max_features=max_features,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_leaf_nodes=max_leaf_nodes,
bootstrap=bootstrap,
oob_score=oob_score,
random_state=random_state)
extra_trees.fit(x, y)
importances = extra_trees.feature_importances_
importances = zip(uncs, importances)
importances = list(importances)
importances.sort(key=itemgetter(1), reverse=True)
importances = pd.DataFrame(importances)
importances = importances.set_index(0)
return importances, extra_trees
algorithms = {'extra trees': get_ex_feature_scores,
'random forest': get_rf_feature_scores,
'univariate': get_univariate_feature_scores}
def get_feature_scores_all(x, y, alg='extra trees',
mode=RuleInductionType.REGRESSION,
**kwargs):
'''perform feature scoring for all outcomes using the specified feature
scoring algorithm
Parameters
----------
x : numpy structured array
y : dict of 1d numpy arrays
the outcomes, with a string as key, and a 1D array for each outcome
alg : {'extra trees', 'random forest', 'univariate'}, optional
mode : {RuleInductionType.REGRESSION, RuleInductionType.CLASSIFICATION}, optional
kwargs : dict, optional
any remaining keyword arguments will be passed to the specific
feature scoring algorithm
Returns
-------
DataFrame instance
'''
complete = None
for key, value in y.items():
fs, _ = algorithms[alg](x, value, mode=mode, **kwargs)
fs = fs.rename(columns={1: key})
if complete is None:
complete = fs.T
else:
complete = complete.append(fs.T, sort=True)
return complete.T
```
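A compact sketch (not in the source) of scoring several outcomes at once; it assumes scikit-learn and pandas releases that still accept the `'mse'` criterion string and `DataFrame.append` used in this module, and the import path is an assumption:
```python
# Hypothetical usage sketch for the feature scoring helpers above.
import numpy as np
import pandas as pd
from emat.workbench.analysis.feature_scoring import get_feature_scores_all

rng = np.random.RandomState(0)
experiments = pd.DataFrame({
    "a": rng.uniform(size=300),
    "b": rng.uniform(size=300),
    "c": rng.choice(["low", "high"], size=300),   # nominal column, recoded internally
})
outcomes = {
    "y1": 3 * experiments["a"].values + rng.normal(scale=0.1, size=300),
    "y2": experiments["b"].values ** 2 + rng.normal(scale=0.1, size=300),
}

scores = get_feature_scores_all(experiments, outcomes, alg="extra trees")
print(scores)   # one row per input, one column per outcome, values are importances
```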
#### File: workbench/connectors/simio_connector.py
```python
import os
import sys
import clr # @UnresolvedImport
# TODO:: do some auto discovery here analogue to netlogo?
sys.path.append('C:/Program Files (x86)/Simio')
clr.AddReference('SimioDLL')
clr.AddReference('SimioAPI')
import SimioAPI # @UnresolvedImport
from ..em_framework import FileModel, SingleReplication
from ..util import CaseError, EMAError
from ..util.ema_logging import get_module_logger, method_logger
# Created on 27 June 2019
#
# .. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
_logger = get_module_logger(__name__)
class SimioModel(FileModel, SingleReplication):
@method_logger(__name__)
def __init__(self, name, wd=None, model_file=None, main_model=None):
"""interface to the model
Parameters
----------
name : str
name of the modelInterface. The name should contain only
alpha-numerical characters.
        wd : str
            working directory for the model.
model_file : str
the name of the model file
main_model : str
Raises
------
EMAError
if name contains non alpha-numerical characters
ValueError
if model_file cannot be found
"""
super(SimioModel, self).__init__(name, wd=wd, model_file=model_file)
        assert main_model is not None
self.main_model_name = main_model
self.output = {}
@method_logger(__name__)
def model_init(self, policy):
super(SimioModel, self).model_init(policy)
_logger.debug('initializing model')
# get project
path_to_file = os.path.join(self.working_directory, self.model_file)
self.project = SimioAPI.ISimioProject(SimioAPI.SimioProjectFactory.LoadProject(path_to_file))
self.policy = policy
# get model
models = SimioAPI.IModels(self.project.get_Models())
model = models.get_Item(self.main_model_name)
if not model:
            raise EMAError(f'main model with name {self.main_model_name} not found')
self.model = SimioAPI.IModel(model)
# set up new EMA specific experiment on model
_logger.debug('setting up EMA experiment')
self.experiment = SimioAPI.IExperiment(model.Experiments.Create('ema experiment'))
SimioAPI.IExperimentResponses(self.experiment.Responses).Clear()
# use all available responses as template for experiment responses
responses = get_responses(model)
for outcome in self.outcomes:
for name in outcome.variable_name:
name = outcome.name
try:
value = responses[name]
except KeyError:
raise EMAError(f'response with name \'{name}\' not found')
response = SimioAPI.IExperimentResponse(self.experiment.Responses.Create(name))
response.set_Expression(value.Expression)
response.set_Objective(value.Objective)
# remove any scenarios on experiment
self.scenarios = SimioAPI.IScenarios(self.experiment.Scenarios)
self.scenarios.Clear()
# make control map
controls = SimioAPI.IExperimentControls(self.experiment.get_Controls())
self.control_map = {}
for i in range(controls.Count):
control = controls.get_Item(i)
self.control_map[control.Name] = control
_logger.debug('model initialized successfully')
@method_logger(__name__)
def run_experiment(self, experiment):
self.case = experiment
_logger.debug('Setup SIMIO scenario')
scenario = self.scenarios.Create()
_logger.debug(f'nr. of scenarios is {self.scenarios.Count}')
for key, value in experiment.items():
try:
control = self.control_map[key]
except KeyError:
                raise EMAError('uncertainty not specified as control in simio model')
else:
ret = scenario.SetControlValue(control, str(value))
if ret:
_logger.debug(f'{key} set successfully')
else:
raise CaseError(f'failed to set {key}')
_logger.debug('SIMIO scenario setup completed')
self.experiment.ScenarioEnded += self.scenario_ended
self.experiment.RunCompleted += self.run_completed
_logger.debug('preparing to run model')
self.experiment.Run()
_logger.debug('run completed')
return self.output
@method_logger(__name__)
def reset_model(self):
"""
Method for reseting the model to its initial state. The default
implementation only sets the outputs to an empty dict.
"""
super(SimioModel, self).reset_model()
self.scenarios.Clear()
self.output = {}
@method_logger(__name__)
def scenario_ended(self, sender, scenario_ended_event):
'''scenario ended event handler'''
# ema_logging.debug('scenario ended called!')
# This event handler will be called when all replications for a
# given scenario have completed. At this point the statistics
# produced by this scenario should be available.
experiment = SimioAPI.IExperiment(sender)
scenario = SimioAPI.IScenario(scenario_ended_event.Scenario)
        _logger.debug(f'scenario {scenario.Name} for experiment {experiment.Name} completed')
responses = experiment.Scenarios.get_Responses()
# http://stackoverflow.com/questions/16484167/python-net-framework-reference-argument-double
for response in responses:
_logger.debug(f'{response}')
response_value = 0.0
try:
success, response_value = scenario.GetResponseValue(response,
response_value)
except TypeError:
                _logger.warning(f'type error when trying to get a response for {response.Name}')
raise
if success:
self.output[response.Name] = response_value
else:
# no valid response value
error = CaseError(f'no valid response for {response.Name}',
self.case)
_logger.exception(str(error))
raise
@method_logger(__name__)
def run_completed(self, sender, run_completed_event):
'''run completed event handler'''
_logger.debug('run completed')
# This event handler is the last one to be called during the run.
# When running async, this is the correct place to shut things down.
experiment = SimioAPI.IExperiment(sender)
# Un-wire from the run events when we're done.
experiment.ScenarioEnded -= self.scenario_ended
experiment.RunCompleted -= self.run_completed
def get_responses(model):
'''Helper function for getting responses
this function gathers all responses defined on all experiments available
on the model.
Parameters
----------
model : SimioAPI.IModel instance
'''
response_map = {}
experiments = SimioAPI.IExperiments(model.Experiments)
for i in range(experiments.Count):
experiment = SimioAPI.IExperiment(experiments.get_Item(i))
responses = SimioAPI.IExperimentResponses(experiment.Responses)
for j in range(responses.Count):
response = SimioAPI.IExperimentResponse(responses.get_Item(j))
response_map[response.Name] = response
return response_map
```
#### File: tmip-emat/tests/test_learn.py
```python
import pandas
from emat.learn.feature_selection import SelectUniqueColumns
def test_select_unique_columns():
df = pandas.DataFrame({
'Aa': [1,2,3,4,5,6,7],
'Bb': [4,6,5,4,6,2,2],
'Cc': [1,2,3,4,5,6,7],
'Dd': [4,5,6,7,8,8,2],
'Ee': [10,20,30,40,50,60,70],
'Ff': [44,55,66,77,88,88,22],
})
s = SelectUniqueColumns().fit(df)
pandas.testing.assert_frame_equal(s.transform(df), df[['Aa','Bb','Dd']])
```
#### File: tmip-emat/tests/test_scope.py
```python
import unittest
import pytest
import os
import emat
from emat.scope.scope import Scope, ScopeError
from emat.scope.box import Box, ChainedBox, Boxes
from emat import package_file
from emat.database.sqlite.sqlite_db import SQLiteDB
from emat import config
class TestScopeMethods(unittest.TestCase):
'''
tests parsing scope file
'''
#
# one time test setup
#
scope_file = emat.package_file("model", "tests", "model_test.yaml")
db_test = SQLiteDB(
config.get("test_db_filename", ":memory:"),
initialize=True,
)
#
# Tests
#
def test_dump_scope(self):
scp = Scope(self.scope_file)
dumped = scp.dump()
# print("="*40)
# print(dumped)
# print("="*40)
loaded = Scope(scope_def=dumped, scope_file="fake/filename.yaml")
assert loaded == scp # filename is intentionally different but let it go
# but everything else is the same
assert loaded.name == scp.name
assert loaded.get_measures() == scp.get_measures()
assert loaded.get_parameters() == scp.get_parameters()
assert loaded.scope_file != scp.scope_file
assert loaded.scope_file == "fake/filename.yaml"
# fix name, still get equality
loaded.scope_file = scp.scope_file
assert loaded == scp
def test_save_scope(self):
scp = Scope(self.scope_file)
scp.store_scope(self.db_test)
def test_null_scope(self):
scp = Scope(None)
assert repr(scp) == "<emat.Scope with no content>"
assert len(scp.get_measures()) == 0
assert len(scp.get_parameters()) == 0
def test_box(self):
scope = Scope(package_file('model','tests','road_test.yaml'))
with pytest.raises(TypeError):
s = Box(scope=scope)
s = Box(name="Speedy", scope=scope)
s.set_upper_bound('build_travel_time', 70)
with pytest.raises(ScopeError):
s.set_upper_bound('not_a_thing', 70)
assert len(s) == 1
assert 'build_travel_time' in s
assert s.parent_box_name is None
s2 = Box(name="Notable", scope=scope, parent="Speedy")
s2.set_lower_bound('expand_capacity', 20)
assert len(s2) == 1
assert 'build_travel_time' not in s2
assert s2.parent_box_name == 'Speedy'
def test_box_universe(self):
scope = Scope(package_file('model','tests','road_test.yaml'))
s = Box(name="Speedy", scope=scope)
s.set_upper_bound('build_travel_time', 70)
s2 = Box(name="Notable", scope=scope, parent="Speedy")
s2.set_lower_bound('expand_capacity', 20)
u = Boxes(s, s2, scope=scope)
assert u.fancy_names() == ['Scope: EMAT Road Test', '▶ Speedy', '▷ ▶ Notable']
assert u.plain_names() == [None, 'Speedy', 'Notable']
def test_read_write_box(self):
scope = Scope(package_file('model','tests','road_test.yaml'))
db = SQLiteDB()
scope.store_scope(db)
s1 = Box(name="Speedy", scope=scope)
s1.set_upper_bound('build_travel_time', 70)
s1.relevant_features.add('debt_type')
s2 = Box(name="Notable", scope=scope, parent="Speedy")
s2.set_lower_bound('expand_capacity', 20)
db.write_box(s1)
db.write_box(s2)
s1_ = db.read_box(scope.name, "Speedy")
s2_ = db.read_box(scope.name, "Notable")
assert s1 == s1_
assert s2 == s2_
assert s1.thresholds == s1_.thresholds
assert s2.thresholds == s2_.thresholds
assert s1.relevant_features == s1_.relevant_features
assert s2.relevant_features == s2_.relevant_features
def test_read_write_boxes(self):
scope = Scope(package_file('model','tests','road_test.yaml'))
db = SQLiteDB()
scope.store_scope(db)
s1 = Box(name="Speedy", scope=scope)
s1.set_upper_bound('build_travel_time', 70)
s2 = Box(name="Notable", scope=scope, parent="Speedy")
s2.set_lower_bound('expand_capacity', 20)
u = Boxes(s1, s2, scope=scope)
db.write_boxes(u)
scope2 = Scope(package_file('model','tests','road_test.yaml'))
u2 = db.read_boxes(scope=scope2)
assert u == u2
assert u["Notable"].parent_box_name == u2["Notable"].parent_box_name
s1_ = db.read_box(scope.name, "Speedy")
s2_ = db.read_box(scope.name, "Notable")
assert s1 == s1_
assert s2 == s2_
assert s1.relevant_features == s1_.relevant_features
assert s2.relevant_features == s2_.relevant_features
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jinscoe123/dph",
"score": 3
} |
#### File: dph/dph/pollard.py
```python
from .config import *
MAX_RETRIES = 3
__all__ = [ 'MAX_RETRIES', 'pollard' ]
def pollard(G, H, P, a=1, b=1):
Q = (P - 1) // 2
def xab(x, a, b):
xsub = x % 3
if xsub == 0:
x = (x * G) % P
a = (a + 1) % Q
if xsub == 1:
x = (x * H) % P
b = (b + 1) % Q
if xsub == 2:
x = (x * x) % P
a = (a * 2) % Q
b = (b * 2) % Q
return (x, a, b)
x = G * H
X = x
A = a
B = b
if Config.debug:
w1 = len(str(P))
w2 = len(str(Q))
fmts = f''.join([
'i'.rjust(w1),
' ',
' ',
'x'.rjust(w1),
' ',
'a'.rjust(w2),
' ',
'b'.rjust(w2),
' ',
' ',
'X'.rjust(w1),
' ',
'A'.rjust(w2),
' ',
'B'.rjust(w2),
])
print('-' * len(fmts))
print(fmts)
print('-' * len(fmts))
fmts = f''.join([
f'{{:{w1}d}}',
f' ',
f' ',
f'{{:{w1}d}}',
f' ',
f'{{:{w2}d}}',
f' ',
f'{{:{w2}d}}',
f' ',
f' ',
f'{{:{w1}d}}',
f' ',
f'{{:{w2}d}}',
f' ',
f'{{:{w2}d}}',
])
for _ in range(1, P):
x, a, b = xab(x, a, b)
X, A, B = xab(X, A, B)
X, A, B = xab(X, A, B)
if Config.debug:
print(fmts.format(_, x, a, b, X, A, B))
if x == X:
break
result = ((a - A) * pow(B - b, -1, Q)) % Q
if pow(G, result, P) == H:
return result
result += Q
if pow(G, result, P) == H:
return result
raise ValueError
def _main():
from gmpy2 import mpz
import math
import random
import sys
if len(sys.argv) != 4:
print('Usage: pollard g h p')
print()
print(' Solve the discrete logarithm using Pollard Rho\'s algorithm for discrete logarithms.')
print()
print(' i.e. Find an integer x s.t. g^x = h (mod p).')
sys.exit(0)
g = mpz(sys.argv[1])
h = mpz(sys.argv[2])
p = mpz(sys.argv[3])
a = mpz(1)
b = mpz(1)
w1 = 1 + int(math.log10(MAX_RETRIES))
w2 = 1 + int(math.log10(p))
err_fmts = f'Attempt #{{:{w1}}} failed -- retrying with a = {{:{w2}}}, b = {{:{w2}}}...'
for i in range(MAX_RETRIES):
try:
x = pollard(g, h, p, a, b)
if Config.debug:
print()
print(f'x = {x}')
break
except ValueError:
a = mpz(random.randint(1, p))
b = mpz(random.randint(1, p))
if Config.debug:
print()
print(err_fmts.format(i, a, b))
else:
print()
print(f'Maximum number of attempts reached. Failed to solve {g}^x = {h} (mod {p}).')
sys.exit(1)
if __name__ == '__main__':
_main()
```
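Besides the CLI wrapper in `_main`, `pollard` can be called directly. Because a given starting point may fail to produce a usable collision, a retry loop with fresh random exponents (the same pattern `_main` uses) is the safe way to call it. The import path below assumes the `dph` package is installed; the numbers are a small textbook instance where 2^10 ≡ 5 (mod 1019).

```python
# Hypothetical direct use of pollard(); assumes the dph package is importable.
import random
from dph.pollard import pollard

g, h, p = 2, 5, 1019   # 2**10 % 1019 == 5, so x = 10 solves g**x = h (mod p)
x, a, b = None, 1, 1
for _ in range(5):
    try:
        x = pollard(g, h, p, a, b)
        break
    except ValueError:                      # collision was not informative; retry
        a, b = random.randint(1, p), random.randint(1, p)
if x is not None:
    assert pow(g, x, p) == h
    print(f'log_{g}({h}) mod {p} = {x}')
```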
#### File: dph/dph/util.py
```python
from functools import reduce
import operator
__all__ = [ 'product' ]
def product(*args):
return reduce(operator.mul, args)
``` |
{
"source": "jinsen47/LeanTest",
"score": 2
} |
#### File: jinsen47/LeanTest/app.py
```python
import json
from datetime import datetime
import leancloud
import requests
from flask import Flask, request, Response
from flask import render_template
from flask_sockets import Sockets
from views.todos import todos_view
app = Flask(__name__)
sockets = Sockets(app)
# dynamic routing
app.register_blueprint(todos_view, url_prefix='/todos')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/time')
def time():
return str(datetime.now())
@app.route('/gank/<category>/<page>')
def gank(category, page):
url = 'http://gank.io/api/search/query/listview/category/%s/count/5/page/%s' % (category, page)
requestAPI = requests.get(url)
return Response(json.dumps(requestAPI.json()), mimetype='application/json')
@sockets.route('/echo')
def echo_socket(ws):
while True:
message = ws.receive()
ws.send(message)
``` |
{
"source": "jinseong0525/tools",
"score": 3
} |
#### File: tools/labelme/json2dataset.py
```python
import json
import os
import os.path as osp
import imgviz
import PIL.Image
import numpy as np
from labelme import utils
JSON_DIR = './labelme/data/labeled/'
SAVE_DIR = './labelme/data/dataset/'
def main(JSON_DIR, SAVE_DIR):
# read .json file list
_, _, jsons = next(os.walk(JSON_DIR))
jsons = [s for s in jsons if ".json" in s]
# take the label_names.txt
with open(osp.join(JSON_DIR, "label_names.txt"), "r") as f:
cnt = 0
label_name_to_value = {}
for line in f:
label_name_to_value[line.rstrip('\n')] = cnt
cnt += 1
for json_file in jsons:
# read json
data = json.load(open(JSON_DIR + json_file))
# read image
imageData = data.get("imageData")
if not imageData:
imagePath = os.path.join(JSON_DIR, data["imagePath"])
img = np.asarray(PIL.Image.open(imagePath))
else:
img = utils.img_b64_to_arr(imageData)
with open(osp.join(JSON_DIR, "label_names.txt"), "r") as f:
cnt = 0
label_name_to_value = {}
for line in f:
label_name_to_value[line.rstrip('\n')] = cnt
cnt += 1
# make a label data
lbl, _ = utils.shapes_to_label(
img.shape, data["shapes"], label_name_to_value
)
# make a viz data
label_names = [None] * (max(label_name_to_value.values()) + 1)
for name, value in label_name_to_value.items():
label_names[value] = name
lbl_viz = imgviz.label2rgb(
label=lbl, img=imgviz.asgray(img), label_names=label_names, loc="rb"
)
# save dataset
_, name, _ = json_file.replace('.', '_').split('_')
PIL.Image.fromarray(img).save(osp.join(SAVE_DIR, "img_" + name + ".png"))
utils.lblsave(osp.join(SAVE_DIR, "label_" + name + ".png"), lbl)
PIL.Image.fromarray(lbl_viz).save(osp.join(SAVE_DIR, "viz_" + name + ".png"))
with open(osp.join(SAVE_DIR, "label_names.txt"), "w") as f:
for lbl_name in label_names:
f.write(lbl_name + "\n")
if __name__ == "__main__":
main(JSON_DIR, SAVE_DIR)
``` |
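The script assumes `JSON_DIR` already holds the labelme `.json` annotations plus a `label_names.txt` with one class per line, and it takes the middle token of a file name such as `img_<name>.json` as the output name. A hypothetical preparation of that layout is sketched below; the directory paths, class names, and file-naming pattern are assumptions for illustration only.

```python
# Hypothetical input layout expected by json2dataset.py (paths and classes are examples).
import os

JSON_DIR = './labelme/data/labeled/'
SAVE_DIR = './labelme/data/dataset/'
os.makedirs(JSON_DIR, exist_ok=True)
os.makedirs(SAVE_DIR, exist_ok=True)

# one class per line; the background class conventionally comes first
with open(os.path.join(JSON_DIR, 'label_names.txt'), 'w') as f:
    f.write('_background_\ncrack\nvoid\n')

# annotation files would then be named e.g. img_0001.json, so that
# 'img_0001.json'.replace('.', '_').split('_') -> ['img', '0001', 'json']
```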
{
"source": "JinseongHwang/pyqt5-lecture",
"score": 3
} |
#### File: pyqt5-lecture/basics/06-MenuBar.py
```python
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QAction
from PyQt5.QtGui import QIcon
class MyApp(QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
        self.statusBar()  # create an empty status bar
        # icon, text, and the parent object the action belongs to
        exitAction = QAction(QIcon(
            'C:\\Users\\User\\Desktop\\my-lecture-material\\pyqt5-lecture\\resources\\exit.png'), 'Exit', self)
        exitAction.setShortcut('Ctrl+Q')  # shortcut key Ctrl+Q
        exitAction.setStatusTip('Exit application')  # text shown in the status bar
        # when this action is triggered, the emitted signal is connected to quit and the application exits
        exitAction.triggered.connect(QApplication.quit)
        menuBar = self.menuBar()  # create the menu bar
        menuBar.setNativeMenuBar(False)  # setting so the menu bar also works on macOS
        # add a menu named 'File' (selectable with Alt+F)
        fileMenu = menuBar.addMenu('&File')  # the & position sets the shortcut key
        fileMenu.addAction(exitAction)  # add exitAction to the File menu
self.setWindowTitle('Menubar')
self.setGeometry(300, 300, 300, 200)
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = MyApp()
sys.exit(app.exec_())
```
#### File: pyqt5-lecture/calculator/main.py
```python
import sys
import traceback
import logging
from PyQt5.QtWidgets import (QApplication, QWidget, QPushButton, QLineEdit, QGridLayout, QLayout)
from PyQt5.QtCore import Qt
class MyApp(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def getResult(self):
formula = self.display.text()
try:
result = eval(formula)
except SyntaxError:
            result = 'Invalid expression.'
except Exception:
            result = 'Please enter the expression again.'
logging.error(traceback.format_exc())
self.display.setText(str(result))
def getBtn(self, number):
numberBtn = QPushButton(str(number))
numberBtn.clicked.connect(lambda: self.display.setText(self.display.text() + str(number)))
return numberBtn
def setButtons(self):
clearBtn = QPushButton('Clear')
clearBtn.clicked.connect(lambda: self.display.setText(''))
delBtn = QPushButton('Del')
delBtn.clicked.connect(lambda: self.display.setText(self.display.text()[:-1]))
resultBtn = QPushButton('=')
resultBtn.clicked.connect(self.getResult)
self.gridLayout.addWidget(self.display, 0, 0, 1, 4)
self.gridLayout.addWidget(clearBtn, 1, 0, 1, 2)
self.gridLayout.addWidget(delBtn, 1, 2, 1, 2)
self.gridLayout.addWidget(self.getBtn('1'), 2, 0)
self.gridLayout.addWidget(self.getBtn('2'), 2, 1)
self.gridLayout.addWidget(self.getBtn('3'), 2, 2)
self.gridLayout.addWidget(self.getBtn('+'), 2, 3)
self.gridLayout.addWidget(self.getBtn('4'), 3, 0)
self.gridLayout.addWidget(self.getBtn('5'), 3, 1)
self.gridLayout.addWidget(self.getBtn('6'), 3, 2)
self.gridLayout.addWidget(self.getBtn('-'), 3, 3)
self.gridLayout.addWidget(self.getBtn('7'), 4, 0)
self.gridLayout.addWidget(self.getBtn('8'), 4, 1)
self.gridLayout.addWidget(self.getBtn('9'), 4, 2)
self.gridLayout.addWidget(self.getBtn('*'), 4, 3)
self.gridLayout.addWidget(self.getBtn('0'), 5, 0)
self.gridLayout.addWidget(self.getBtn('.'), 5, 1)
self.gridLayout.addWidget(resultBtn, 5, 2)
self.gridLayout.addWidget(self.getBtn('/'), 5, 3)
def initUI(self):
        self.display = QLineEdit()  # create the line editor used as the display
        self.display.setReadOnly(True)  # prevent direct keyboard input
        self.display.setAlignment(Qt.AlignRight)  # align text to the right
self.display.setStyleSheet(
"border:0px; font-size:20pt; font-family:Nanum Gothic; font-weight:bold; padding:10px"
)
self.gridLayout = QGridLayout()
self.setLayout(self.gridLayout)
self.gridLayout.setSizeConstraint(QLayout.SetFixedSize)
self.setButtons()
self.setWindowTitle('My Calculator')
        self.setFixedSize(300, 400)  # fix the window size
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = MyApp()
sys.exit(app.exec_())
```
#### File: pyqt5-lecture/layout/02-BoxLayout.py
```python
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QVBoxLayout
from PyQt5.QtCore import QCoreApplication
class MyApp(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
        # create three buttons
okButton = QPushButton('OK')
cancelButton = QPushButton('Cancel')
exitButton = QPushButton('Exit')
        exitButton.clicked.connect(QCoreApplication.instance().quit)  # quit the application
        # horizontal box, [left : right = 1 : 1]
hbox = QHBoxLayout()
hbox.addStretch(1)
hbox.addWidget(okButton)
hbox.addWidget(cancelButton)
hbox.addWidget(exitButton)
hbox.addStretch(1)
        # vertical box, [top : bottom = 3 : 1]
vbox = QVBoxLayout()
vbox.addStretch(3)
vbox.addLayout(hbox)
vbox.addStretch(1)
        # set the vertical box as the window's main layout
self.setLayout(vbox)
self.setWindowTitle('Box Layout')
self.setGeometry(300, 300, 300, 200)
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = MyApp()
sys.exit(app.exec_())
``` |
{
"source": "jinserk/matk",
"score": 2
} |
#### File: matk/examples/test.py
```python
from pathlib import Path
from ase import Atoms
from ase.calculators.emt import EMT
from ase.calculators.qchem import QChem
from ase.optimize import LBFGS, QuasiNewton
from ase.vibrations import Vibrations
from gpaw import PW, FermiDirac
from c2tk.conformer import get_atoms, pre_optimize
from c2tk.calculators.nwchem import NWChem
from c2tk.calculators.orca import ORCA
from c2tk.calculators.gpaw import GPAW
def test_qchem(atoms: Atoms) -> None:
calc = QChem(
label='qchem',
method='B3LYP',
basis='6-31+G*',
np=1, nt=4,
)
atoms.calc = calc
#opt = LBFGS(atoms)
#opt.run(fmax=0.05)
e1 = atoms.get_potential_energy()
print(f'test molecule energy: {e1:5.2f} eV')
def test_nwchem(atoms:Atoms) -> None:
calc = NWChem(label='nwchem',
dft=dict(
maxiter=2000,
xc='B3LYP',
),
basis='6-31+G*',
)
atoms.calc = calc
e1 = atoms.get_potential_energy()
print(f'test molecule energy: {e1:5.2f} eV')
"""
opt = LBFGS(atoms)
opt.run(fmax=0.05)
e2 = atoms.get_potential_energy()
print(f'test molecule energy: {e2:5.2f} eV')
obj = NWChemWrapper(nproc=1, mem=8000)
calc_params = {
'basis': '6-31+G*',
'func': 'B3LYP',
'target': 1,
}
e, f, p = obj.geom_opt(atoms, label="test", calc_params=calc_params)
print(atoms.positions)
"""
def test_gpaw(atoms: Atoms) -> None:
calc = GPAW(
mode=PW(),
xc='PBE',
occupations=FermiDirac(0.0, fixmagmom=True),
txt='temp.gpo',
)
atoms.calc = calc
e1 = atoms.get_potential_energy()
print(f'test molecule energy: {e1:5.2f} eV')
"""
relax = QuasiNewton(atoms, logfile='qn.log')
relax.run(fmax=0.05)
e2 = atoms.get_potential_energy()
print(f'test molecule energy: {e2:5.2f} eV')
"""
calc.write('temp.gpw')
def test_orca(atoms: Atoms) -> None:
calc = ORCA(
label='temp',
orcasimpleinput='tightscf B3LYP/G def2-SVP kdiis opt freq',
orcablocks='%scf maxiter 200 end\n%pal nprocs 8 end',
)
atoms.calc = calc
e1 = atoms.get_potential_energy()
print(f'test molecule energy: {e1:5.2f} eV')
"""
relax = QuasiNewton(atoms, logfile='qn.log')
relax.run(fmax=0.05)
e2 = atoms.get_potential_energy()
print(f'test molecule energy: {e2:5.2f} eV')
"""
vib = Vibrations(atoms)
vib.run()
vib.summary()
def main(smiles: str) -> None:
atoms = get_atoms(smiles)
a1 = atoms.copy()
a1.center(vacuum=50.0)
a1.calc = EMT()
e1 = a1.get_potential_energy()
print(f'test molecule energy: {e1:5.2f} eV')
"""
a2 = atoms.copy()
a2.center(vacuum=50.0)
a2 = pre_optimize(a2)
a2.calc = EMT()
e2 = a2.get_potential_energy()
print(f'test molecule energy: {e2:5.2f} eV')
"""
test_orca(atoms)
#test_gpaw(atoms)
#test_nwchem(atoms)
if __name__ == "__main__":
smiles = "c1ccc2c(c1)[nH]c1ccc(-n3c4ccccc4c4ccccc43)cc12"
main(smiles)
```
#### File: c2tk/calculators/calculator.py
```python
import os
import subprocess as sp
import shlex
from collections.abc import Iterable
from ase.calculators.calculator import (
    Calculator, FileIOCalculator, all_changes,
    CalculationFailed, CalculatorSetupError
)
from .. import settings, is_mpi_enabled
class C2TKFileIOCalculator(FileIOCalculator):
def __init__(self, mpi_embed_cmd=False, *args, **kwargs):
if 'directory' not in kwargs:
kwargs['directory'] = settings.SCRATCH_PATH
super().__init__(*args, **kwargs)
self.mpi_embed_cmd = mpi_embed_cmd
def calculate(self, atoms=None, properties=['energy'],
system_changes=all_changes):
Calculator.calculate(self, atoms, properties, system_changes)
self.write_input(self.atoms, properties, system_changes)
self.execute()
self.read_results()
def execute(self):
if self.command is None or not isinstance(self.command, str):
raise CalculatorSetupError(
'Please set ${} environment variable '
.format('ASE_' + self.name.upper() + '_COMMAND') +
'or supply the command keyword')
command = self.command
command = command.replace('PREFIX', self.prefix)
if is_mpi_enabled() and not self.mpi_embed_cmd:
command = f'mpiexec -np {settings.NPROC} {command}'
#real_command = shlex.split(command)
#print(real_command)
try:
proc = sp.Popen(command, cwd=self.directory, shell=True, env=settings.env)
except OSError as err:
# Actually this may never happen with shell=True, since
# probably the shell launches successfully. But we soon want
# to allow calling the subprocess directly, and then this
# distinction (failed to launch vs failed to run) is useful.
msg = 'Failed to execute "{}"'.format(command)
raise EnvironmentError(msg) from err
errorcode = proc.wait()
if errorcode:
path = os.path.abspath(self.directory)
msg = ('Calculator "{}" failed with command "{}" failed in '
'{} with error code {}'.format(self.name, command,
path, errorcode))
raise CalculationFailed(msg)
``` |
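Because `execute` only rewrites the command string before launching it, its behaviour is easy to see in isolation. The sketch below replays that string handling with assumed stand-ins for the command template, the prefix, and `settings.NPROC`; it is not the class's actual configuration.

```python
# Sketch of the command rewriting done in C2TKFileIOCalculator.execute() (values are assumed).
command = 'orca PREFIX.inp > PREFIX.out'   # stand-in for self.command
prefix = 'temp'                            # stand-in for self.prefix
nproc = 8                                  # stand-in for settings.NPROC
mpi_enabled, mpi_embed_cmd = True, False   # is_mpi_enabled() and the constructor flag

command = command.replace('PREFIX', prefix)
if mpi_enabled and not mpi_embed_cmd:
    command = f'mpiexec -np {nproc} {command}'
print(command)  # mpiexec -np 8 orca temp.inp > temp.out
```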
{
"source": "jinserk/matorage",
"score": 3
} |
#### File: matorage/data/saver.py
```python
import os
import sys
import uuid
import atexit
import tempfile
import tables as tb
import numpy as np
from functools import reduce
from minio import Minio
from matorage.nas import NAS
from matorage.utils import is_tf_available, is_torch_available, check_nas
from matorage.uploader import Uploader
_KB = 1024
"""The size of a Kilobyte in bytes"""
_MB = 1024 * _KB
"""The size of a Megabyte in bytes"""
class DataSaver(object):
"""
    This class must be created independently for each process. Each process uses
    multiple threads to upload to storage and generates unique metadata information when the upload is complete.
Update the file, push the upload queue if it exceeds a certain size, close the file, and create a new file.
After saving, you should disconnect the data saver.
To make This procedure easier to understand, the following is written in the pseudo-code.
.. code-block::
per_one_batch_data_size = array_size // num_batch
per_one_file_batch_size = max_object_size // per_one_batch_data_size
for batch_idx in range(num_batch):
if get_current_stored_batch_size() < per_one_file_batch_size:
file.append(data[batch_idx])
else:
file_closing()
new_file is opened
new_file.append(data[batch_idx])
All files are closed.
Note:
- Deep Learning Framework Type : All(pure python is also possible)
- **All processes should call the constructors of this class independently.**
- After data save is over, you must disconnect through the disconnect function.
Args:
config (:obj:`matorage.DataConfig`, **require**):
A DataConfig instance object
multipart_upload_size (:obj:`integer`, optional, defaults to `5 * 1024 * 1024`):
size of the incompletely uploaded object.
You can sync files faster with `multipart upload in MinIO. <https://github.com/minio/minio-py/blob/master/minio/api.py#L1795>`_
This is because MinIO clients use multi-threading, which improves IO speed more
efficiently regardless of Python's Global Interpreter Lock(GIL).
num_worker_threads (:obj:`integer`, optional, defaults to 4):
number of backend storage worker to upload or download.
inmemory (:obj:`boolean`, optional, defaults to `False`):
If you use this value as `True`, then you can use `HDF5_CORE driver <https://support.hdfgroup.org/HDF5/doc/TechNotes/VFL.html#TOC1>`_
so the temporary file for uploading or downloading to backend storage,
such as MinIO, is not stored on disk but is in the memory.
Keep in mind that using memory is fast because it doesn't use disk IO, but it's not always good.
If default option(False), then `HDF5_SEC2` driver will be used on posix OS(or `HDF5_WINDOWS` in Windows).
refresh (:obj:`boolean`, optional, defaults to `False`):
All existing data is erased and overwritten.
Single Process example
Examples::
import numpy as np
from tqdm import tqdm
from matorage import DataConfig, DataSaver
data_config = DataConfig(
endpoint='127.0.0.1:9000',
access_key='minio',
secret_key='miniosecretkey',
dataset_name='array_test',
attributes=[
('array', 'uint8', (3, 224, 224)),
]
)
data_saver = DataSaver(config=data_config)
row = 100
data = np.random.rand(64, 3, 224, 224)
for _ in tqdm(range(row)):
data_saver({
'array' : data
})
data_saver.disconnect()
"""
def __init__(
self,
config,
multipart_upload_size=5 * _MB,
num_worker_threads=4,
inmemory=False,
refresh=False,
):
self.config = config
# Storage configuration
self.multipart_upload_size = multipart_upload_size
self.num_worker_threads = num_worker_threads
# HDF5 configuration
self.inmemory = inmemory
self.filter = tb.Filters(**config.compressor)
self._filelist = []
self._file, self._earray = self._get_newfile()
self._disconnected = False
self._client = (
Minio(
endpoint=self.config.endpoint,
access_key=self.config.access_key,
secret_key=self.config.secret_key,
secure=self.config.secure,
region=self.config.region,
)
if not check_nas(self.config.endpoint)
else NAS(self.config.endpoint)
)
self._check_and_create_bucket(refresh=refresh)
self._uploader = Uploader(
client=self._client,
bucket=self.config.bucket_name,
num_worker_threads=self.num_worker_threads,
multipart_upload_size=self.multipart_upload_size,
inmemory=self.inmemory,
)
atexit.register(self._exit)
def _append_file(self):
"""
upload file to key called `<bucket_name>/key`.
appended data is `Dict[str, str]`
**`value` is file path of `str` type**
example:
{
'key' : 'value.txt',
}
"""
for key, filepath in self._datas.items():
self._uploader.set_queue(
local_file=filepath,
remote_file=key,
)
self.config.set_files(key)
def _append_numpy(self):
"""
append numpy array in `name` node.
appended data is `Dict[str, numpy.ndarray]` type.
**`value` is `numpy.ndarray` type with (B, *) shape, B means batch size**
example:
{
'image' : np.random.rand(16, 28, 28),
'target' : np.random.rand(16)
}
"""
array_size = self._get_array_size()
bzs = list(self._datas.values())[0].shape[0]
per_one_batch_data_size = array_size // bzs
per_one_file_batch_size = max(
1, self.config.max_object_size // per_one_batch_data_size
)
for batch_idx in range(bzs):
if self._get_current_stored_batch_size() < per_one_file_batch_size:
for name, array in self._datas.items():
self._earray[name].append(array[batch_idx, None])
else:
self._file_closing()
self._file, self._earray = self._get_newfile()
for name, array in self._datas.items():
self._earray[name].append(array[batch_idx, None])
def _check_and_create_bucket(self, refresh):
if not self._client.bucket_exists(self.config.bucket_name):
self._client.make_bucket(
self.config.bucket_name, location=self.config.region
)
elif refresh:
objects = self._client.list_objects(self.config.bucket_name, recursive=True)
for obj in objects:
self._client.remove_object(self.config.bucket_name, obj.object_name)
def _check_attr_name(self, name):
"""
        check whether the given attribute name exists
"""
if name not in self._earray.keys():
raise KeyError("attribute name {} is not exist!".format(name))
def _check_data_filetype(self):
"""
Check data which is file type
"""
if not isinstance(self._datas, dict):
raise TypeError("datas shoud be dict type.", self.__call__.__doc__)
for key, filepath in self._datas.items():
if not os.path.exists(filepath):
raise FileNotFoundError("{} is not found".format(filepath))
def _check_data_numpytype(self):
"""
Check data which is numpy array type
"""
if not isinstance(self._datas, dict):
raise TypeError("datas shoud be dict type.", self.__call__.__doc__)
bzs = 0
for name, array in self._datas.items():
self._check_attr_name(name=name)
if is_tf_available() and not isinstance(array, np.ndarray):
array = array.numpy()
if is_torch_available() and not isinstance(array, np.ndarray):
array = array.numpy()
assert isinstance(array, np.ndarray), "array type is not `numpy.ndarray`"
if bzs:
if bzs != array.shape[0]:
raise ValueError("each datas array batch sizes are not same.")
else:
bzs = array.shape[0]
            # This reshape is made into a (B, *) shape.
# Shape is lowered to two contiguous dimensions, enabling IO operations to operate very quickly.
# https://www.slideshare.net/HDFEOS/caching-and-buffering-in-hdf5#25
if len(array.shape) == 1:
# this array is ground truth
array = array.reshape(-1, 1)
self._datas[name] = array.reshape(
-1, reduce(lambda x, y: x * y, array.shape[1:])
)
def __call__(self, datas, filetype=False):
"""
Args:
datas (:obj:`Dict[str, numpy.ndarray] or Dict[str, str]`, **require**):
if filetype is false, `datas` is `Dict[str, numpy.ndarray]` type, **`value` is `numpy.ndarray` type with (B, *) shape, B means batch size**.
else true, `datas` is `Dict[str, str]` type, **`value` is file path of `str` type**.
filetype (:obj:`boolean`, optional):
Indicates whether the type of data to be added to this bucket is a simple file type.
Examples::
data_saver = DataSaver(config=data_config)
data_saver({
'image' : np.random.rand(16, 28, 28),
'target' : np.random.rand(16)
})
When used as shown below, filetype data is saved with a key called `<bucket_name>/raw_image`.
Examples::
data_saver = DataSaver(config=data_config)
data_saver({
'raw_image' : 'test.jpg'
})
print(data_config.get_filetype_list)
"""
self._disconnected = False
self._datas = datas
if not filetype:
self._check_data_numpytype()
self._append_numpy()
else:
self._check_data_filetype()
self._append_file()
def _file_closing(self):
_length = len(list(self._earray.values())[0])
_last_index = self.config.get_length
if not self.inmemory:
self._file.close()
self._uploader.set_queue(
local_file=self._file.filename,
remote_file=os.path.basename(self._filename),
)
else:
self._uploader.set_queue(
local_file=self._file.get_file_image(),
remote_file=os.path.basename(self._filename),
)
self._file.close()
# Set filename indexer
_current_index = _last_index + _length
self.config.set_indexer(
{
_current_index: {
"name": os.path.basename(self._filename),
"length": _length,
}
}
)
def _create_name(self, length=16):
return tempfile.mktemp("{}.h5".format(uuid.uuid4().hex[:length]))
def _exit(self):
self._file.close()
self._disconnected = True
def _get_array_size(self):
"""
        Get the total size of all arrays.
Returns:
:obj:`datas size(bytes)`
"""
size = 0
for name, array in self._datas.items():
size += array.nbytes
return size
def _get_current_stored_batch_size(self):
"""
Get current file stored batch size
Returns:
            :obj:`integer`: current stored batch size in an open file.
"""
return len(list(self._earray.values())[0])
def _get_newfile(self):
"""
Get new file inode and it's attribute
Returns:
:obj:`tuple(tables.File, dict)`
second item is pytable's attribute
{
'name1' : tables.EArray, 'name2' : tables.EArray
}
"""
_driver, _driver_core_backing_store = self._set_driver()
self._filename = self._create_name()
self._filelist.append(self._filename)
file = tb.open_file(
self._filename,
"a",
driver=_driver,
driver_core_backing_store=_driver_core_backing_store,
)
# create expandable array
earray = {}
for _earray in self.config.flatten_attributes:
earray[_earray.name] = file.create_earray(
file.root,
_earray.name,
_earray.type,
shape=tuple([0]) + _earray.shape,
filters=self.filter,
)
return (file, earray)
def _get_size(self):
if self.inmemory:
return sys.getsizeof(self._file.get_file_image())
else:
return self._file.get_filesize()
def _set_driver(self):
"""
Setting HDF5 driver type
Returns:
:obj:`str` : HDF5 driver type string
"""
if self.inmemory:
return "H5FD_CORE", False
else:
if os.name == "posix":
return "H5FD_SEC2", True
elif os.name == "nt":
return "H5FD_WINDOWS", True
else:
raise ValueError("{} OS not supported!".format(os.name))
@property
def get_downloaded_dataset(self):
"""
get local paths of downloaded dataset in local storage
Returns:
:obj:`list`: local path of downloaded datasets
"""
return self._filelist
def disconnect(self):
"""
disconnecting datasaver. close all opened files and upload to backend storage.
Must be called after ``datasaver`` function to store data safely.
Examples::
data_saver = DataSaver(config=data_config)
data_saver({
'image' : np.random.rand(16, 28, 28),
'target' : np.random.rand(16)
})
data_saver.disconnect()
"""
self._file_closing()
self._uploader.join_queue()
# metadata set
        key = <KEY>
_metadata_file = tempfile.mktemp(f"{key}.json")
self.config.metadata.to_json_file(_metadata_file)
self._client.fput_object(
self.config.bucket_name, f"metadata/{key}.json", _metadata_file
)
os.remove(_metadata_file)
@property
def get_disconnected(self):
return self._disconnected
```
#### File: matorage/matorage/nas.py
```python
import os
import shutil
class Obj(object):
def __init__(self, object_name):
self.object_name = object_name
class NAS(object):
def __init__(self, path):
self.path = path
def bucket_exists(self, bucket_name):
return os.path.exists(os.path.join(self.path, bucket_name))
def fget_object(self, bucket_name, object_name, file_path):
pass
def fput_object(self, bucket_name, object_name, file_path, part_size=None):
_filename = os.path.join(self.path, bucket_name, object_name)
if not os.path.exists(os.path.dirname(_filename)):
os.makedirs(os.path.dirname(_filename))
shutil.copyfile(src=file_path, dst=_filename)
def get_object(self, bucket_name, object_name):
_filename = os.path.join(self.path, bucket_name, object_name)
return open(_filename, "rb")
def put_object(self, bucket_name, object_name, data, length, part_size=None):
_filename = os.path.join(self.path, bucket_name, object_name)
if not os.path.exists(os.path.dirname(_filename)):
os.makedirs(os.path.dirname(_filename))
data.seek(0)
with open(_filename, "wb") as f:
shutil.copyfileobj(data, f, length=length)
def list_objects(self, bucket_name, prefix="", recursive=False):
_foldername = os.path.join(self.path, bucket_name)
if not recursive:
objects = os.listdir(_foldername)
else:
objects = [
os.path.join(dp, f) for dp, dn, fn in os.walk(_foldername) for f in fn
]
return [Obj(o) for o in objects if o.startswith(prefix)]
def make_bucket(self, bucket_name, location):
os.makedirs(os.path.join(self.path, bucket_name))
def remove_bucket(self, bucket_name):
shutil.rmtree(os.path.join(self.path, bucket_name))
def remove_object(self, bucket_name, object_name):
os.remove(os.path.join(self.path, bucket_name, object_name))
```
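`NAS` mirrors the small subset of the MinIO client API that the rest of the package relies on (`bucket_exists`, `make_bucket`, `fput_object`, `list_objects`, ...), which is what lets `DataSaver` treat a mounted path like a storage endpoint. A minimal sketch, assuming the package is installed and using a temporary directory as the mount point:

```python
# Minimal sketch: NAS as a file-system-backed stand-in for the Minio client.
import tempfile
from matorage.nas import NAS

root = tempfile.mkdtemp()              # assumed NAS mount point
client = NAS(root)

if not client.bucket_exists('demo-bucket'):
    client.make_bucket('demo-bucket', location='us-east-1')

src = tempfile.mktemp('.txt')
with open(src, 'w') as f:
    f.write('hello')

client.fput_object('demo-bucket', 'folder/hello.txt', src)
print([o.object_name for o in client.list_objects('demo-bucket', recursive=True)])
```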
#### File: matorage/tests/test_suite.py
```python
import sys
import unittest
from matorage.utils import is_torch_available, is_tf_available
def suite():
test_modules = [
"tests.test_datasaver",
]
if is_torch_available():
test_modules.extend([
"tests.test_torch_data",
"tests.test_torch_model",
"tests.test_torch_optimizer",
])
if is_tf_available():
test_modules.extend([
"tests.test_tf_data",
"tests.test_tf_model",
"tests.test_tf_optimizer",
])
alltests = unittest.TestSuite()
for name in test_modules:
# Unexpectedly, the following code doesn't seem to work anymore
# in python 3
# exec('from %s import suite as test_suite' % name)
__import__(name)
test_suite = sys.modules[name].suite
alltests.addTest(test_suite())
return alltests
def test(verbose=False):
result = unittest.TextTestRunner(verbosity=1 + int(verbose)).run(suite())
if result.wasSuccessful():
return 0
else:
return 1
if __name__ == '__main__':
test()
```
#### File: matorage/tests/test_torch_model.py
```python
import torch
import unittest
import numpy as np
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from tests.test_model import ModelTest
from matorage.model.config import ModelConfig
from matorage.model.torch.manager import ModelManager
from matorage.testing_utils import require_torch
@require_torch
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.fc1 = nn.Linear(28 * 28, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 10)
def forward(self, x):
x = x.view(-1, 28 * 28)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
@require_torch
class TorchModelTest(ModelTest, unittest.TestCase):
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
def test_torchmodel_saver(self, model_config=None, save_to_json_file=False):
if model_config is None:
self.model_config = ModelConfig(
**self.storage_config,
model_name="test_torchmodel_saver",
additional={"framework": "pytorch"}
)
else:
self.model_config = model_config
if save_to_json_file:
self.model_config_file = "model_config_file.json"
self.model_config.to_json_file(self.model_config_file)
self.model_manager = ModelManager(config=self.model_config)
model = Model()
self.model_manager.save(model, step=0)
def test_torchmodel_saver_from_json_file(self):
self.test_torchmodel_saver(save_to_json_file=True)
self.model_config = None
self.model_manager = None
self.model_config = ModelConfig.from_json_file(self.model_config_file)
self.model_manager = ModelManager(config=self.model_config)
model = Model()
self.model_manager.save(model, step=0)
def test_torchmodel_loader(self):
self.test_torchmodel_saver()
model = Model()
self.model_manager.load(model, step=0)
def test_torchmodel_loader_with_compressor(self):
model_config = ModelConfig(
**self.storage_config,
model_name="test_torchmodel_loader_with_compressor",
additional={"framework": "pytorch"},
compressor={"complevel": 4, "complib": "zlib"}
)
self.test_torchmodel_saver(model_config=model_config)
self.model_manager = ModelManager(config=self.model_config)
model = Model()
self.model_manager.load(model, step=0)
def test_torchmodel_layer_loader(self):
self.test_torchmodel_saver()
self.model_manager = ModelManager(config=self.model_config)
self.model_manager.load("f.weight", step=0)
@unittest.skip("skip")
def test_mnist_eval(self, model, device):
test_dataset = datasets.MNIST(
"/tmp/data", train=False, transform=self.transform
)
test_loader = DataLoader(test_dataset, batch_size=64, num_workers=4)
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for image, target in test_loader:
image, target = image.to(device), target.to(device)
output = model(image)
test_loss += F.nll_loss(output, target, reduction="sum").item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
return correct
def test_mnist_reloaded(self):
import torch.optim as optim
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_dataset = datasets.MNIST(
"/tmp/data", train=True, download=True, transform=self.transform
)
model = Model().to(device)
optimizer = optim.Adam(model.parameters(), lr=0.01)
criterion = torch.nn.CrossEntropyLoss()
train_loader = DataLoader(train_dataset, batch_size=64, num_workers=4)
for batch_idx, (image, target) in enumerate(tqdm(train_loader)):
image, target = image.to(device), target.to(device)
optimizer.zero_grad()
output = model(image)
loss = criterion(output, target)
loss.backward()
optimizer.step()
self.model_config = ModelConfig(
**self.storage_config,
model_name="testmodel",
additional={"version": "1.0.1"}
)
self.model_manager = ModelManager(config=self.model_config)
self.model_manager.save(model, epoch=1)
pretrained_model = Model().to(device)
correct = self.test_mnist_eval(model=pretrained_model, device=device)
self.model_manager.load(pretrained_model, epoch=1)
pretrained_correct = self.test_mnist_eval(model=pretrained_model, device=device)
assert correct < pretrained_correct
def suite():
return unittest.TestSuite(unittest.makeSuite(TorchModelTest))
if __name__ == "__main__":
unittest.main(defaultTest="suite")
``` |
{
"source": "jinseuk56/gms_dm_python",
"score": 3
} |
#### File: gms_dm_python/codes/PCA_NMF_hyperspectral_decomposition.py
```python
print("Execute Python script in GMS 3")
import numpy as np
import DigitalMicrograph as DM
from sklearn.decomposition import NMF, PCA
#import sys
#sys.argv.extend(['-a', ' '])
#import matplotlib.pyplot as plt
print("Libraries have been imported completely")
# ********************************************************************************
if ( False == DM.IsScriptOnMainThread() ):
print('MatplotLib scripts require to be run on the main thread.')
exit()
# ********************************************************************************
def zero_one_rescale(spectrum):
"""
get rid of negative values
rescale a spectrum [0, 1]
"""
spectrum = spectrum.clip(min=0.0)
min_val = np.min(spectrum)
rescaled = spectrum - min_val
if np.max(rescaled) != 0:
rescaled = rescaled / np.max(rescaled)
return rescaled
# ********************************************************************************
# ********************************************************************************
SI = DM.GetFrontImage()
print(SI)
origin0, scale0, unit0 = SI.GetDimensionCalibration(0, 0)
print(origin0, scale0, unit0)
origin1, scale1, unit1 = SI.GetDimensionCalibration(1, 0)
print(origin1, scale1, unit1)
origin2, scale2, unit2 = SI.GetDimensionCalibration(2, 0)
print(origin2, scale2, unit2)
SI_data = np.rollaxis(SI.GetNumArray(), 0, 3)
print(SI_data.shape)
# ********************************************************************************
# ********************************************************************************
crop_check = input("Do you want to crop spectra ? (Y or N)")
if crop_check == "Y":
start_ind = int(input("initial index of the crop range: "))
end_ind = int(input("final index of the crop range: "))
cr_range = [start_ind, end_ind]
SI_data_cropped = SI_data[:, :, cr_range[0]:cr_range[1]].copy()
elif crop_check == "N":
SI_data_cropped = SI_data.copy()
else:
print("Wrong input ! (only Y or N possible)")
exit()
data_shape = SI_data_cropped.shape[:2]
depth = SI_data_cropped.shape[2]
dataset_input = SI_data_cropped.reshape(-1, depth)
for i in range(len(dataset_input)):
dataset_input[i] = zero_one_rescale(dataset_input[i])
print(dataset_input.shape)
# ********************************************************************************
q_text = """Select one option.
1: PCA (principal component analysis)
2: NMF (non-negative matrix factorization)"""
decomp_check = int(input(q_text))
num_comp = int(input("How many loading vectors do you want to extract ?"))
if decomp_check == 1:
# ********************************************************************************
pca_num_comp = num_comp
skl_pca = PCA(n_components=pca_num_comp, whiten=False, svd_solver="auto")
pca_coeffs = skl_pca.fit_transform(dataset_input)
pca_comps = skl_pca.components_
num_rec = int(input("How many loading vectors do you want to use when reconstructing the data ?"))
pca_reconstructed = np.dot(pca_coeffs[:, :num_rec], pca_comps[:num_rec]) + skl_pca.mean_
print(pca_coeffs.shape)
print(pca_comps.shape)
print(pca_reconstructed.shape)
# ********************************************************************************
# ********************************************************************************
pca_explained = DM.CreateImage(skl_pca.explained_variance_ratio_.copy())
pca_explained.SetName("Explained variance ratio")
pca_comps_tmp = np.rollaxis(pca_comps.reshape(-1, 1, depth), 2, 0)
pca_comps_dm = DM.CreateImage(pca_comps_tmp.copy())
pca_comps_dm.SetName("PCA loading vectors")
pca_comps_dm.SetDimensionCalibration(1, 1, 1, "loading vector", 0)
if crop_check == "Y":
pca_comps_dm.SetDimensionCalibration(2, origin2+start_ind*scale2, scale2, unit2, 0)
else:
pca_comps_dm.SetDimensionCalibration(2, origin2, scale2, unit2, 0)
pca_coeffs_tmp = np.reshape(pca_coeffs, (data_shape[0], data_shape[1], pca_num_comp, 1))
pca_coeffs_dm = DM.CreateImage(pca_coeffs_tmp.copy())
pca_coeffs_dm.SetName("PCA coefficient maps")
pca_coeffs_dm.SetDimensionCalibration(1, 1, 1, "loading vector", 0)
pca_coeffs_dm.SetDimensionCalibration(2, origin0, scale0, unit0, 0)
pca_coeffs_dm.SetDimensionCalibration(3, origin1, scale1, unit1, 0)
pca_rec_tmp = np.rollaxis(np.reshape(pca_reconstructed, (data_shape[0], data_shape[1], -1)), 2, 0)
pca_rec_dm = DM.CreateImage(pca_rec_tmp.copy())
pca_rec_dm.SetName("PCA reconstructed SI")
pca_rec_dm.SetDimensionCalibration(0, origin0, scale0, unit0, 0)
pca_rec_dm.SetDimensionCalibration(1, origin1, scale1, unit1, 0)
if crop_check == "Y":
pca_rec_dm.SetDimensionCalibration(2, origin2+start_ind*scale2, scale2, unit2, 0)
else:
pca_rec_dm.SetDimensionCalibration(2, origin2, scale2, unit2, 0)
# ********************************************************************************
# ********************************************************************************
pca_explained.ShowImage()
pca_comps_dm.ShowImage()
pca_coeffs_dm.ShowImage()
pca_rec_dm.ShowImage()
# ********************************************************************************
elif decomp_check == 2:
# ********************************************************************************
nmf_num_comp = num_comp
skl_nmf = NMF(n_components=nmf_num_comp, init="nndsvda", solver="mu", max_iter=1000, verbose=True, beta_loss="frobenius", l1_ratio=0.0, alpha=0.0)
nmf_coeffs = skl_nmf.fit_transform(dataset_input)
print(nmf_coeffs[:, [1,2]].shape)
nmf_comps = skl_nmf.components_
# ********************************************************************************
nmf_comps_tmp = np.rollaxis(nmf_comps.reshape(-1, 1, depth), 2, 0)
nmf_comps_dm = DM.CreateImage(nmf_comps_tmp.copy())
nmf_comps_dm.SetName("NMF loading vectors")
nmf_comps_dm.SetDimensionCalibration(1, 1, 1, "loading vector", 0)
if crop_check == "Y":
nmf_comps_dm.SetDimensionCalibration(2, origin2+start_ind*scale2, scale2, unit2, 0)
else:
nmf_comps_dm.SetDimensionCalibration(2, origin2, scale2, unit2, 0)
nmf_coeffs_tmp = np.reshape(nmf_coeffs, (data_shape[0], data_shape[1], nmf_num_comp, 1))
nmf_coeffs_dm = DM.CreateImage(nmf_coeffs_tmp.copy())
nmf_coeffs_dm.SetName("NMF coefficient maps")
nmf_coeffs_dm.SetDimensionCalibration(1, 1, 1, "loading vector", 0)
nmf_coeffs_dm.SetDimensionCalibration(2, origin0, scale0, unit0, 0)
nmf_coeffs_dm.SetDimensionCalibration(3, origin1, scale1, unit1, 0)
nmf_comps_dm.ShowImage()
nmf_coeffs_dm.ShowImage()
# ********************************************************************************
nmf_reconstructed = np.dot(nmf_coeffs, nmf_comps)
print(nmf_coeffs.shape)
print(nmf_comps.shape)
print(nmf_reconstructed.shape)
nmf_rec_tmp = np.rollaxis(np.reshape(nmf_reconstructed, (data_shape[0], data_shape[1], -1)), 2, 0)
nmf_rec_dm = DM.CreateImage(nmf_rec_tmp.copy())
nmf_rec_dm.SetName("NMF reconstructed SI")
nmf_rec_dm.SetDimensionCalibration(0, origin0, scale0, unit0, 0)
nmf_rec_dm.SetDimensionCalibration(1, origin1, scale1, unit1, 0)
if crop_check == "Y":
nmf_rec_dm.SetDimensionCalibration(2, origin2+start_ind*scale2, scale2, unit2, 0)
else:
nmf_rec_dm.SetDimensionCalibration(2, origin2, scale2, unit2, 0)
nmf_rec_dm.ShowImage()
# ********************************************************************************
else:
print("Wrong input ! (only 1 or 2 possible)")
exit()
``` |
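The same reshape-decompose-reconstruct flow can be checked outside GMS on a synthetic cube, which is a quick way to sanity-check the array handling before running it on a real spectrum image. The sketch below keeps only the PCA branch and assumes a small random (H, W, depth) array in place of the DigitalMicrograph SI.

```python
# Standalone sketch of the PCA branch on a synthetic (H, W, depth) spectrum image.
import numpy as np
from sklearn.decomposition import PCA

H, W, depth = 8, 8, 64
si = np.random.rand(H, W, depth)

flat = si.reshape(-1, depth)                     # (H*W, depth), as in the script
pca = PCA(n_components=5, whiten=False, svd_solver='auto')
coeffs = pca.fit_transform(flat)                 # (H*W, 5) coefficient maps
rec = np.dot(coeffs[:, :3], pca.components_[:3]) + pca.mean_   # reconstruct from 3 components
rec_cube = rec.reshape(H, W, depth)
print(coeffs.shape, pca.components_.shape, rec_cube.shape)
```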
{
"source": "jinshengye-git/aipnd-project",
"score": 2
} |
#### File: jinshengye-git/aipnd-project/train.py
```python
import os
import argparse
import torch
import toolkit
def get_input_args():
parser = argparse.ArgumentParser()
valid_archs = {'densenet121', 'vgg16'}
parser.add_argument('--architectures', dest='architectures', default='vgg16', action='store', choices=valid_archs,help='model architectures')
parser.add_argument('--data_dir', type=str, help='dir to load images', default='./flower_data')
parser.add_argument('--save_dir', type=str, default='checkpoints', help='dir to save checkpoints, default checkpoints')
parser.add_argument('--hidden_units', type=int, default=500, help='hidden units, default 500')
parser.add_argument('--learning_rate', type=float, default=0.005, help='learning rate, default 0.005')
parser.add_argument('--gpu', dest='gpu', action='store_true', help='training device, default gpu')
parser.add_argument('--epochs', type=int, default=3, help='training epochs, default 3')
parser.add_argument('--num_threads', type=int, default=8,help='thread to training with cpu')
parser.set_defaults(gpu=True)
return parser.parse_args()
def main():
input_args = get_input_args()
gpu = torch.cuda.is_available() and input_args.gpu
dataloaders, class_to_idx = toolkit.get_dataloders(input_args.data_dir)
model, optimizer, criterion = toolkit.model_create(
input_args.architectures,
input_args.learning_rate,
input_args.hidden_units,
class_to_idx
)
if gpu:
model.cuda()
criterion.cuda()
else:
torch.set_num_threads(input_args.num_threads)
    epochs = input_args.epochs
print_every = 50
toolkit.train(model, dataloaders['training'], epochs, print_every, criterion, optimizer, device='gpu')
if input_args.save_dir:
if not os.path.exists(input_args.save_dir):
os.makedirs(input_args.save_dir)
file_path = input_args.save_dir + '/' + input_args.architectures + '_checkpoint.pth'
else:
file_path = input_args.architectures + '_checkpoint.pth'
toolkit.save_checkpoint(file_path,
model, optimizer,
input_args.architectures,
input_args.learning_rate,
input_args.epochs
)
toolkit.validation(model, dataloaders['testing'], criterion)
if __name__ == "__main__":
main()
``` |
{
"source": "jinshengye-git/jetbot",
"score": 3
} |
#### File: waveshare_motor_drive/python3/main.py
```python
from PCA9685 import PCA9685
import time
Dir = [
'forward',
'backward',
'spinleft',
'spinright',
'slowleft',
'slowright'
]
pwm = PCA9685(0x40, debug=True)
pwm.setPWMFreq(50)
class MotorDriver():
def __init__(self):
self.PWMA = 0
self.AIN1 = 1
self.AIN2 = 2
self.PWMB = 5
self.BIN1 = 3
self.BIN2 = 4
def MotorRun(self, motor, index, speed):
# DC Motor :
#
if speed > 100:
return
if(motor == 0): #Left Motor
pwm.setDutycycle(self.PWMA, speed)
if(index == Dir[0]):
pwm.setLevel(self.AIN1, 0)
pwm.setLevel(self.AIN2, 1)
elif (index == Dir[1]):
pwm.setLevel(self.AIN1, 1)
pwm.setLevel(self.AIN2, 0)
elif (index == Dir[2]):
pwm.setLevel(self.AIN1, 0)
pwm.setLevel(self.AIN2, 1)
elif (index == Dir[3]):
pwm.setLevel(self.AIN1, 1)
pwm.setLevel(self.AIN2, 0)
elif (index == Dir[4]):
speed = speed * 0.8
pwm.setDutycycle(self.PWMA, speed)
pwm.setLevel(self.AIN1, 0)
pwm.setLevel(self.AIN2, 1)
else:
speed = speed * 0.8
pwm.setDutycycle(self.PWMA, speed)
pwm.setLevel(self.AIN1, 1)
pwm.setLevel(self.AIN2, 0)
else: #Right Motor
pwm.setDutycycle(self.PWMB, speed)
if(index == Dir[0]):
pwm.setLevel(self.BIN1, 0)
pwm.setLevel(self.BIN2, 1)
elif (index == Dir[1]):
pwm.setLevel(self.BIN1, 1)
pwm.setLevel(self.BIN2, 0)
elif (index == Dir[2]):
pwm.setLevel(self.BIN1, 0)
pwm.setLevel(self.BIN2, 1)
elif (index == Dir[3]):
pwm.setLevel(self.BIN1, 1)
pwm.setLevel(self.BIN2, 0)
elif (index == Dir[4]):
speed = speed * 0.8
pwm.setDutycycle(self.PWMB, speed)
pwm.setLevel(self.BIN1, 0)
pwm.setLevel(self.BIN2, 1)
else:
speed = speed * 0.8
pwm.setDutycycle(self.PWMB, speed)
pwm.setLevel(self.BIN1, 1)
pwm.setLevel(self.BIN2, 0)
def MotorStop(self, motor):
if (motor == 0):
pwm.setDutycycle(self.PWMA, 0)
else:
pwm.setDutycycle(self.PWMB, 0)
try:
Motor = MotorDriver()
# control 2 motor
Motor.MotorRun(0, 'slowright', 100)
Motor.MotorRun(1, 'slowright', 50)
#print("sssssssss1")
while(1):
            time.sleep(1)
except IOError as e:
print(e)
except KeyboardInterrupt:
print("\r\nctrl + c:")
Motor.MotorRun(0, 'forward', 0)
Motor.MotorRun(1, 'backward', 0)
exit()
``` |
{
"source": "jinshengye-git/turtlesim_dash_tutorial",
"score": 3
} |
#### File: src/turtlesim_dash_tutorial/dashboard.py
```python
from __future__ import print_function, division
import os
import sys
import time
import json
import signal
import traceback
import numpy as np
from threading import Lock
import rospy
import rospkg
import actionlib
from actionlib_msgs.msg import GoalStatus
from turtlesim.msg import Pose
from turtle_actionlib.msg import ShapeAction, ShapeGoal
# Plotly, Dash, and Flask
import plotly.graph_objs as go
import dash
import dash_core_components as dcc
import dash_html_components as html
from flask import jsonify
# Helper functions and constants (should ideally be in a utils module)
GOAL_STATUS_TO_TXT = { getattr(GoalStatus, x): x for x in dir(GoalStatus) if x.isupper() }
# The app definition
APP = dash.Dash(
__name__,
assets_folder=os.path.join(rospkg.RosPack().get_path('turtlesim_dash_tutorial'), 'dash_assets'),
external_stylesheets=[
{
'href': 'https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css',
'rel': 'stylesheet',
'integrity': 'sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T',
'crossorigin': 'anonymous',
},
]
)
class Dashboard(object):
"""
Create a Flask server to display the UI and a ROS node to send commands to
the turtlesim
"""
# Flask
APP_HOST = '0.0.0.0'
APP_PORT = 8080
APP_STATUS_URL = '/ros_api/status'
APP_STATUS_ENDPOINT = 'ros_status'
# Actions, Topics, and Services
# Note that although the values are hard-coded for now, these can be set via
# service or ROS params if need be (a trivial update)
TURTLE_SHAPE_ACTION_NAME = 'turtle_shape'
TURTLE_POSE_TOPIC = '/turtle1/pose'
# Constants that determine the behaviour of the dashboard
# Pose is published at ~62 Hz; so we'll see ~30 sec of history. Note that
# these parameters could be set through ROS parameters or services too!
POSE_UPDATE_INTERVAL = 5
POSE_MAX_TIMESTEPS = 2000
POSE_ATTRIBUTES = ['x', 'y', 'theta', 'linear_velocity', 'angular_velocity']
# Constants for pertinent output fields
SERVER_STATUS_OUTPUT_FORMAT = "Shape Server Status: {status}"
def __init__(self):
global APP
# The Flask application
self._app = APP
self._flask_server = self._app.server
# Create the stop signal handler
signal.signal(signal.SIGINT, self.stop)
# Initialize the variables that we'll be using to save information
self._server_status = GoalStatus.LOST
self._pose_history = np.ones(
(1+len(Dashboard.POSE_ATTRIBUTES), Dashboard.POSE_MAX_TIMESTEPS)) * np.nan
self._history_length = 0
self._pose_history_lock = Lock()
# Setup the subscribers, action clients, etc.
self._shape_client = actionlib.SimpleActionClient(Dashboard.TURTLE_SHAPE_ACTION_NAME, ShapeAction)
self._pose_sub = rospy.Subscriber(Dashboard.TURTLE_POSE_TOPIC, Pose, self._on_pose)
# Initialize the application
self._define_app()
@property
def pose_history(self):
return self._pose_history[:, :self._history_length]
def start(self):
rospy.loginfo("Connecting to turtle_shape...")
self._shape_client.wait_for_server()
rospy.loginfo("...turtle_shape connected.")
self._app.run_server(host=Dashboard.APP_HOST,
port=Dashboard.APP_PORT,
debug=False)
def stop(self, *args, **kwargs):
# Give some time for rospy to shutdown (cannot use rospy now!)
print("Shutting down Dash server")
time.sleep(2)
sys.exit(0)
def _define_app(self):
"""
Define the app layout and callbacks here
"""
# Define each component of the page
# First the graph element that will plot the pose and velocity of the
# robot
pose_graph_layout = html.Div(dcc.Graph(id='pose', style={ 'width': '100%' }), className='row')
# Then the section that will update the parameters for the shape that
# the turtle will trace in the turtle sim
shape_params_layout = html.Div(
[
dcc.Input(id="shape-edges", type='number', placeholder='Num Edges', className='col mx-2'),
dcc.Input(id="shape-radius", type='number', placeholder='Radius', className='col mx-2'),
html.Button("Trace Shape", id='trace-button', n_clicks=0, className='btn btn-large btn-primary col-3'),
],
className='row'
)
# Then the section that will display the status of the shape server
server_status_layout = html.Div(
dcc.Markdown(id='server-status', className='col'),
className='row my-2'
)
# String them all together in a single page
self._app.layout = html.Div(
[
# Hidden button for JS polling
html.Button(id='refresh-status', n_clicks=0, style={ 'display': 'none' }),
# The params for tracing the shape
html.Div(html.H3('Shape Tracing:', className='col'), className='row mt-4'),
shape_params_layout,
server_status_layout,
# The section showing the action status
html.Div(html.H3('Pose History:', className='col'), className='row my-2'),
pose_graph_layout,
# The interval component to update the plots
dcc.Interval(id='interval-component',
n_intervals=0,
interval=(Dashboard.POSE_UPDATE_INTERVAL * 1000)),
],
className="container"
)
# Define callbacks to update the elements on the page
self._app.callback(
dash.dependencies.Output('pose', 'figure'),
[dash.dependencies.Input('interval-component', 'n_intervals')]
)(self._define_pose_history_callback())
# Define a callback to send the goal to the server when the 'Trace'
# button is clicked. Wait until the client is done executing
self._app.callback(
dash.dependencies.Output('trace-button', 'autoFocus'),
[dash.dependencies.Input('trace-button', 'n_clicks')],
[dash.dependencies.State('shape-edges', 'value'),
dash.dependencies.State('shape-radius', 'value')]
)(self._define_trace_shape_callback())
# Define a callback to show the status of the server
self._app.callback(
dash.dependencies.Output('server-status', 'children'),
[dash.dependencies.Input('refresh-status', 'n_clicks')]
)(self._define_server_status_callback())
# Add the flask API endpoints
self._flask_server.add_url_rule(
Dashboard.APP_STATUS_URL,
Dashboard.APP_STATUS_ENDPOINT,
self._flask_status_endpoint
)
def _define_server_status_callback(self):
"""
Define a callback to populate the server status display when the status
refresh button (hidden) is pressed
"""
def server_status_callback(n_clicks):
status = GOAL_STATUS_TO_TXT.get(self._server_status)
return Dashboard.SERVER_STATUS_OUTPUT_FORMAT.format(**locals())
return server_status_callback
def _define_trace_shape_callback(self):
"""
Define a callback that will be invoked every time the 'Trace' button is
clicked.
"""
def trace_shape_callback(n_clicks, num_edges, radius):
# Ignore the 'click' event when the component is created
if n_clicks is None or n_clicks == 0:
return False
# Coerce the input data into formats that we can use
try:
num_edges = int(num_edges)
radius = float(radius)
except Exception as e:
rospy.logerr("Error parsing params - {}\n{}".format(e, traceback.format_exc()))
return False
# Create the goal and send it to the action server
goal = ShapeGoal(edges=num_edges, radius=radius)
self._shape_client.send_goal(goal)
self._server_status = GoalStatus.ACTIVE
# Wait for a result
self._shape_client.wait_for_result()
# Finally, update the status, log the result, and return true
self._server_status = self._shape_client.get_state()
result = self._shape_client.get_result()
rospy.loginfo("ShapeServer: Interior Angle - {result.interior_angle}, Apothem - {result.apothem}".format(**locals()))
return True
return trace_shape_callback
def _define_pose_history_callback(self):
"""
Define a callback that will be invoked on every update of the interval
component. Keep in mind that we return a callback here; not a result
"""
def pose_history_callback(n_intervals):
# Get a view into the latest pose history
pose_history = self.pose_history
# Create the output graph
data = [
go.Scatter(
name=attr,
x=pose_history[0, :],
y=pose_history[idx+1, :],
mode='lines+markers'
)
for idx, attr in enumerate(Dashboard.POSE_ATTRIBUTES)
]
layout = go.Layout(
showlegend=True,
height=500,
yaxis=dict(
fixedrange=True
),
margin=dict(
autoexpand=True
)
)
return { 'data': data, 'layout': layout }
return pose_history_callback
def _on_pose(self, msg):
"""
The callback for the position of the turtle on
:const:`TURTLE_POSE_TOPIC`
"""
if self._history_length == Dashboard.POSE_MAX_TIMESTEPS:
self._pose_history[:, :-1] = self._pose_history[:, 1:]
else:
self._history_length += 1
self._pose_history[:, self._history_length-1] = [
rospy.Time.now().to_time() % 1000,
msg.x,
msg.y,
msg.theta,
msg.linear_velocity,
msg.angular_velocity,
]
def _flask_status_endpoint(self):
return jsonify({
'server_status': self._server_status,
})
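# --- Usage sketch (added for illustration; not part of the original file) ---
# A minimal entry point, assuming this module is launched directly as a ROS node.
# The node name 'turtlesim_dashboard' is a hypothetical choice.
if __name__ == '__main__':
    rospy.init_node('turtlesim_dashboard')
    dashboard = Dashboard()
    dashboard.start()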
``` |
{
"source": "jinshisai/StatsVfield",
"score": 3
} |
#### File: StatsVfield/statsvfield/analysis_tools.py
```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import leastsq
def binning(bin_e, coordinates, data):
'''
Bin data onto the given bin edges, according to a set of coordinates and the corresponding data values.
'''
#bin_c = 0.5 .*(bin_e[2:length(bin_e)] .+ bin_e[1:length(bin_e)-1])
d_bin = np.zeros(len(bin_e)-1)
for i in range(len(bin_e)-1):
indx = np.where( (coordinates >= bin_e[i]) & (coordinates < bin_e[i+1]))
if len(indx[0]) == 0:
d_bin[i] = np.nan
else:
d_bin[i] = np.nanmean(data[indx])
return d_bin
def plawfit(x, y, pini, sig=None, xlim=[], cutzero=True, x0=None, mode='lin', printres=True):
'''
Fit a power law y = c * (x/x0)^p to (x, y), either directly (mode='lin') or as a
straight line in log-log space (mode='log').
'''
from scipy.optimize import leastsq
# fit function
# power law
plaw = lambda x, x0, param: param[0]*((x/x0)**(param[1]))
errfunc = lambda param, x, y, sig, x0: (plaw(x, x0, param) - y)/sig
#res = leastsq(errfunc, [1e-3, -3], args=(freq_fft[1:], np.abs(res_spec[1:])**2.))
# linear
fln = lambda x, x0, param: param[0] + param[1]*(x - x0)
errfunc2 = lambda param, x, y, sig, x0: (fln(x, x0, param) - y)/sig
# fitting range
if len(xlim) == 2:
where_fit = (x > xlim[0]) & (x <= xlim[-1])
y_fit = y[where_fit]
x_fit = x[where_fit]
if type(sig).__name__ == 'ndarray':
sig_fit = sig[where_fit]
else:
sig_fit = sig
else:
y_fit = y
x_fit = x
sig_fit = sig
if mode == 'lin':
if type(sig).__name__ == 'NoneType':
sig_fit = 1
if type(x0).__name__ == 'NoneType':
x0_fit = 1
res = leastsq(errfunc, pini, args=(x_fit, y_fit, sig_fit, x0_fit), full_output=True)
pout = res[0]
pcov = res[1]
chi2 = np.sum(errfunc(pout, x_fit, y_fit, sig_fit, x0_fit)**2.)
elif mode == 'log':
if type(x0).__name__ == 'NoneType':
x0_fit = 0.
else:
x0_fit = np.log10(x0)
if type(sig).__name__ == 'NoneType':
sig_fit = 1
res = leastsq(errfunc2, pini,
args=(np.log10(x_fit), np.log10(y_fit), sig_fit, x0_fit),
full_output=True)
else:
res = leastsq(errfunc2, pini,
args=(np.log10(x_fit), np.log10(y_fit), sig_fit/(y_fit*np.log(10)), x0_fit),
full_output=True)
pout = res[0]
pcov = res[1]
chi2 = np.sum(errfunc2(pout, np.log10(x_fit), np.log10(y_fit), sig_fit/(y_fit*np.log(10)), x0_fit)**2.)
else:
print('ERROR\tplawfit: mode must be lin or log.')
return
ndata = len(x_fit)
nparam = len(pout)
dof = ndata - nparam - 1
reduced_chi2 = chi2/dof
# parameter errors
if (dof >= 0) and (pcov is not None):
pcov = pcov*reduced_chi2
else:
pcov = np.full((nparam, nparam),np.inf)
perr = np.array([
np.abs(pcov[j][j])**0.5 for j in range(nparam)
])
if printres:
print('Power-law fit')
print('pini: (c, p) = (%.4e, %.4e)'%(pini[0], pini[1]))
print('pout: (c, p) = (%.4e, %.4e)'%(pout[0], pout[1]))
print('perr: (sig_c, sig_p) = (%.4e, %.4e)'%(perr[0], perr[1]))
print('reduced chi^2: %.4f'%reduced_chi2)
return pout, perr
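# --- Usage sketch (added for illustration; not part of the original module) ---
# Bin noisy power-law data and fit the slope in log-log space. The bin edges and
# the initial guess pini = [1., -2.] are arbitrary example values.
if __name__ == '__main__':
    x = np.linspace(1., 100., 500)
    y = 3. * x**-2. * (1. + 0.05 * np.random.randn(x.size))
    bin_e = np.linspace(1., 100., 21)        # bin edges
    bin_c = 0.5 * (bin_e[1:] + bin_e[:-1])   # bin centres
    y_bin = binning(bin_e, x, y)
    pout, perr = plawfit(bin_c, y_bin, [1., -2.], mode='log')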
```
#### File: StatsVfield/statsvfield/_statsvfield.py
```python
import numpy as np
from scipy.fft import fft, ifft, fftn, ifftn, fftfreq, fftshift, ifftshift
from scipy.fft import rfftfreq
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
#import seaborn as sns
#sns.set_palette('gist_earth')
# Class StatsVF
class StatsVfield():
def __init__(self, data, axes, derr=[]) -> None:
self.data = data
self.datashape = data.shape
self.ndim = len(data.shape)
self.derr = derr
if type(axes) == list:
if len(axes) != self.ndim:
print ('ERROR: Dimension of given data and axes do not match.')
return
elif type(axes).__name__ == 'ndarray':
if len(axes.shape) != self.ndim:
print ('ERROR: Dimension of given data and axes do not match.')
return
else:
print ('ERROR: axes must be list or ndarray containing xi, or ndarray of x.')
return
if self.ndim == 1:
self.nx = self.datashape[0]
if type(axes) == list:
self.x = axes[0]
elif type(axes).__name__ == 'ndarray':
self.x = axes
self.dx = self.x[1] - self.x[0]
elif self.ndim == 2:
self.nx, self.ny = self.datashape
self.x, self.y = axes
self.dx = self.x[1] - self.x[0]
self.dy = self.y[1] - self.y[0]
elif self.ndim == 3:
self.nx, self.ny, self.nz = self.datashape
self.x, self.y, self.z = axes
self.dx = self.x[1] - self.x[0]
self.dy = self.y[1] - self.y[0]
self.dz = self.z[1] - self.z[0]
elif self.ndim > 3:
print ('ERROR: Dimension must be <= 3.')
return
self.acf = []
self.sf = []
self.tau_x = []
def calc_sf(self, p_order=2):
'''
Calculate the second-order structure function (SF).
Other orders will be supported in the future.
Usage
-----
vf = StatsVfield(data, axes)
vf.calc_sf()
vf.sf # call the calculated SF
Parameters
----------
- p_order: Order of the structure function. Currently not used.
'''
if self.ndim == 1:
if len(self.derr) == 0:
self.sf = sf_1d(self.data)
else:
self.sf, self.sf_err = sf_1d(self.data, derr=self.derr)
elif self.ndim == 2:
if len(self.derr) == 0:
self.sf = sf_2d(self.data)
else:
self.sf, self.sf_err = sf_2d(self.data, derr=self.derr)
elif self.ndim == 3:
print ('3D is being developed.')
return
self.get_tau(realfreq=True)
def calc_ac(self, method='FFT', realfreq=False):
'''
Calculate autocorrelation (AC).
Usage
-----
vf = StatsVfield(data, axes)
vf.calc_ac()
vf.acf # call the calculated ACF
Parameters
----------
- method: Calculation method, 'FFT' or 'iterative'. FFT mode uses the fast Fourier transform,
while iterative mode computes the ACF directly by sliding the input data against itself.
- realfreq: If True, only the ACF at non-negative tau is returned. Only applies to one-dimensional data sets.
'''
if self.ndim == 1:
if method == 'FFT':
self.acf = ac_fft1(self.data, realfreq=realfreq)
elif method == 'iterative':
if len(self.derr) == 0:
self.acf = ac_1d(self.data, realfreq=realfreq)
else:
self.acf, self.acf_err = ac_1d(self.data, derr=self.derr, realfreq=realfreq)
elif self.ndim == 2:
if method == 'FFT':
self.acf = ac_fft2(self.data)
elif method == 'iterative':
if len(self.derr) == 0:
self.acf = ac_2d(self.data)
else:
self.acf, self.acf_err = ac_2d(self.data, derr=self.derr)
#if len(self.tau_x) == 0:
self.get_tau(realfreq=realfreq)
def calc_ps(self, method='FFT', realfreq=False):
'''
Calculate power-spectrum (PS). Still under development.
Usage
-----
Coming soon..
'''
if self.ndim == 1:
self.ps = pspec_1d(self.data, realfreq=realfreq)
elif self.ndim == 2:
print ('Still being developed, sorry.')
#self.ps = pspec_2d(self.data, realfreq=realfreq)
if realfreq:
self.freq_x = rfftfreq(self.nx + self.nx - 1, self.dx) # nx -1 is for zero-padding
else:
self.freq_x = fftshift(fftfreq(self.nx + self.nx - 1, self.dx))
#print(len(self.ps), len(self.freq_x))
def get_tau(self, realfreq=False):
'''
Get tau for ACF and SF.
Parameters
----------
- realfreq: For one-dimensional data sets, if True, only non-negative tau is returned.
'''
if self.ndim == 1:
if realfreq:
self.tau_x = np.arange(0, self.nx, 1)*self.dx
else:
self.tau_x = np.concatenate([np.arange(-(self.nx - 1), 0, 1)*self.dx, np.arange(0, self.nx, 1)*self.dx])
elif self.ndim == 2:
self.tau_x = np.concatenate([np.arange(-(self.nx - 1), 0, 1)*self.dx, np.arange(0, self.nx, 1)*self.dx])
self.tau_y = np.concatenate([np.arange(-(self.ny - 1), 0, 1)*self.dy, np.arange(0, self.ny, 1)*self.dy])
elif self.ndim == 3:
print ('3D is being developed.')
return
def collapse(self):
if self.ndim == 1:
print ('Data is one dimensional. No more collapse.')
return
elif self.ndim == 2:
tau_xx, tau_yy = np.meshgrid(self.tau_x, self.tau_y)
tau_rr = np.sqrt(tau_xx*tau_xx + tau_yy*tau_yy)
tau_sort = np.unique(tau_rr)
self.tau_col = tau_sort
if len(self.acf) != 0:
self.acf_col = np.array([
np.nanmean(self.acf[tau_rr == tau_i]) for tau_i in tau_sort])
self.acf_err_col = np.array([
np.sqrt(np.nansum(self.acf_err[tau_rr == tau_i]**2))/np.count_nonzero(~np.isnan(self.acf_err[tau_rr == tau_i]))
for tau_i in tau_sort])
if len(self.sf) !=0:
self.sf_col = np.array([
np.nanmean(self.sf[tau_rr == tau_i]) for tau_i in tau_sort])
self.sf_err_col = np.array([
np.sqrt(np.nansum(self.sf_err[tau_rr == tau_i]**2))/np.count_nonzero(~np.isnan(self.sf_err[tau_rr == tau_i]))
for tau_i in tau_sort])
def get_tauzero(self):
if self.ndim == 2:
print ('Currently get_tauzero only supports one-dimensional data.')
return
if 'acf' in self.__dict__.keys():
indx = [i for i in range(len(self.acf)-1) if self.acf[i]*self.acf[i+1] <=0]
if len(indx) > 0:
indx_tau0 = indx[0]
self.tau0 = self.tau_x[indx_tau0]
else:
self.tau0 = np.nan
else:
print ('ACF is not found. Calculate ACF first by vf.calc_ac().')
return
def sf_plawfit(self, pini, taurange=[], cutzero=True):
'''
Fit a power law to the calculated structure function over the given tau range
(in log-log space); the result is stored in self.fit_results.
'''
from scipy.optimize import leastsq
# fit function
# power law
plaw = lambda x, param: param[0]*(x**(param[1]))
errfunc = lambda param, x, y: plaw(x, param) - y
#res = leastsq(errfunc, [1e-3, -3], args=(freq_fft[1:], np.abs(res_spec[1:])**2.))
# linear
fln = lambda x, param: param[0] + param[1]*x
errfunc2 = lambda param, x, y: fln(x, param) - y
# fit param
if cutzero:
tau_fit = self.tau_x[1:]
sf_fit = self.sf[1:]
else:
tau_fit = self.tau_x
sf_fit = self.sf
# fitting range
if len(taurange) == 2:
where_fit = (tau_fit > taurange[0]) & (tau_fit <= taurange[-1])
sf_fit = sf_fit[where_fit]
tau_fit = tau_fit[where_fit]
#res = leastsq(errfunc2, [-3, -3], args=(np.log10(tau_sf[where_fit]), np.log10(sf_slice[where_fit])))
#p_out = res[0]
res = leastsq(errfunc2, pini, args=(np.log10(tau_fit), np.log10(sf_fit)))
pout = res[0]
self.fit_results = dict({'pini': pini, 'pout': pout})
# functions for debug
def gaussian2D(x, y, A, mx, my, sigx, sigy, pa=0, peak=True):
'''
Generate normalized 2D Gaussian
Parameters
----------
x: x value (coordinate)
y: y value
A: Amplitude. Not a peak value, but the integrated value.
mx, my: mean values
sigx, sigy: standard deviations
pa: position angle [deg]. Counterclockwise is positive.
'''
x, y = rotate2d(x,y,pa)
mx, my = rotate2d(mx, my, pa)
if peak:
coeff = A
else:
coeff = A/(2.0*np.pi*sigx*sigy)
expx = np.exp(-(x-mx)*(x-mx)/(2.0*sigx*sigx))
expy = np.exp(-(y-my)*(y-my)/(2.0*sigy*sigy))
gauss=coeff*expx*expy
return gauss
# main functions
# autocorrelation function
def ac_1d(data, derr=[], realfreq=True):
'''
Calculate auto-correlation.
Parameters
----------
Return
------
'''
#from itertools import product
nx = len(data)
d_in = data.copy() - np.nanmean(data)
if realfreq:
# auto-correlation
d_ac = np.array([
np.nanmean(d_in[0:nx-j]*d_in[j:nx]) for j in range(nx)])/np.nanvar(data)
else:
# zero-padding
d_in = np.concatenate([d_in, np.zeros(nx-1)])
d_shift = data.copy() - np.nanmean(data)
d_shift = np.concatenate([np.zeros(nx-1), d_shift])
# replace zero with nan to skip
d_in[d_in == 0.] = np.nan
d_shift[d_shift == 0.] = np.nan
nx_out = 2*nx - 1
d_ac = np.array([
np.nanmean(d_in[0:nx_out-i]*d_shift[i:nx_out]) for i in range(nx_out)
])/np.nanvar(data)
if len(derr) == 0:
return d_ac
else:
# error propagation
if realfreq:
d_in_err = derr.copy() # assuming error of mean can be ignored
d_ac_err = np.array([
np.sqrt(np.nansum((d_in[0:nx-j]*d_in_err[j:nx])**2\
+ (d_in[j:nx]*d_in_err[0:nx-j])**2 ))\
/np.count_nonzero(~np.isnan(d_in[0:nx-j]*d_in[j:nx])) for j in range(nx)])/np.nanvar(data)
else:
# zero-padding
d_in_err = np.concatenate([derr, np.zeros(nx-1)])
d_shift_err = np.concatenate([np.zeros(nx-1), derr])
d_in_err[d_in_err == 0.] = np.nan
d_shift_err[d_shift_err == 0.] = np.nan
# error of each element:
# (m1 +/- sig1)*(m2 +/- sig2) = m1*m2 +/- sqrt((m1*sig2)^2 + (m2*sig1)^2)
# error of mean
# sqrt(Sum(sig_i^2))/N
d_ac_err = np.array([
np.sqrt(np.nansum((d_in[0:nx_out-i]*d_shift_err[i:nx_out])**2 \
+ (d_in_err[0:nx_out-i]*d_shift[i:nx_out])**2))\
/np.count_nonzero(~np.isnan(d_in[0:nx_out-i]*d_shift[i:nx_out])) for i in range(nx_out)
])/np.nanvar(data)
return d_ac, d_ac_err
def ac_fft1(data, realfreq=False):
'''
Calculate auto-correlation using FFT.
'''
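    # Note (added comment): this is the Wiener-Khinchin approach -- the ACF is the
    # inverse FFT of the power spectrum |FFT(d)|^2. Zero-padding to length 2*nx-1
    # avoids circular wrap-around, and the division below re-weights each lag by
    # the number of overlapping samples that contribute to it.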
nx = len(data)
d_in = np.r_[data - np.nanmean(data), np.zeros(nx-1)] # zero-padding
d_ft = fft(d_in) # Fourier transform
d_ft_cnj = np.conjugate(fft(d_in)) # complex conjugate
d_ac = ifft(d_ft*d_ft_cnj).real
d_ac /= np.r_[np.arange(1,nx+1,1)[::-1], np.arange(1,nx,1)] # weighting
d_ac /= np.nanvar(data)
if realfreq:
d_ac = d_ac[:len(d_ac)//2+1]
else:
d_ac = fftshift(d_ac)
return d_ac
def ac_2d(data, derr=[]):
'''
Calculate auto-correlation.
Parameters
----------
Return
------
'''
nx, ny = data.shape
# zero-padding for convolution
d_in = data.copy() - np.nanmean(data)
d_in = np.r_[d_in, np.zeros((d_in.shape[0]-1,d_in.shape[1]))]
d_in = np.c_[d_in, np.zeros((d_in.shape[0],d_in.shape[1]-1))]
d_shift = data.copy() - np.nanmean(data)
d_shift = np.r_[np.zeros((d_shift.shape[0]-1,d_shift.shape[1])), d_shift]
d_shift = np.c_[np.zeros((d_shift.shape[0],d_shift.shape[1]-1)), d_shift]
# replace zero with nan to skip
d_in[d_in == 0.] = np.nan
d_shift[d_shift == 0.] = np.nan
# autocorrelation
nx_out = 2*nx - 1
ny_out = 2*ny - 1
d_ac = np.array([
[np.nanmean(
d_in[:nx_out - k, :ny_out - l] * d_shift[k:nx_out, l:ny_out])
for l in range(ny_out)] for k in range(nx_out)])
d_ac /= np.nanvar(data)
if len(derr) == 0:
return d_ac
else:
# error propagation
# zero-padding
d_in_err = derr.copy()
d_in_err = np.r_[d_in_err, np.zeros((d_in_err.shape[0]-1, d_in_err.shape[1]))]
d_in_err = np.c_[d_in_err, np.zeros((d_in_err.shape[0], d_in_err.shape[1]-1))]
d_shift_err = derr.copy()
d_shift_err = np.r_[np.zeros((d_shift_err.shape[0]-1, d_shift_err.shape[1])), d_shift_err]
d_shift_err = np.c_[np.zeros((d_shift_err.shape[0], d_shift_err.shape[1]-1)), d_shift_err]
d_in_err[d_in_err == 0.] = np.nan
d_shift_err[d_shift_err == 0.] = np.nan
# error of each element:
# (m1 +/- sig1)*(m2 +/- sig2) = m1*m2 +/- sqrt((m1*sig2)^2 + (m2*sig1)^2)
# error of mean
# sqrt(Sum(sig_i^2))/N
d_ac_err = np.array([[
np.sqrt(np.nansum((d_in[:nx_out - k, :ny_out - l]*d_shift_err[k:nx_out, l:ny_out])**2 \
+ (d_in_err[:nx_out - k, :ny_out - l]*d_shift[k:nx_out, l:ny_out])**2))\
/np.count_nonzero(~np.isnan(d_in[:nx_out - k, :ny_out - l]*d_shift[k:nx_out, l:ny_out]))
for l in range(ny_out)] for k in range(nx_out)]
)/np.nanvar(data)
return d_ac, d_ac_err
def ac_fft2(data):
nx, ny = data.shape
d_in = data.copy()
d_in[np.isnan(d_in)] = 0. # fill nan with zero
d_in -= np.nanmean(data)
# zero-padding
d_in = np.r_[d_in, np.zeros((d_in.shape[0]-1,d_in.shape[1]))] # zero-padding for convolution
d_in = np.c_[d_in, np.zeros((d_in.shape[0],d_in.shape[1]-1))] # zero-padding for convolution
d_ft = fftn(d_in) # Fourier transform
d_ft_cnj = np.conjugate(d_ft) # complex conjugate
d_ac = ifftn(d_ft*d_ft_cnj).real
# weighting with sample number
#print(d_ac.shape[0], nx)
wx = np.concatenate([np.arange(1, nx+1, 1), np.arange(nx-1, 0, -1)])
wx = ifftshift(wx)
wy = np.concatenate([np.arange(1, ny+1, 1), np.arange(ny-1, 0, -1)])
wy = ifftshift(wy)
#wx = np.r_[np.arange(1, d_ac.shape[0]//2+2, 1)[::-1], np.arange(1,d_ac.shape[0]//2+1,1)]
#wy = np.r_[np.arange(1, d_ac.shape[1]//2+2, 1)[::-1], np.arange(1,d_ac.shape[1]//2+1,1)]
wxx, wyy = np.meshgrid(wx, wy)
d_ac /= (wxx*wyy)*np.nanvar(data)
#if realfreq:
# print("Resultant ACF has only the positive axis.")
# print("The output axis length is nx/2.")
# d_ac = d_ac[0:d_ac.shape[1]//2+1,0:d_ac.shape[0]//2+1]
#else:
d_ac = ifftshift(d_ac)
return d_ac
# structure function
def sf_1d(data, derr=[]):
'''
Calculate the structure function.
Parameters
----------
Return
------
'''
nx = len(data)
d_sf = np.array([
np.nanmean((data[:nx-i] - data[i:nx])**2.) for i in range(nx)
])
if len(derr) == 0:
return d_sf
else:
# error propagation
d_sf_err = np.array([
np.sqrt(np.nansum((4.* (data[:nx-i] - data[i:nx])**2. * (derr[:nx-i]**2 + derr[i:nx]**2.))))\
/np.count_nonzero(~np.isnan((data[:nx-i] - data[i:nx]))) for i in range(nx)
])
return d_sf, d_sf_err
def sf_2d(data, derr=[], normalize=False):
'''
Calculate auto-correlation.
Parameters
----------
Return
------
'''
nx, ny = data.shape
# zero-padding for convolution
d_in = data.copy() - np.nanmean(data)
d_in = np.r_[d_in, np.zeros((d_in.shape[0]-1,d_in.shape[1]))]
d_in = np.c_[d_in, np.zeros((d_in.shape[0],d_in.shape[1]-1))]
d_shift = data.copy() - np.nanmean(data)
d_shift = np.r_[np.zeros((d_shift.shape[0]-1,d_shift.shape[1])), d_shift]
d_shift = np.c_[np.zeros((d_shift.shape[0],d_shift.shape[1]-1)), d_shift]
# replace zero with nan to skip
d_in[d_in == 0.] = np.nan
d_shift[d_shift == 0.] = np.nan
# structure function
nx_out = 2*nx - 1
ny_out = 2*ny - 1
d_sf = np.array([[
np.nanmean(
(d_in[:nx_out - k, :ny_out - l] - d_shift[k:nx_out, l:ny_out])**2. )
for l in range(ny_out)] for k in range(nx_out)])
if normalize:
d_sf /= d_sf[0,0]
if len(derr) == 0:
return d_sf
else:
# error propagation
# zero-padding
d_in_err = derr.copy()
d_in_err = np.r_[d_in_err, np.zeros((d_in_err.shape[0]-1, d_in_err.shape[1]))]
d_in_err = np.c_[d_in_err, np.zeros((d_in_err.shape[0], d_in_err.shape[1]-1))]
d_shift_err = derr.copy()
d_shift_err = np.r_[np.zeros((d_shift_err.shape[0]-1, d_shift_err.shape[1])), d_shift_err]
d_shift_err = np.c_[np.zeros((d_shift_err.shape[0], d_shift_err.shape[1]-1)), d_shift_err]
d_in_err[d_in_err == 0.] = np.nan
d_shift_err[d_shift_err == 0.] = np.nan
d_sf_err = np.array([[
np.sqrt(np.nansum((4.* (d_in[:nx_out - k, :ny_out - l] - d_shift[k:nx_out, l:ny_out])**2.\
* (d_in_err[:nx_out - k, :ny_out - l]**2. + d_shift_err[k:nx_out, l:ny_out]**2.))))\
/np.count_nonzero(~np.isnan(d_in[:nx_out - k, :ny_out - l] - d_shift[k:nx_out, l:ny_out]))
for l in range(ny_out)] for k in range(nx_out)])
return d_sf, d_sf_err
def pspec_1d(data, realfreq=False):
'''
Calculate Power-spectrum using FFT.
'''
nx = len(data)
d_in = np.r_[data - np.nanmean(data), np.zeros(nx-1)] # zero-padding
d_ft = fft(d_in) # Fourier transform
d_ft_cnj = np.conjugate(fft(d_in)) # complex conjugate
d_ps = (d_ft*d_ft_cnj).real # Power spectrum
if realfreq:
d_ps = d_ps[:len(d_ps)//2+1]
else:
d_ps = fftshift(d_ps)
return d_ps
def binning(bin_e, coordinates, data):
'''
Bin data onto the given bin edges, according to a set of coordinates and the corresponding data values.
'''
#bin_c = 0.5 .*(bin_e[2:length(bin_e)] .+ bin_e[1:length(bin_e)-1])
d_bin = np.zeros(len(bin_e)-1)
for i in range(len(bin_e)-1):
indx = np.where( (coordinates >= bin_e[i]) & (coordinates < bin_e[i+1]))
if len(indx[0]) == 0:
d_bin[i] = np.nan
else:
d_bin[i] = np.nanmean(data[indx])
return d_bin
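# --- Usage sketch (added for illustration; not part of the original module) ---
# One-dimensional example following the class docstrings above: a noisy sine
# signal, its second-order structure function and its FFT-based autocorrelation.
def _demo_1d():
    x = np.linspace(0., 10., 256)
    v = np.sin(2. * np.pi * x / 3.) + 0.1 * np.random.randn(x.size)
    vf = StatsVfield(v, [x])
    vf.calc_sf()                              # second-order structure function
    vf.calc_ac(method='FFT', realfreq=True)   # autocorrelation via FFT
    return vf.tau_x, vf.sf, vf.acf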
# for debug
def main():
# --------- input --------
# test with sin curve
nx, ny = [32, 32]
x = np.linspace(-np.pi,np.pi,nx)
y = np.linspace(-np.pi,np.pi,nx)
dx = x[1] - x[0]
dy = y[1] - y[0]
phi = 0.*np.pi # phase shift
# ------------------------
# ---------- start ---------
# grid
xx, yy = np.meshgrid(x, y, indexing='ij')
z = np.sin(xx+phi) + np.sin(yy+phi)
# --------------------------
if __name__ == '__main__':
main()
``` |
{
"source": "jinshiyi11/AppBuilder",
"score": 2
} |
#### File: jinshiyi11/AppBuilder/AppBuilder.py
```python
#coding=UTF-8
'''
Created on 2013-4-24
'''
import argparse
import os
import sys
import shutil
import xml.etree.ElementTree
from xml.etree.ElementTree import ElementTree
import Util
KEY_DIR="../MyAppKey/"
class BuildInfo(object):
'''
productName      product name; used to build the backup path
manualVersion    the first three parts of versionName in AndroidManifest.xml (the fourth part is generated by this script and records the build count)
buildCount       corresponds to versionCode (i.e. the number of builds)
autoPartVersion  the fourth part of versionName (generated automatically by this script)
updateVersion    whether this build bumps the version number; defaults to True
channel          channel id, written to UMENG_CHANNEL in AndroidManifest.xml so that Umeng analytics can track install channels
'''
def __init__(self):
self.backupRootDir="E:/backup/"
self.appRootDir=""
self.productName=""
self.manualVersion=""
self.buildCount=1
self.updateVersion=True
self.channel=""
self.isYoumengChannel=False
self.autoPartVersion=1
def getProductVersion(self):
"返回产品的4位版本号,如1.1.1.1"
#return self.manualVersion+"."+str(self.autoPartVersion)
return "%s.%04d" % (self.manualVersion,self.autoPartVersion)
def getProductBackupDir(self):
"z:/backup-xxx/"
return self.backupRootDir+"backup-"+self.productName+"/"
def getBaseBackupDir(self):
"z:/backup-xxx/1.1.1/"
return self.backupRootDir+"backup-"+self.productName+"/"+self.manualVersion+"/"
def getBackupDir(self):
"z:/backup-xxx/1.1.1/1.1.1.1/"
return self.getBaseBackupDir()+"/"+self.getProductVersion()+"/"
info=BuildInfo()
def initVersion():
"初始化版本号"
mainfestPath=info.appRootDir+"/AndroidManifest.xml"
if not os.path.exists(mainfestPath):
sys.exit("AndroidManifest.xml文件不存在:"+mainfestPath)
xml.etree.ElementTree.register_namespace("android","http://schemas.android.com/apk/res/android")
tree = ElementTree()
xmlRoot=tree.parse(mainfestPath)
versionName=xmlRoot.get("{http://schemas.android.com/apk/res/android}versionName")
versionCode=xmlRoot.get("{http://schemas.android.com/apk/res/android}versionCode")
print "versionName:"+versionName
vers=versionName.split(".")
if len(vers)<3:
sys.exit("AndroidManifest.xml文件:"+mainfestPath+"中的versionName格式不正确:"+versionName)
info.manualVersion=vers[0]+"."+vers[1]+"."+vers[2]
baseBackupDir=info.getBaseBackupDir()
if not os.path.exists(baseBackupDir):
os.makedirs(baseBackupDir)
# get buildCount
buildCountFilePath=info.getProductBackupDir()+info.productName+".count"
if not os.path.exists(buildCountFilePath):
# create the buildCount file
f=open(buildCountFilePath,'a+')
f.write("buildCount="+str(info.buildCount))
f.write("autoPartVersion="+str(info.autoPartVersion))
f.close()
else:
# read the buildCount file and update buildCount
globalData={}
execfile(buildCountFilePath,globalData)
info.buildCount=globalData['buildCount']
if info.updateVersion:
info.buildCount=info.buildCount+1
info.autoPartVersion=info.autoPartVersion+1
f=open(buildCountFilePath,'w+')
f.write("buildCount="+str(info.buildCount))
f.write("autoPartVersion="+str(info.autoPartVersion))
f.close()
# create the backup directory for the current version
if not os.path.exists(info.getBackupDir()):
os.makedirs(info.getBackupDir())
# use buildCount as versionCode
versionCode=info.buildCount
# log output; the notification e-mail extracts information from these lines
print "file version:%s"%info.getProductVersion()
print "product version:%s"%info.getProductVersion()
print "version code:%s"%versionCode
#TODO:save xml
# update the versionName field in AndroidManifest.xml
xmlRoot.set("{http://schemas.android.com/apk/res/android}versionName",info.getProductVersion())
xmlRoot.set("{http://schemas.android.com/apk/res/android}versionCode",str(versionCode))
# update the UMENG_CHANNEL field in AndroidManifest.xml, used to track install channels
# if info.isYoumengChannel:
# print "using Youmeng channel"
# channelNode=tree.find("./application/meta-data[@{http://schemas.android.com/apk/res/android}name='UMENG_CHANNEL']")
# channelNode.set("{http://schemas.android.com/apk/res/android}value",info.channel)
# else:
# channelFilePath=info.appRootDir+"/assets/channel_id.txt"
## delete the old file first
# if os.path.exists(channelFilePath):
# os.remove(channelFilePath)
# f=open(channelFilePath,'w+')
# f.write(info.channel)
# f.close()
#tree.write(mainfestPath, "UTF-8", True, "http://schemas.android.com/apk/res/android")
tree.write(mainfestPath, "UTF-8", True)
# AndroidManifest.xml was modified above, so back it up to check that the changes are correct
shutil.copy(mainfestPath,info.getBackupDir())
def generateAntProperties(productName):
'''
For security reasons the signing key is kept only locally and not in svn; this function generates ant.properties, which specifies the signing information, from the local key file.
'''
keypath=KEY_DIR+productName+".py"
keyInfo=Util.load_module(keypath)
filePath=info.appRootDir+"/ant.properties"
print "开始删除ant.properties:"+filePath
if os.path.exists(filePath):
os.remove(filePath)
print "正在生成ant.properties"
f=open(filePath,'w+')
f.write("key.store=%s\r\n" % keyInfo.KEY_STORE_FILE_PATH)
f.write("key.alias=%s\r\n" % keyInfo.ALIAS_NAME)
f.write("key.store.password=%<PASSWORD>" % keyInfo.STORE_PASS)
f.write("key.alias.password=%<PASSWORD>" % keyInfo.KEY_PASS)
f.close()
import subprocess
def startBuild():
'Run the build'
#subprocess.check_call(["dir","/A"],shell=True)
subprocess.check_call(["ant","clean" ,"release"],shell=True,cwd=info.appRootDir)
#subprocess.check_call(["ant","release"])
def backupFiles():
'Back up the generated apk and the proguard-related files'
if info.channel:
print "channel:"+info.channel
shutil.copy(info.appRootDir+"/bin/"+info.productName+"-release.apk",info.getBackupDir()+info.productName+"_"+info.channel+".apk")
else:
shutil.copy(info.appRootDir+"/bin/"+info.productName+"-release.apk",info.getBackupDir()+info.productName+".apk")
if os.path.exists(info.appRootDir+"/bin/proguard"):
shutil.copytree(info.appRootDir+"/bin/proguard",info.getBackupDir()+"proguard")
else:
print "Warning:no progurad info!!!!!"
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='APK packaging script')
parser.add_argument('-p', dest='productName', required=True
, help='project name, e.g. browser')
parser.add_argument('-d', dest='appRootDir'
, help='app root directory, i.e. the directory containing AndroidManifest.xml; defaults to the current directory')
parser.add_argument('-u', dest='updateVersion',action='store_true',default=True
, help='whether to bump the version, i.e. update the android:versionName field in AndroidManifest.xml')
parser.add_argument('-c', dest='channel', required=False
, help='channel id')
args = parser.parse_args()
print sys.argv
print args
info.productName=args.productName
# use the current directory if appRootDir is not specified
if args.appRootDir:
# is it a relative path?
if args.appRootDir[0]=='.':
info.appRootDir=os.getcwd()+'/'+args.appRootDir
else:
info.appRootDir=args.appRootDir
os.chdir(info.appRootDir)
else:
info.appRootDir=os.getcwd()
print "当前目录:"+info.appRootDir
#updateVersion
info.updateVersion=args.updateVersion
#channel
if args.channel:
info.channel=args.channel
initVersion()
generateAntProperties(info.productName)
startBuild()
backupFiles()
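# Example invocation (illustrative; the product name, path and channel are hypothetical):
#   python AppBuilder.py -p browser -d ./browser -c googleplay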
``` |
{
"source": "JinShiyin/sast_backend",
"score": 2
} |
#### File: sast_backend/api/categories.py
```python
import json
import os
from flask_restplus import Namespace, Resource, reqparse
from flask_login import login_required, current_user
from mongoengine.errors import NotUniqueError
import datetime
from config import Config
api = Namespace('category', description='Category related operations')
os.makedirs(Config.CATEGORIES_DIRECTORY, exist_ok=True)
create_category = reqparse.RequestParser()
create_category.add_argument('name', required=True, location='json')
create_category.add_argument('supercategory', location='json')
create_category.add_argument('color', location='json')
create_category.add_argument('metadata', type=dict, location='json')
create_category.add_argument(
'keypoint_edges', type=list, default=[], location='json')
create_category.add_argument(
'keypoint_labels', type=list, default=[], location='json')
create_category.add_argument(
'keypoint_colors', type=list, default=[], location='json')
update_category = reqparse.RequestParser()
update_category.add_argument('name', required=True, location='json')
update_category.add_argument('supercategory', location='json')
update_category.add_argument('color', location='json')
update_category.add_argument('metadata', type=dict, location='json')
update_category.add_argument('keypoint_edges', type=list, location='json')
update_category.add_argument('keypoint_labels', type=list, location='json')
update_category.add_argument('keypoint_colors', type=list, location='json')
page_data = reqparse.RequestParser()
page_data.add_argument('page', default=1, type=int)
page_data.add_argument('limit', default=20, type=int)
@api.route('/')
class Category(Resource):
def get(self):
""" Returns all categories """
# return query_util.fix_ids(current_user.categories.all())
category_ids = os.listdir(Config.CATEGORIES_DIRECTORY)
categories = []
for c in category_ids:
categories.append(json.load(open(os.path.join(Config.CATEGORIES_DIRECTORY, c))))
return categories
@api.expect(create_category)
def post(self):
""" Creates a category """
args = create_category.parse_args()
name = args.get('name')
supercategory = args.get('supercategory')
metadata = args.get('metadata', {})
color = args.get('color')
keypoint_edges = args.get('keypoint_edges')
keypoint_labels = args.get('keypoint_labels')
keypoint_colors = args.get('keypoint_colors')
category_id = len(os.listdir(Config.CATEGORIES_DIRECTORY))
try:
category = {
'name': name,
'supercategory': supercategory,
'color': color,
'metadata': metadata,
'keypoint_edges': keypoint_edges,
'keypoint_labels': keypoint_labels,
'keypoint_colors': keypoint_colors,
}
with open(os.path.join(Config.CATEGORIES_DIRECTORY, f'{category_id}.json'), 'w') as f:
json.dump(category, f)
except NotUniqueError as e:
return {'message': 'Category already exists. Check the undo tab to fully delete the category.'}, 400
return category
@api.route('/<int:category_id>')
class Category(Resource):
def get(self, category_id):
""" Returns a category by ID """
# category = current_user.categories.filter(id=category_id).first()
category = json.load(open(os.path.join(Config.CATEGORIES_DIRECTORY, f'{category_id}.json')))
if category is None:
return {'success': False}, 400
# return query_util.fix_ids(category)
return category
def delete(self, category_id):
""" Deletes a category by ID """
category = current_user.categories.filter(id=category_id).first()
if category is None:
return {"message": "Invalid image id"}, 400
if not current_user.can_delete(category):
return {"message": "You do not have permission to delete this category"}, 403
category.update(set__deleted=True,
set__deleted_date=datetime.datetime.now())
return {'success': True}
@api.expect(update_category)
def put(self, category_id):
""" Updates a category name by ID """
category = current_user.categories.filter(id=category_id).first()
# check if the id exists
if category is None:
return {"message": "Invalid category id"}, 400
args = update_category.parse_args()
name = args.get('name')
supercategory = args.get('supercategory', category.supercategory)
color = args.get('color', category.color)
metadata = args.get('metadata', category.metadata)
keypoint_edges = args.get('keypoint_edges', category.keypoint_edges)
keypoint_labels = args.get('keypoint_labels', category.keypoint_labels)
keypoint_colors = args.get('keypoint_colors', category.keypoint_colors)
# check if there is anything to update
if category.name == name \
and category.supercategory == supercategory \
and category.color == color \
and category.keypoint_edges == keypoint_edges \
and category.keypoint_labels == keypoint_labels \
and category.keypoint_colors == keypoint_colors:
return {"message": "Nothing to update"}, 200
# check if the name is empty
if not name:
return {"message": "Invalid category name to update"}, 400
# update name of the category
# check if the name to update already exists in db
# @ToDo: Is it necessary to allow equal category names among different creators?
category.name = name
category.supercategory = supercategory
category.color = color
category.keypoint_edges = keypoint_edges
category.keypoint_labels = keypoint_labels
category.keypoint_colors = keypoint_colors
try:
category.update(
name=category.name,
supercategory=category.supercategory,
color=category.color,
metadata=category.metadata,
keypoint_edges=category.keypoint_edges,
keypoint_labels=category.keypoint_labels,
keypoint_colors=category.keypoint_colors,
)
except NotUniqueError:
# it is only triggered when the name already exists and the creator is the same
return {"message": "Category '" + name_to_update + "' already exits"}, 400
return {"success": True}
@api.route('/data')
class CategoriesData(Resource):
@api.expect(page_data)
def get(self):
""" Endpoint called by category viewer client """
pass
# args = page_data.parse_args()
# limit = args['limit']
# page = args['page']
#
# categories = current_user.categories.filter(deleted=False)
#
# pagination = Pagination(categories.count(), limit, page)
# categories = query_util.fix_ids(
# categories[pagination.start:pagination.end])
#
# for category in categories:
# category['numberAnnotations'] = AnnotationModel.objects(
# deleted=False, category_id=category.get('id')).count()
#
# return {
# "pagination": pagination.export(),
# "page": page,
# "categories": categories
# }
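# --- Usage sketch (added for illustration; not part of the original file) ---
# Assuming this namespace is mounted by the surrounding Api object (the exact URL
# prefix, host and port depend on the app configuration), a category could be
# created with something like:
#   curl -X POST http://localhost:5000/category/ \
#        -H 'Content-Type: application/json' \
#        -d '{"name": "person", "color": "#ff0000"}'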
```
#### File: sast_backend/cast/sort.py
```python
import os
import json
import cv2
from pathlib import Path
import numpy as np
def sort_landmarks_sparse_1(output_dir: Path):
for d in output_dir.iterdir():
landmarks_dir = d / 'landmarks'
json_dir = d / 'results'
results = list(json_dir.glob('*.json'))
for r in results:
print(r)
res = json.load(open(r))
if 'faces' not in res:
return None
if 'landmark' not in res['faces'][0]:
return None
landmarks = res['faces'][0]['landmark']
landmarks_list = []
landmarks = sort_dict(landmarks)
# print_result(printFuctionTitle("face landmark detection"), landmarks)
for k, landmark in landmarks.items():
landmarks_list.append([landmark['x'], landmark['y']])
landmarks_list = np.array(landmarks_list)
img_name = os.path.splitext(os.path.basename(r))[0]
txt_name = img_name + '.txt'
np.savetxt(str(landmarks_dir / txt_name), landmarks_list, fmt="%d")
def sort_landmarks_sparse_2(output_dir: Path):
landmarks_dir = output_dir / 'landmarks'
json_dir = output_dir / 'results'
results = list(json_dir.glob('*.json'))
for r in results:
print(r)
res = json.load(open(r))
if 'faces' not in res:
return None
if 'landmark' not in res['faces'][0]:
return None
landmarks = res['faces'][0]['landmark']
landmarks_list = []
landmarks = sort_dict(landmarks)
# print_result(printFuctionTitle("face landmark detection"), landmarks)
for k, landmark in landmarks.items():
landmarks_list.append([landmark['x'], landmark['y']])
landmarks_list = np.array(landmarks_list)
img_name = os.path.splitext(os.path.basename(r))[0]
txt_name = img_name + '.txt'
np.savetxt(str(landmarks_dir / txt_name), landmarks_list, fmt="%d")
def sort_landmarks_dense_1(output_dir: Path):
for d in output_dir.iterdir():
landmarks_dir = d / 'landmarks'
json_dir = d / 'results'
results = list(json_dir.glob('*.json'))
for r in results:
print(r)
res = json.load(open(r))
if 'face' not in res:
return None
if 'landmark' not in res['face']:
return None
landmarks = res['face']['landmark']
landmarks_list = []
# print_result(printFuctionTitle("face landmark detection"), landmarks)
for region, landmarks_dict in landmarks.items():
landmarks_dict = sort_dict(landmarks_dict)
for k, landmark in landmarks_dict.items():
landmarks_list.append([landmark['x'], landmark['y']])
landmarks_list = np.array(landmarks_list)
img_name = os.path.splitext(os.path.basename(r))[0]
txt_name = img_name + '.txt'
np.savetxt(str(landmarks_dir / txt_name), landmarks_list, fmt="%d")
def sort_landmarks_dense_2(output_dir: Path):
landmarks_dir = output_dir / 'landmarks'
json_dir = output_dir / 'results'
results = list(json_dir.glob('*.json'))
for r in results:
print(r)
res = json.load(open(r))
if 'face' not in res:
return None
if 'landmark' not in res['face']:
return None
landmarks = res['face']['landmark']
# print_result(printFuctionTitle("face landmark detection"), landmarks)
landmarks_list = []
for region, landmarks_dict in landmarks.items():
landmarks_dict = sort_dict(landmarks_dict)
for k, landmark in landmarks_dict.items():
landmarks_list.append([landmark['x'], landmark['y']])
landmarks_list = np.array(landmarks_list)
img_name = os.path.splitext(os.path.basename(r))[0]
txt_name = img_name + '.txt'
np.savetxt(str(landmarks_dir / txt_name), landmarks_list, fmt="%d")
def sort_dict(landmarks_dict):
landmarks_list = sorted(landmarks_dict.items(), key=lambda d: d[0])
new_dict = {}
for entry in landmarks_list:
new_dict[entry[0]] = entry[1]
return new_dict
def sortedDictValues(adict):
    # dict.keys() cannot be sorted in place under Python 3; sort the keys explicitly
    return [adict[k] for k in sorted(adict)]
# landmarks_path = 'datasets/Articst-faces/landmarks'
# landmarks_path = 'datasets/WebCariTrain/landmarks/845'
landmarks_path = 'datasets/Articst-faces/landmarks'
# dataset_name = 'AF_dataset'
# output_name = 'AF-landmarks-83'
landmarks_path = Path(landmarks_path)
# sort_landmarks_dense_2(landmarks_path)
sort_landmarks_dense_1(landmarks_path)
# sort_landmarks_sparse_1(landmarks_path)
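# Notes (added for illustration): the *_1 variants above expect a layout such as
#   <landmarks_path>/<subject>/results/*.json    Face++ landmark responses
#   <landmarks_path>/<subject>/landmarks/        output directory for .txt files
# while the *_2 variants operate on a single directory with results/ and landmarks/
# directly underneath it. This layout is inferred from the code and may differ
# from the actual dataset on disk.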
```
#### File: cast/utils/warp.py
```python
import logging
import os
import random
import cv2
import numpy as np
import torch
import torchvision
from PIL import Image
from skimage.transform import PiecewiseAffineTransform, warp
from config.config import setup_logging, DEBUG
from constant import *
from utils.misc import label_list, AngleFactory, image2label
from utils.transforms import ToUnNormalizedTensor
logger_name = 'warp_logger'
level = logging.INFO
logger = setup_logging('.', logger_name, level)
# CARI_IMG_PATH = '../datasets/Caricature-img'
# FACE_IMG_PATH = '../datasets/CelebA-HQ-img'
# CARI_DATASET_PATH = '../datasets/Caricature-mask'
# FACE_DATASET_PATH = '../datasets/CelebAMaskHQ-mask'
# CARI_DATASET_COLOR_PATH = '../datasets/Caricature-mask-color'
# FACE_DATASET_COLOR_PATH = '../datasets/CelebAMaskHQ-mask-color'
# FACE_WARPED = '../datasets/CelebA-HQ-img-Warped'
face_img_name = '1.png'
cari_img_name = '1'
face_mask_path = os.path.join(FACE_MASK_PATH, face_img_name)
face_path = os.path.join(FACE_IMG_PATH, '1.jpg')
cari_mask_path = os.path.join(CARI_MASK_PATH, cari_img_name + '.png')
cari_path = os.path.join(CARI_IMG_PATH, cari_img_name + '.jpg')
face_mask = cv2.imread(face_mask_path, cv2.IMREAD_GRAYSCALE)
cari_mask = cv2.imread(cari_mask_path, cv2.IMREAD_GRAYSCALE)
# 'skin', 'nose', 'l_eye', 'r_eye', 'l_brow', 'r_brow', 'mouth', 'u_lip','l_lip'
# sample_num_list = [50, 50, 50, 50, 50, 50, 50, 50, 50, 50]
sample_num_list = [80, 50, 50, 25, 25, 25, 25, 30, 20, 20]
# sample_num_list = [50, 50, 50, 25, 25, 25, 25, 30, 20, 20]
# sample_num_list = [50, 50, 20, 20, 20, 20, 20, 20, 20, 20]
face = cv2.imread(face_path)
cari = cv2.imread(cari_path)
transforms = [torchvision.transforms.Resize(512), ToUnNormalizedTensor()]
transforms = torchvision.transforms.Compose(transforms)
# face_torch = transforms(Image.open(face_path))
def warp_image(image, src_points=None, dst_points=None, transform=None):
if transform is None:
if src_points is not None and dst_points is not None:
transform = get_transform(image, src_points, dst_points)
else:
raise Exception('Src points and dst points must not be None.')
warped = warp(image, transform, output_shape=image.shape)
return warped, transform
def warp_nearest(image, src_points=None, dst_points=None, transform=None):
if transform is None:
if src_points is not None and dst_points is not None:
transform = get_transform(image, src_points, dst_points)
else:
raise Exception('Src points and dst points must not be None.')
warped = warp(image, transform, output_shape=image.shape, order=0)
return warped, transform
def get_transform(image, src_points, dst_points):
src_points = np.array(
[
[0, 0], [0, image.shape[0]],
[image.shape[0], 0], list(image.shape[:2])
] + src_points.tolist()
)
dst_points = np.array(
[
[0, 0], [0, image.shape[0]],
[image.shape[0], 0], list(image.shape[:2])
] + dst_points.tolist()
)
tform3 = PiecewiseAffineTransform()
tform3.estimate(dst_points, src_points)
return tform3
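# --- Usage sketch (added for illustration; not part of the original module) ---
# warp_image() estimates a piecewise-affine transform from matched key points
# (get_transform() pins the four image corners automatically) and resamples the
# image onto the destination geometry. The points below are arbitrary examples.
def _demo_warp():
    img = np.zeros((256, 256, 3), dtype=np.uint8)
    src = np.array([[64, 64], [192, 64], [128, 192]])
    dst = np.array([[60, 70], [200, 60], [128, 200]])
    warped, tform = warp_image(img, src, dst)   # warped is float in [0, 1]
    return (warped * 255).astype(np.uint8), tform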
def sample_arrange(src, num, label):
"""
Sample key points with equal spacing
:param src:
:param num:
:return:
"""
arrange = len(src)
# if num > len(src):
# logger.info("Num out of length, return arrange: [{}]".format(src))
# return src
# else:
# output = np.array((1, 2), dtype=arrange.dtype)
output = []
seg = arrange // num
if seg == 0:
msg = '[{}]: The number of sampling points exceeds the number of source points, and the original array is ' \
'equidistantly filled.'.format(label)
logger.info(msg)
return insert_equal_space(src, arrange, num)
seg = arrange / num
for n in range(num):
if int(seg * n) >= len(src):
output.append((src[-1] + src[-2]) // 2)
else:
output.append(src[int(seg * n)])
return output
def insert_equal_space(src, arrange, num):
output = src.copy()
need = num - arrange
sample_space = need // arrange
mod = need % arrange
position = 1
for idx in range(arrange):
# is_enough = False
pre_el = src[idx]
next_el = src[(idx + 1) % arrange]
output = fill(pre_el, next_el, position, sample_space, output)
position += (sample_space + 1)
if len(output) == num:
return output.reshape(-1, 2)
else:
for idx in range(mod):
output = np.append(output, src[-1])
return output.reshape(-1, 2)
def fill(pre_el, next_el, position, sample_space, output):
for j in range(sample_space):
sample = (pre_el + next_el) // (sample_space + 1) * (j + 1)
output = np.insert(output, position + j, sample.reshape(2), axis=0)
return output
def is_filtered(points):
return len(points) == 1 and (points == np.array([[-1, -1]])).all()
def find_key_points(img, sample_num_list):
import cv2
excluded_index = [1, 7]
labels_tensor = np.arange(0, len(label_list)).reshape(len(label_list), 1, 1)
# labels_tensor = torch.arange(0, len(label_list)).view(len(label_list), 1, 1)
split_tensors = (img == labels_tensor).astype(np.uint8)
point_list_sorted_by_polar = []
# np.arang
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
for index, tensor in enumerate(split_tensors):
if index in excluded_index:
# key_points[index] = np.array([[-1, -1]])
point_list_sorted_by_polar.append(np.array([[-1, -1]]))
logger.info('Semantic label: [{}] is excluded.'.format(index))
continue
color = colormap[tensor].astype(np.uint8)
label = label_list[index]
# gray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)
# cv2.imshow('gray', gray)
# ret, binary = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)
tensor = tensor * 255
# connects some semantic attribute for generating only one contours
tensor = cv2.morphologyEx(tensor, cv2.MORPH_CLOSE, kernel)
ret, binary = cv2.threshold(tensor, 10, 255, cv2.THRESH_BINARY)
# Skin reverser color ensure finding only on contour
if index == 0:
binary = cv2.bitwise_not(binary)
# if DEBUG:
# cv2.imshow('binary', binary)
# cv2.waitKey(0)
tensor, contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
logger.info("Semantic label [{}] find contours: [{}]".format(label, len(contours)))
if not len(contours):
logger.error('Cannot find contours for semantic label [{}], return None for filtering this img.'.format(
label))
return None
# point_list_sorted_by_polar.append(np.array([[-1, -1]]))
if len(contours) > 1:
contours = [max(contours, key=cv2.contourArea)]
unit_anchor = np.array([0, 1])
for points in contours:
mom = cv2.moments(points)
# print(points.shape)
centroid = np.array([int(mom['m10'] / mom['m00']), int(mom['m01'] / mom['m00'])])
cv2.circle(color, (centroid[0], centroid[1]), 5, (0, 0, 255), -1)
points = points.reshape(-1, 2)
points = [[p, AngleFactory.calAngleClockwise(unit_anchor + centroid, p, centroid)] for p in points]
points_sorted_by_polar = [el[0] for el in sorted(points, key=lambda el: el[1])]
logger.info(
"Semantic label [{}] gains [{}] contour points.".format(label,
len(points_sorted_by_polar)))
point_list_sorted_by_polar.append(points_sorted_by_polar)
if DEBUG:
dynamic_display_ordered_contour_points(index, color, points_sorted_by_polar)
key_point_list = []
for index, key_points in enumerate(point_list_sorted_by_polar):
label = label_list[index]
if is_filtered(key_points):
logger.info('Semantic tensor [{}] does not contain any contour points or was filtered by the configuration'.format(
label))
key_point_list.append(np.array([[-1, -1]]))
continue
sampled_key_point = sample_arrange(key_points, sample_num_list[index], label)
if len(sampled_key_point) != sample_num_list[index]:
msg = 'The number of sampling points [{}] must be the same as the number [{}] specified by the configuration in [{}].'.format(
len(key_points), sample_num_list[index], label)
logger.error(msg)
return None
logger.debug('Semantic label [{}] sampled: [{}].'.format(label, sampled_key_point))
key_point_list.append(sampled_key_point)
return key_point_list
# centriods.append((center_x, center_y))
# cv2.circle(color, (center_x, center_y), 4, (152, 255, 255), -1)
# cv2.imshow('moment', color)
# cv2.waitKey(0)
# print(img.shape)
# print(split_tensors.shape)
def dynamic_display_ordered_contour_points(label_index, color, points_sorted_by_polar):
tmp_path = 'polar'
if not os.path.exists(tmp_path):
os.mkdir(tmp_path)
path = os.path.join(tmp_path, str(label_index))
if not os.path.exists(path):
os.mkdir(path)
# hstack = []
for index, p in enumerate(points_sorted_by_polar):
if index % 20 == 0:
cv2.circle(color, (p[0], p[1]), 4, (152, 255, 255), -1)
cv2.imwrite(os.path.join(path, str(index)) + '.png', color)
# hstack.append(color.copy())
# vstack = []
# j = 0
# for index in len(hstack):
# if (index + 1) % 4:
# vstack.append(np.vstack(hstack[j * 4:index]))
# cv2.imwrite(os.path.join(path, str(index)) + '.png', color)
# cv2.waitKey(0)
def display_pair_key_points(face_src, cari_src, f_kl, c_kl):
face_img = face_src.copy()
cari_img = cari_src.copy()
for index in range(len(f_kl)):
fpts = f_kl[index]
cpts = c_kl[index]
r = random.randint(0, 255)
g = random.randint(0, 255)
b = random.randint(0, 255)
if is_filtered(fpts) or is_filtered(cpts):
continue
for idx in range(len(fpts)):
cv2.circle(face_img, center=(fpts[idx][0], fpts[idx][1]), radius=2, color=(b, g, r), thickness=-1)
cv2.circle(cari_img, center=(cpts[idx][0], cpts[idx][1]), radius=2, color=(b, g, r), thickness=-1)
# cv2.imshow('Key points', img)
# cv2.waitKey(0)
return face_img, cari_img
def draw_kpts(src, kpts):
face_img = src.copy()
for p in kpts:
r = random.randint(0, 255)
g = random.randint(0, 255)
b = random.randint(0, 255)
if is_filtered(p):
continue
for idx in range(len(p)):
cv2.circle(face_img, center=(p[0], p[1]), radius=2, color=(b, g, r), thickness=-1)
# cv2.imshow('Key points', img)
# cv2.waitKey(0)
return face_img
def draw_kpts_pil(src, kpts):
img = cv2.cvtColor(np.array(src), cv2.COLOR_RGB2BGR)
kpts = kpts.int().numpy().reshape(-1, 2)
# img = cv2.resize(img, (0, 0), fx=scale, fy=scale)
return Image.fromarray(cv2.cvtColor(draw_kpts(img, kpts), cv2.COLOR_BGR2RGB))
def warp_paired(face_img_name, cari_img_name, face_mask_path, cari_mask_path, face_path, cari_path, sample_num_list):
# test_loader()
# test_celeb_mask_loading()
# face_mask = cv2.imread(face_mask_path, cv2.IMREAD_GRAYSCALE)
# cari_mask = cv2.imread(cari_mask_path, cv2.IMREAD_GRAYSCALE)
face_color = colormap[face_mask].astype(np.uint8)
cari_color = colormap[cari_mask].astype(np.uint8)
face = cv2.imread(face_path)
cari = cv2.imread(cari_path)
face = cv2.resize(face, (0, 0), fx=0.5, fy=0.5)
if face_mask is None:
logger.info('Loading Img Error, [{}] not found.'.format(face_mask_path))
# sample_num_list = [30, 30, 30, 30, 30, 30, 30, 30, 30, 30]
# sample_num_list = [100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
ckpts, fkpts, k_cari, k_face = get_paired_key_points(face_img_name, cari_img_name, face_mask, cari_mask,
sample_num_list, face, cari)
warped, warped_mask, warped_mask_color, transform = warped_face_mask(ckpts, face, face_color, fkpts)
# x_position_map, y_position_map = build_position_map(face.shape[1], face.shape[0])
# x_position_map = make_x_position_map(1,face_mask.shape[1]).reshape()
# warped_xpm, _ = warp_image(x_position_map, transform=transform)
# warped_ypm, _ = warp_image(y_position_map, transform=transform)
# print(x_position_map)
# delta_x = (warped_xpm * 255).astype(np.uint8) - x_position_map
# delta_y = (warped_ypm * 255).astype(np.uint8) - y_position_map
if DEBUG:
stack = np.hstack((k_face, k_cari, warped))
stack_mask = np.hstack((face_color, cari_color, warped_mask_color))
stack_mask = cv2.cvtColor(stack_mask, cv2.COLOR_RGB2BGR)
stack_all = np.vstack((stack, stack_mask))
if not os.path.exists(FACE_WARPED):
os.mkdir(FACE_WARPED)
cv2.imwrite(os.path.join(FACE_WARPED, str(len(FACE_WARPED) + 1) + '.png'), stack_all)
return warped_mask
def estimate_offset_field(face_mask, cari_mask, face_img_name, cari_img_name, sample_num_list):
width, height = face_mask.shape[1], face_mask.shape[0]
if face_mask is None:
logger.info('Loading Img Error, [{}] not found.'.format(face_mask_path))
ckpts, fkpts = get_paired_key_points(face_img_name, cari_img_name, face_mask, cari_mask,
sample_num_list)
return estimate_offset_field_by_kpts(fkpts, ckpts, height, width)
# warped_position_map = torch.cat(
# [torch.from_numpy(warped_xpm).view(width, width, 1), torch.from_numpy(warped_ypm).view(height, height, 1)],
# dim=2).unsqueeze(0)
# tmp = F.grid_sample(face_torch.unsqueeze(0).double(), warped_position_map)
# save_image(tmp.long(), 'test.png')
# print(offset_filed.size())
# print(delta_x[256:300, 256:300, 0])
def load_warp_from_npy(path):
if not os.path.exists(path):
logger.info('Empty path: [{}]'.format(path))
return None
else:
return torch.from_numpy(np.load(path).reshape(1, IMG_SIZE, IMG_SIZE, 2)).float()
def estimate_offset_field_by_kpts(fkpts, ckpts, height, width, x_position_map=None, y_position_map=None):
delta_x, delta_y = get_split_delta(ckpts, fkpts, height, width, x_position_map, y_position_map)
offset_filed = torch.cat([delta_x, delta_y], dim=2)
return offset_filed.unsqueeze(0).float()
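# Note (added comment): the returned offset field has shape (1, H, W, 2) and holds
# per-pixel displacements in grid_sample's normalized [-1, 1] coordinates; adding it
# back to the identity grid (see make_position_map below) recovers the warped sampling
# grid, as sketched in the commented-out F.grid_sample call above.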
def get_split_delta(ckpts, fkpts, height, width, x_position_map=None, y_position_map=None):
warped_xpm, warped_ypm, x_position_map, y_position_map = get_warped_split_position_map(ckpts, fkpts, height, width,
x_position_map,
y_position_map)
delta_x = (warped_xpm - x_position_map).view(width, width, 1)
delta_y = (warped_ypm - y_position_map).view(height, height, 1)
return delta_x, delta_y
def get_warped_split_position_map(ckpts, fkpts, height, width, x_position_map=None, y_position_map=None):
if x_position_map is None or y_position_map is None:
x_position_map, y_position_map = get_split_position_map(height, width)
warped_xpm, warped_ypm = warp_position_map(ckpts, fkpts, x_position_map, y_position_map)
x_position_map = torch.from_numpy(x_position_map).float()
y_position_map = torch.from_numpy(y_position_map).float()
warped_xpm = torch.from_numpy(warped_xpm).float()
warped_ypm = torch.from_numpy(warped_ypm).float()
return warped_xpm, warped_ypm, x_position_map, y_position_map
def get_warped_position_map(ckpts, fkpts, height, width):
x_position_map, y_position_map = get_split_position_map(height, width)
warped_xpm, warped_ypm = warp_position_map(ckpts, fkpts, x_position_map, y_position_map)
# x_position_map = torch.from_numpy(x_position_map)
# y_position_map = torch.from_numpy(y_position_map)
warped_xpm = torch.from_numpy(warped_xpm).view(width, width, 1)
warped_ypm = torch.from_numpy(warped_ypm).view(height, height, 1)
warped_pm = torch.cat([warped_xpm, warped_ypm], dim=2).unsqueeze(0)
return warped_pm.float()
def get_split_position_map(height, width):
x_position_map = make_x_position_map(1, -1, 1, width, height).view(width, width).numpy()
y_position_map = make_y_position_map(1, -1, 1, width, height).view(height, height).numpy()
return x_position_map, y_position_map
def warp_position_map(ckpts, fkpts, x_position_map, y_position_map):
warped_xpm, transform = warp_image(x_position_map, fkpts, ckpts)
warped_ypm, transform = warp_image(y_position_map, transform=transform)
return warped_xpm, warped_ypm
def diff_on_same_scale(x, y, start, end):
return normalize(start, end, x) - normalize(start, end, y)
def normalize(start, end, tensor):
tensor = tensor.astype(np.float)
max = np.max(tensor)
min = np.min(tensor)
k = (start - end) / (max - min)
return start + k * (tensor - min)
def warped_face_mask(ckpts, face, face_color, fkpts):
warped, transform = warp_image(face, fkpts, ckpts)
# warped, transform = warp_nearest(face, fkpts, ckpts)
warped = (warped * 255).astype(np.uint8)
warped_mask, warped_mask_color = warped_color(fkpts, ckpts, face_color, transform)
return warped, warped_mask, warped_mask_color, transform
def warped_color(fkpts, ckpts, face_color, transform=None):
warped_mask_color, _ = warp_image(face_color, fkpts, ckpts, transform)
warped_mask_color = (warped_mask_color * 255).astype(np.uint8)
warped_mask = image2label(warped_mask_color)
return warped_mask, warped_mask_color
def warped_color_nearest(fkpts, ckpts, face_color, transform=None):
warped_mask_color, _ = warp_nearest(face_color, fkpts, ckpts, transform)
warped_mask_color = (warped_mask_color * 255).astype(np.uint8)
warped_mask = image2label(warped_mask_color)
return warped_mask, warped_mask_color
def get_paired_key_points(face_img_name, cari_img_name, face_mask, cari_mask, sample_num_list, face=None, cari=None):
face_key_point_list = find_key_points(face_mask, sample_num_list)
cari_key_point_list = find_key_points(cari_mask, sample_num_list)
# Validate consistency of key points
fkpts_len = len(face_key_point_list)
ckpts_len = len(cari_key_point_list)
if fkpts_len != ckpts_len:
raise Exception('Face and caricature semantic labels must be consistency.')
for idx in range(len(face_key_point_list)):
if len(face_key_point_list[idx]) != len(cari_key_point_list[idx]):
msg = 'Face [{}] and caricature [{}] key points must be consistent.'.format(face_img_name, cari_img_name)
raise Exception(msg)
# merge all attribute key points into one list for the warping stage
fkpts = merge_key_points(face_key_point_list)
ckpts = merge_key_points(cari_key_point_list)
fkpts = np.array(fkpts)
ckpts = np.array(ckpts)
if face is not None and cari is not None:
k_face, k_cari = display_pair_key_points(face, cari, face_key_point_list, cari_key_point_list)
return ckpts, fkpts, k_cari, k_face
return ckpts, fkpts
def merge_key_points(kpts, merged=None):
if merged is None:
merged = []
for p in kpts:
merged.extend(p)
return merged
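# Usage sketch (illustrative points): flattens the per-attribute key-point lists into one list.
# merge_key_points([[(1, 2), (3, 4)], [(5, 6)]])  ->  [(1, 2), (3, 4), (5, 6)]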
def make_position_map(batch, start, end, width, height):
x_position_map = make_x_position_map(batch, start, end, width, height)
y_position_map = make_y_position_map(batch, start, end, height, height)
position_map = torch.cat([x_position_map, y_position_map], dim=3)
return position_map
def make_y_position_map(batch, start, end, width, height):
height_linspace = torch.linspace(start=start, end=end, steps=height, requires_grad=False, dtype=torch.float).view(
height, 1)
y_position_map = height_linspace.expand(width, height).view(1, width, height, 1).expand(batch,
width,
height, 1)
return y_position_map
def make_x_position_map(batch, start, end, width, height):
width_linspace = torch.linspace(start=start, end=end, steps=width, requires_grad=False, dtype=torch.float).view(1,
width)
x_position_map = width_linspace.expand(width, height).view(1, width, height, 1).expand(batch,
width,
height, 1)
return x_position_map
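# Usage sketch (square size assumed, values illustrative): make_position_map stacks the x and y
# linspace grids into a normalized coordinate map.
# pm = make_position_map(batch=2, start=-1, end=1, width=64, height=64)
# pm.size()  ->  torch.Size([2, 64, 64, 2])  # channel 0 is the x grid, channel 1 the y grid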
def build_position_map(height, width):
# face = cv2.imread(face_path)
# height = face.shape[0]
# width = face.shape[1]
x_position_map = np.linspace(start=0, stop=width - 1, num=width, dtype=np.uint8).reshape(1, width)
# np.tile has no axis argument; repeat the row to build a (height, width) grid
x_position_map = np.repeat(x_position_map, height, axis=0)
y_position_map = np.linspace(start=0, stop=height - 1, num=height, dtype=np.uint8).reshape(height, 1)
# repeat the column to build a (height, width) grid
y_position_map = np.repeat(y_position_map, width, axis=1)
print(x_position_map[0, -10:])
print(y_position_map[0:10, 0])
return x_position_map, y_position_map
def test_sample_arrange():
test_pts = np.array([[10, 20], [30, 40], [50, 60], [70, 80]])
pts = sample_arrange(test_pts, 13)
print(len(pts))
pts = sample_arrange(test_pts, 14)
print(len(pts))
pts = sample_arrange(test_pts, 15)
print(len(pts))
pts = sample_arrange(test_pts, 200)
print(len(pts))
def test_warp_paired():
msg = 'Expected Key Points sample number for each semantic channel:'
for index, num in enumerate(sample_num_list):
msg += ' ,[{}]: [{}]'.format(label_list[index], num)
logger.info(msg)
warp_paired(face_img_name, cari_img_name, face_mask_path, cari_mask_path, face_path, cari_path, sample_num_list)
def test_estimate_offset_field():
offset_field = estimate_offset_field(face_mask, cari_mask, face_img_name, cari_mask_path, sample_num_list)
print(offset_field.size())
if __name__ == '__main__':
test_warp_paired()
# face_color = colormap[face_mask].astype(np.uint8)
# for i in range(4):
# mask = cv2.imread(str(i + 1) + '.png', cv2.IMREAD_GRAYSCALE)
# print(np.max(mask))
# print(np.min(mask))
# face_color = colormap[mask].astype(np.uint8)
# cv2.imshow('mask', cv2.cvtColor(face_color, cv2.COLOR_BGR2RGB))
# cv2.waitKey(0)
#
# colorize()
# test_estimate_offset_field()
# test_sample_arrange()
```
#### File: sast_backend/config/config.py
```python
import os
import subprocess
# def get_tag():
# result = subprocess.run(["git", "describe", "--abbrev=0", "--tags"], stdout=subprocess.PIPE)
# return str(result.stdout.decode("utf-8")).strip()
def _get_bool(key, default_value):
if key in os.environ:
value = os.environ[key]
if value == 'True' or value == 'true' or value == '1':
return True
return False
return default_value
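# Usage sketch (hypothetical env keys): "1"/"true"/"True" map to True, anything else to False,
# and missing keys fall back to the default.
# os.environ["DEBUG_MODE"] = "true"
# _get_bool("DEBUG_MODE", False)   ->  True
# _get_bool("MISSING_KEY", False)  ->  False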
class Config:
NAME = os.getenv("NAME", "SAST System")
# VERSION = get_tag()
VERSION = 'v1.0'
### Dataset Options
CONTENT_DIRECTORY = os.getenv("CONTENT_DIRECTORY", "data/contents")
STYLE_DIRECTORY = os.getenv("STYLE_DIRECTORY", "data/styles")
STYLIZATION_DIRECTORY = os.getenv("STYLIZATION_DIRECTORY", "data/stylizations")
MAST_TOTAL_TIME = 1.5
ANNOTATION_DIRECTORY = os.getenv("ANNOTATION_DIRECTORY", "data/annotations")
CATEGORIES_DIRECTORY = os.getenv("CATEGORIES_DIRECTORY", "data/categories")
__all__ = ["Config"]
```
#### File: mast/libs/photo_smooth.py
```python
from __future__ import division
import torch
import torch.nn as nn
import scipy.misc
import numpy as np
import scipy.sparse
import scipy.sparse.linalg
from numpy.lib.stride_tricks import as_strided
from PIL import Image
from torchvision import transforms
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from smooth_filter import smooth_filter
class Propagator(nn.Module):
def __init__(self, beta=0.9999):
super(Propagator, self).__init__()
self.beta = beta
def process_(self, initImg, contentImg):
if type(contentImg) == str:
content = scipy.misc.imread(contentImg, mode='RGB')
else:
content = contentImg.copy()
# content = scipy.misc.imread(contentImg, mode='RGB')
if type(initImg) == str:
B = scipy.misc.imread(initImg, mode='RGB').astype(np.float64) / 255
else:
B = scipy.asarray(initImg).astype(np.float64) / 255
# B = self.
# B = scipy.misc.imread(initImg, mode='RGB').astype(np.float64)/255
h1, w1, k = B.shape
h = h1 - 4
w = w1 - 4
B = B[int((h1 - h) / 2):int((h1 - h) / 2 + h), int((w1 - w) / 2):int((w1 - w) / 2 + w), :]
content = scipy.misc.imresize(content, (h, w))
B = self.__replication_padding(B, 2)
content = self.__replication_padding(content, 2)
content = content.astype(np.float64) / 255
B = np.reshape(B, (h1 * w1, k))
W = self.__compute_laplacian(content)
W = W.tocsc()
dd = W.sum(0)
dd = np.sqrt(np.power(dd, -1))
dd = dd.A.squeeze()
D = scipy.sparse.csc_matrix((dd, (np.arange(0, w1 * h1), np.arange(0, w1 * h1)))) # 0.026
S = D.dot(W).dot(D)
A = scipy.sparse.identity(w1 * h1) - self.beta * S
A = A.tocsc()
solver = scipy.sparse.linalg.factorized(A)
V = np.zeros((h1 * w1, k))
V[:, 0] = solver(B[:, 0])
V[:, 1] = solver(B[:, 1])
V[:, 2] = solver(B[:, 2])
V = V * (1 - self.beta)
V = V.reshape(h1, w1, k)
V = V[2:2 + h, 2:2 + w, :]
img = Image.fromarray(np.uint8(np.clip(V * 255., 0, 255.)))
return img
def process(self, stylized_tensor, content_tensor):
"""
:param stylized_tensor: (b,c,h,w)
:param content_tensor: (b,c,h,w)
:return:
"""
# print(f'stylized.size-{stylized_tensor.size()}, content.size={content_tensor.size()}')
stylized_img = stylized_tensor.clone()
content_img = content_tensor.clone()
b, c, h, w = content_img.size()
device = stylized_img.device
ori_type = stylized_img.type
res = []
for i in range(b):
s_img = stylized_img[i].float()
s_img = transforms.ToPILImage()(s_img.cpu()).convert('RGB')
c_img = content_img[i].float()
c_img = transforms.ToPILImage()(c_img.cpu()).convert('RGB')
s_img = s_img.resize((w, h), Image.ANTIALIAS)
temp = self.process_(s_img, c_img)
temp = smooth_filter(temp, c_img, f_radius=15, f_edge=1e-1)
temp = transforms.ToTensor()(temp).to(device).unsqueeze(0).type_as(stylized_tensor)
res.append(temp.clone())
res = torch.cat(res, dim=0)
return res
# Returns sparse matting laplacian
# The implementation of the function is heavily borrowed from
# https://github.com/MarcoForte/closed-form-matting/blob/master/closed_form_matting.py
# We thank <NAME> for sharing his code.
def __compute_laplacian(self, img, eps=10 ** (-7), win_rad=1):
win_size = (win_rad * 2 + 1) ** 2
h, w, d = img.shape
c_h, c_w = h - 2 * win_rad, w - 2 * win_rad
win_diam = win_rad * 2 + 1
indsM = np.arange(h * w).reshape((h, w))
ravelImg = img.reshape(h * w, d)
win_inds = self.__rolling_block(indsM, block=(win_diam, win_diam))
win_inds = win_inds.reshape(c_h, c_w, win_size)
winI = ravelImg[win_inds]
win_mu = np.mean(winI, axis=2, keepdims=True)
win_var = np.einsum('...ji,...jk ->...ik', winI, winI) / win_size - np.einsum('...ji,...jk ->...ik', win_mu,
win_mu)
inv = np.linalg.inv(win_var + (eps / win_size) * np.eye(3))
X = np.einsum('...ij,...jk->...ik', winI - win_mu, inv)
vals = (1 / win_size) * (1 + np.einsum('...ij,...kj->...ik', X, winI - win_mu))
nz_indsCol = np.tile(win_inds, win_size).ravel()
nz_indsRow = np.repeat(win_inds, win_size).ravel()
nz_indsVal = vals.ravel()
L = scipy.sparse.coo_matrix((nz_indsVal, (nz_indsRow, nz_indsCol)), shape=(h * w, h * w))
return L
def __replication_padding(self, arr, pad):
h, w, c = arr.shape
ans = np.zeros((h + pad * 2, w + pad * 2, c))
for i in range(c):
ans[:, :, i] = np.pad(arr[:, :, i], pad_width=(pad, pad), mode='edge')
return ans
def __rolling_block(self, A, block=(3, 3)):
shape = (A.shape[0] - block[0] + 1, A.shape[1] - block[1] + 1) + block
strides = (A.strides[0], A.strides[1]) + A.strides
return as_strided(A, shape=shape, strides=strides)
```
#### File: mast/libs/utils.py
```python
import torch
import os
import yaml
from PIL import Image
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import torch.nn.functional as F
matplotlib.use('Agg')
def image_map(img):
"""
:param img: the image with only one channel opened by PIL.Image
:return: an image in PIL.Image format
"""
colormap_dict = {0: (0, 0, 0),
1: (128, 0, 0),
2: (0, 128, 0),
3: (128, 128, 0),
4: (0, 0, 128),
5: (128, 0, 128),
6: (0, 128, 128),
7: (128, 128, 128),
8: (64, 0, 0),
9: (192, 0, 0)}
img_cat = np.vectorize(colormap_dict.get)(img)
img_cat1 = np.expand_dims(img_cat[0], axis=2)
img_cat2 = np.expand_dims(img_cat[1], axis=2)
img_cat3 = np.expand_dims(img_cat[2], axis=2)
img = np.concatenate([img_cat1, img_cat2, img_cat3], axis=2)
img = img.astype(np.uint8)
img = Image.fromarray(img)
return img
def image_map1(mask):
colormap = np.array([[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
[128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [192, 0, 0],
[64, 128, 0], [192, 128, 0], [64, 0, 128], [192, 0, 128],
[64, 128, 128], [192, 128, 128], [0, 64, 0], [128, 64, 0],
[0, 192, 0], [128, 192, 0], [0, 64, 128]])
mask = np.array(mask)
mask_color = colormap[mask].astype(np.uint8)
mask_color = Image.fromarray(mask_color)
return mask_color
def euclidean_dist(x, y):
"""
Args:
x: pytorch Variable, with shape [d, m]
y: pytorch Variable, with shape [d, n]
Returns:
dist: pytorch Variable, with shape [m, n]
"""
x = x.t()
y = y.t()
m, n = x.size(0), y.size(0)
# square x element-wise, sum along dim 1 to get shape (m, 1), then expand to (m, n)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
# same for y; the final transpose gives shape (m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy
# torch.addmm_(beta, alpha, mat1, mat2): dist = 1 * dist + (-2) * x @ y.t()
dist.addmm_(1, -2, x, y.t())
# clamp bounds the values before the square root, which yields the pairwise distance matrix
dist = dist.clamp(min=1e-12).sqrt() # for numerical stability
return dist
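# Usage sketch (shapes follow the docstring: columns are samples, rows are feature dims):
# x = torch.rand(512, 100)   # [d, m]
# y = torch.rand(512, 200)   # [d, n]
# euclidean_dist(x, y).size()  ->  torch.Size([100, 200])  # pairwise L2 distances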
def whiten_and_color(cF, sF):
# cF_size=[c, h, w], sF_size=[c, h, w]
device = cF.device
cFSize = cF.size()
cF = cF.view(cFSize[0], -1)
c_mean = torch.mean(cF, 1) # c x (h x w)
c_mean = c_mean.unsqueeze(1).expand_as(cF)
cF = cF - c_mean
contentConv = torch.mm(cF, cF.t()).div(cFSize[1] * cFSize[2] - 1)
_, c_e, c_v = torch.svd(contentConv, some=False)
k_c = cFSize[0]
for i in range(cFSize[0]):
if c_e[i] < 0.00001:
k_c = i
break
sFSize = sF.size()
sF = sF.view(sFSize[0], -1)
s_mean = torch.mean(sF, 1)
sF = sF - s_mean.unsqueeze(1).expand_as(sF)
styleConv = torch.mm(sF, sF.t()).div(sFSize[1] * sFSize[2] - 1)
_, s_e, s_v = torch.svd(styleConv, some=False)
k_s = sFSize[0]
for i in range(sFSize[0]):
if s_e[i] < 0.00001:
k_s = i
break
c_d = (c_e[0:k_c]).pow(-0.5)
step1 = torch.mm(c_v[:, 0:k_c], torch.diag(c_d))
step2 = torch.mm(step1, (c_v[:, 0:k_c].t()))
whiten_cF = torch.mm(step2, cF)
s_d = (s_e[0:k_s]).pow(0.5)
targetFeature = torch.mm(torch.mm(torch.mm(s_v[:, 0:k_s], torch.diag(s_d)), (s_v[:, 0:k_s].t())), whiten_cF)
print(
f'trace={torch.mm((torch.mm(targetFeature, targetFeature.t()) - torch.mm(sF, sF.t())).t(), (torch.mm(targetFeature, targetFeature.t()) - torch.mm(sF, sF.t()))).trace()}')
print(f'norm={torch.norm((torch.mm(targetFeature, targetFeature.t()) - torch.mm(sF, sF.t()))) ** 2}')
targetFeature = targetFeature + s_mean.unsqueeze(1).expand_as(targetFeature)
targetFeature = targetFeature.view(cFSize[0], cFSize[1], cFSize[2])
return targetFeature
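# Usage sketch (shapes assumed): the classic WCT step, re-coloring whitened content features
# with the style feature covariance.
# cF = torch.rand(512, 32, 32)  # [c, h, w] content features
# sF = torch.rand(512, 32, 32)  # [c, h, w] style features
# csF = whiten_and_color(cF, sF)  # -> [512, 32, 32], content structure with style statistics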
def draw_loss(loss, img_saved_path):
fig = plt.figure()
plt.plot(range(len(loss)), loss)
plt.xlabel('itr')
plt.ylabel('F')
plt.title(os.path.splitext(os.path.basename(img_saved_path))[0])
plt.savefig(img_saved_path)
plt.close(fig)
def batch_split(feature, patch_size, padding=0, stride=1):
"""
:param feature: size = [n,c,h,w]
:param patch_size: (3, 3)
:param padding: 0
:param stride: 1
:return: size = [n, c*kernel_size, L]
"""
if patch_size == (1, 1):
n, c, h, w = feature.size()
feature_unfold = feature.view(n, c, -1)
else:
feature_unfold = F.unfold(feature, kernel_size=patch_size, padding=padding, stride=stride)
# print(f'feature_unfold.size = {feature_unfold.size()}')
return feature_unfold
def batch_concatenate(feature_unfold, origin_size, patch_size, padding=0, stride=1):
"""
:param feature_unfold: size = [n, c*kernel_size, L]
:param origin_size: (h, w)
:param patch_size: (3, 3)
:param padding: 0
:param stride: 1
:return: size = [n, c, h, w]
"""
if patch_size == (1, 1):
n, c, h, w = feature_unfold.size()[0], feature_unfold.size()[1], origin_size[0], origin_size[1]
feature_fold = feature_unfold.view(n, c, h, w)
else:
feature_fold = F.fold(feature_unfold, output_size=origin_size, kernel_size=patch_size, padding=padding,
stride=stride)
ones = torch.ones_like(feature_fold)
ones_unfold = batch_split(ones, patch_size=patch_size)
ones_fold = F.fold(ones_unfold, output_size=origin_size, kernel_size=patch_size, padding=padding, stride=stride)
feature_fold = feature_fold / ones_fold
return feature_fold
def load_seg(content_seg_path, style_seg_path, content_shape, style_shape):
color_codes = ['BLUE', 'GREEN', 'BLACK', 'WHITE', 'RED', 'YELLOW', 'GREY', 'LIGHT_BLUE', 'PURPLE']
def _extract_mask(seg, color_str):
h, w, c = np.shape(seg)
if color_str == "BLUE":
mask_r = (seg[:, :, 0] < 0.1).astype(np.uint8)
mask_g = (seg[:, :, 1] < 0.1).astype(np.uint8)
mask_b = (seg[:, :, 2] > 0.9).astype(np.uint8)
elif color_str == "GREEN":
mask_r = (seg[:, :, 0] < 0.1).astype(np.uint8)
mask_g = (seg[:, :, 1] > 0.9).astype(np.uint8)
mask_b = (seg[:, :, 2] < 0.1).astype(np.uint8)
elif color_str == "BLACK":
mask_r = (seg[:, :, 0] < 0.1).astype(np.uint8)
mask_g = (seg[:, :, 1] < 0.1).astype(np.uint8)
mask_b = (seg[:, :, 2] < 0.1).astype(np.uint8)
elif color_str == "WHITE":
mask_r = (seg[:, :, 0] > 0.9).astype(np.uint8)
mask_g = (seg[:, :, 1] > 0.9).astype(np.uint8)
mask_b = (seg[:, :, 2] > 0.9).astype(np.uint8)
elif color_str == "RED":
mask_r = (seg[:, :, 0] > 0.9).astype(np.uint8)
mask_g = (seg[:, :, 1] < 0.1).astype(np.uint8)
mask_b = (seg[:, :, 2] < 0.1).astype(np.uint8)
elif color_str == "YELLOW":
mask_r = (seg[:, :, 0] > 0.9).astype(np.uint8)
mask_g = (seg[:, :, 1] > 0.9).astype(np.uint8)
mask_b = (seg[:, :, 2] < 0.1).astype(np.uint8)
elif color_str == "GREY":
mask_r = np.multiply((seg[:, :, 0] > 0.4).astype(np.uint8),
(seg[:, :, 0] < 0.6).astype(np.uint8))
mask_g = np.multiply((seg[:, :, 1] > 0.4).astype(np.uint8),
(seg[:, :, 1] < 0.6).astype(np.uint8))
mask_b = np.multiply((seg[:, :, 2] > 0.4).astype(np.uint8),
(seg[:, :, 2] < 0.6).astype(np.uint8))
elif color_str == "LIGHT_BLUE":
mask_r = (seg[:, :, 0] < 0.1).astype(np.uint8)
mask_g = (seg[:, :, 1] > 0.9).astype(np.uint8)
mask_b = (seg[:, :, 2] > 0.9).astype(np.uint8)
elif color_str == "PURPLE":
mask_r = (seg[:, :, 0] > 0.9).astype(np.uint8)
mask_g = (seg[:, :, 1] < 0.1).astype(np.uint8)
mask_b = (seg[:, :, 2] > 0.9).astype(np.uint8)
return np.multiply(np.multiply(mask_r, mask_g), mask_b).astype(np.float32)
# PIL resize has different order of np.shape
content_seg = np.array(Image.open(content_seg_path).convert("RGB").resize(content_shape, resample=Image.BILINEAR),
dtype=np.float32) / 255.0
style_seg = np.array(Image.open(style_seg_path).convert("RGB").resize(style_shape, resample=Image.BILINEAR),
dtype=np.float32) / 255.0
color_content_masks = []
color_style_masks = []
for i in range(len(color_codes)):
color_content_masks.append(torch.from_numpy(_extract_mask(content_seg, color_codes[i])).unsqueeze(0))
color_style_masks.append(torch.from_numpy(_extract_mask(style_seg, color_codes[i])).unsqueeze(0))
color_content_masks = torch.cat(color_content_masks, dim=0)
color_style_masks = torch.cat(color_style_masks, dim=0)
return color_content_masks, color_style_masks
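# Usage sketch (paths and sizes are illustrative): each call returns a [9, H, W] float tensor,
# one binary channel per entry of `color_codes`.
# c_masks, s_masks = load_seg('content_seg.png', 'style_seg.png', (512, 512), (512, 512))
# c_masks.size()  ->  torch.Size([9, 512, 512])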
def print_options(args):
args_dict = vars(args)
option_path = os.path.join(args.output_path, 'options.txt')
with open(option_path, 'w+') as f:
print('------------------args---------------------', file=f)
print('------------------args---------------------')
for arg_key in args_dict:
print(f'{arg_key}: {args_dict[arg_key]}', file=f)
print(f'{arg_key}: {args_dict[arg_key]}')
print('-------------------end----------------------', file=f)
print('-------------------end----------------------')
def adjust_learning_rate(optimizer, iteration, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
for param_group in optimizer.param_groups:
param_group['lr'] = args.lr / (1 + iteration * args.lr_decay_rate)
def adjust_tv_loss_weight(args, iteration, ori_tv_loss_weight):
args.tv_loss_weight = ori_tv_loss_weight / (1 + iteration * args.tv_loss_weight_decay_rate)
def draw_loss(loss_list, content_loss_list, perceptual_loss_list, tv_loss_list, save_path):
fig = plt.figure()
plt.subplot(221)
plt.plot(range(len(loss_list)), loss_list)
plt.xlabel('iteration')
plt.ylabel('loss')
plt.title('loss')
plt.subplot(222)
plt.plot(range(len(content_loss_list)), content_loss_list)
plt.xlabel('iteration')
plt.ylabel('content loss')
plt.title('content loss')
plt.subplot(223)
plt.plot(range(len(perceptual_loss_list)), perceptual_loss_list)
plt.xlabel('iteration')
plt.ylabel('perceptual_loss')
plt.title('perceptual_loss')
plt.subplot(224)
plt.plot(range(len(tv_loss_list)), tv_loss_list)
plt.xlabel('iteration')
plt.ylabel('tv_loss')
plt.title('tv_loss')
# save loss image
plt.tight_layout()
plt.savefig(save_path)
plt.close(fig)
def draw_loss_without_style(loss_list, content_loss_list, tv_loss_list, save_path):
fig = plt.figure()
plt.subplot(131)
plt.plot(range(len(loss_list)), loss_list)
plt.xlabel('iteration')
plt.ylabel('loss')
plt.title('loss')
plt.subplot(132)
plt.plot(range(len(content_loss_list)), content_loss_list)
plt.xlabel('iteration')
plt.ylabel('content loss')
plt.title('content loss')
plt.subplot(133)
plt.plot(range(len(tv_loss_list)), tv_loss_list)
plt.xlabel('iteration')
plt.ylabel('tv_loss')
plt.title('tv_loss')
# save loss image
plt.tight_layout()
plt.savefig(save_path)
plt.close(fig)
def draw_loss_(loss_list, content_loss_list, style_loss_list, tv_loss_list, save_path):
fig = plt.figure()
plt.subplot(221)
plt.plot(range(len(loss_list)), loss_list)
plt.xlabel('iteration')
plt.ylabel('loss')
plt.title('loss')
plt.subplot(222)
plt.plot(range(len(content_loss_list)), content_loss_list)
plt.xlabel('iteration')
plt.ylabel('content loss')
plt.title('content loss')
plt.subplot(223)
plt.plot(range(len(style_loss_list)), style_loss_list)
plt.xlabel('iteration')
plt.ylabel('style_loss')
plt.title('style_loss')
plt.subplot(224)
plt.plot(range(len(tv_loss_list)), tv_loss_list)
plt.xlabel('iteration')
plt.ylabel('tv_loss')
plt.title('tv_loss')
# save loss image
plt.tight_layout()
plt.savefig(save_path)
plt.close(fig)
def draw_loss_1(loss_list, content_loss_list, edge_loss_list, style_loss_list, tv_loss_list, save_path):
fig = plt.figure()
plt.subplot(231)
plt.plot(range(len(loss_list)), loss_list)
plt.xlabel('iteration')
plt.ylabel('loss')
plt.title('loss')
plt.subplot(232)
plt.plot(range(len(content_loss_list)), content_loss_list)
plt.xlabel('iteration')
plt.ylabel('content loss')
plt.title('content loss')
plt.subplot(233)
plt.plot(range(len(edge_loss_list)), edge_loss_list)
plt.xlabel('iteration')
plt.ylabel('edge_loss')
plt.title('edge_loss')
plt.subplot(234)
plt.plot(range(len(style_loss_list)), style_loss_list)
plt.xlabel('iteration')
plt.ylabel('style_loss')
plt.title('style_loss')
plt.subplot(235)
plt.plot(range(len(tv_loss_list)), tv_loss_list)
plt.xlabel('iteration')
plt.ylabel('tv_loss')
plt.title('tv_loss')
# save loss image
plt.tight_layout()
plt.savefig(save_path)
plt.close(fig)
def load_from_yml(args):
if args.config_path == '':
print(f'config path is null!')
return args
file = open(args.config_path)
config = yaml.safe_load(file)
args.description = config['description']
args.config_path = config['config_path']
args.content_img = config['content_img']
args.style_img = config['style_img']
args.content_dir = config['content_dir']
args.style_dir = config['style_dir']
args.segmentation_dir = config['segmentation_dir']
args.use_seg = config['use_seg']
args.resize = config['resize']
args.type = config['type']
args.encoder_path = config['encoder_path']
args.decoder_r11_path = config['decoder_r11_path']
args.decoder_r21_path = config['decoder_r21_path']
args.decoder_r31_path = config['decoder_r31_path']
args.decoder_r41_path = config['decoder_r41_path']
args.decoder_r51_path = config['decoder_r51_path']
args.layers = config['layers']
args.is_batch = config['is_batch']
args.is_combine = config['is_combine']
args.is_select = config['is_select']
args.select_content_list = config['select_content_list']
args.select_style_list = config['select_style_list']
args.orth_constraint = config['orth_constraint']
args.post_smoothing = config['post_smoothing']
args.fast = config['fast']
args.output_path = config['output_path']
args.gpu = config['gpu']
args.device = config['device']
args.max_use_num = config['max_use_num']
args.soft_lambda = config['soft_lambda']
args.k_cross = config['k_cross']
args.patch_size = config['patch_size']
args.style_weight = config['style_weight']
args.reduce_dim_type = config['reduce_dim_type']
args.dist_type = config['dist_type']
args.dim_thresh = config['dim_thresh']
args.skip_connection = config['skip_connection']
args.connect_weight = config['connect_weight']
args.skip_connection_type = config['skip_connection_type']
args.skip_connection_decoder_path = config['skip_connection_decoder_path']
return args
def main():
# size = [1, 512, 64, 64]
# patch_size = (1, 1)
# cf = torch.rand(size=size)
# a = batch_split(cf, patch_size=patch_size)
# res = batch_concatenate(a, origin_size=(64, 64), patch_size=patch_size)
# print(torch.sum(cf - res))
# a = torch.tensor([[[1, 2], [3, 4]],
# [[5, 6], [7, 8]]])
# b = a.reshape(8, 1)
# test adjust_tv_loss_weight()
ori_tv_weight = 1e-6
for iteration in range(1, 19572 * 4 + 1):
print(f'iteration={iteration}, weight={ori_tv_weight / (1 + iteration * 1e-3)}')
if __name__ == '__main__':
main()
# plt.figure(1)
# plt.subplot(221)
# plt.scatter([1, 3, 5], [2, 4, 6])
# plt.title('221')
# plt.xlabel('x1')
# plt.ylabel('y1')
#
# plt.subplot(222)
# plt.plot([1, 3, 5], [2, 4, 6])
# plt.title('222')
# plt.xlabel('x2')
# plt.ylabel('y2')
#
# plt.subplot(223)
# plt.plot([1, 3, 5], [2, 4, 6])
# plt.title('223')
# plt.xlabel('x3')
# plt.ylabel('y3')
#
# plt.subplot(224)
# plt.scatter([1, 3, 5], [2, 4, 6])
# plt.title('224')
# plt.xlabel('x4')
# plt.ylabel('y4')
#
# plt.tight_layout()
# plt.savefig('res.png')
```
#### File: JinShiyin/sast_backend/sockets.py
```python
import time
import os
from flask import session
from flask_socketio import (
SocketIO,
disconnect,
join_room,
leave_room,
emit, Namespace
)
from flask import Flask, render_template, request
from config.config import Config
import eventlet
eventlet.monkey_patch(thread=False)
import logging
logger = logging.getLogger('gunicorn.error')
socketio = SocketIO(async_mode='eventlet', cors_allowed_origins="*")
@socketio.on('connect')
def connect():
sid = request.sid
print(f'Socket connection created with {sid}')
@socketio.on('disconnect')
def disconnect():
sid = request.sid
print(f'Socket disconnected from {sid}')
def synthesis_complete(body):
"""
notify frontend that a stylization image is obtainable.
:param body: {
'content_id': content_id,
'style_id': style_id,
'stylization_id': stylization_id,
}
:return:
"""
print(f'notify fronted synthesis completely with body: {body}')
socketio.emit('onSynthesisCompleted', body, broadcast=True)
def synthesis_failed(body):
"""
notify frontend that synthesizing a stylization image failed.
:param body: {
'content_id': content_id,
'style_id': style_id,
'stylization_id': stylization_id,
}
:return:
"""
print(f'notify fronted synthesis failed with body: {body}')
socketio.emit('onSynthesisFailed', body, broadcast=True)
def synthesising(body):
"""
notify frontend with current synthesis progress.
The frontend must not fetch the stylization image from the backend while the image's status is 'SYNTHESISING'
:param body: {
'content_id': content_id,
'style_id': style_id,
'stylization_id': stylization_id,
'current_update_steps': 100,
'current_cost_time': 200,
'percent': 0.35, # 1 represent 'COMPLETE',otherwise it is 'SYNTHESISING',
'total_time': 10,
'total_update_steps': 10,
}
:return:
"""
print(f'notify fronted synthesis with body: {body}')
socketio.emit('onSynthesising', body, broadcast=True)
def mast_report(msg, res_queue):
start_time = time.time()
c_basename = os.path.splitext(msg['content_img_id'])[0]
s_basename = os.path.splitext(msg['style_img_id'])[0]
stylization_id = f'{c_basename}_{s_basename}.png'
while True:
if not res_queue.empty():
res_msg = res_queue.get()
body = {
'content_id': res_msg['content_img_id'],
'style_id': res_msg['style_img_id'],
'stylization_id': res_msg['stylized_img_id'],
}
synthesis_complete(body)
break
else:
time.sleep(0.5)
cost_time = time.time() - start_time
body = {
'content_id': msg['content_img_id'],
'style_id': msg['style_img_id'],
'stylization_id': stylization_id,
'current_update_steps': -1,
'current_cost_time': cost_time,
'percent': round(cost_time / Config.MAST_TOTAL_TIME * 100, 1),
# 1 represent 'COMPLETE',otherwise it is 'SYNTHESISING',
'total_time': Config.MAST_TOTAL_TIME,
'total_update_steps': -1,
}
synthesising(body)
``` |
{
"source": "JinshuChen/SteelDetection",
"score": 3
} |
#### File: SteelDetection/scripts/data_argumentation.py
```python
import os
from impy.ObjectDetectionDataset import *
# def main():
# # Define the path to images and annotations
# images_path = "/home/c/workspace/tf_models/research/SteelDetection/impy_train_data/circle_steel/img"
# annotations_path = "/home/c/workspace/tf_models/research/SteelDetection/impy_train_data/circle_steel/annotations"
# # Define the name of the dataset
# dbName = "circle_steel"
# # Create an object of ImageLocalizationDataset
# imda = ObjectDetectionDataset(imagesDirectory = images_path, annotationsDirectory = annotations_path, databaseName = dbName)
# # Reduce the dataset to smaller Rois of smaller ROIs of shape 1032x1032.
# images_output_path = "/home/c/workspace/tf_models/research/SteelDetection/impy_train_data/circle_steel/img_adapted"
# annotations_output_path = "/home/c/workspace/tf_models/research/SteelDetection/impy_train_data/circle_steel/annotations_adapted"
# imda.reduceDatasetByRois(offset = [640, 640], outputImageDirectory = images_output_path, outputAnnotationDirectory = annotations_output_path)
# if __name__ == "__main__":
# main()
def main():
# Define the path to images and annotations
images_path = "/home/c/workspace/tf_models/research/SteelDetection/impy_train_data/NHsquare_steel/img_adapted"
annotations_path = "/home/c/workspace/tf_models/research/SteelDetection/impy_train_data/NHsquare_steel/annotations_adapted"
# Define the name of the dataset
dbName = "NHsquare_steel"
# Create an object of ObjectDetectionDataset
imda = ObjectDetectionDataset(imagesDirectory = images_path, annotationsDirectory = annotations_path, databaseName = dbName)
# Apply data augmentation by using the following method of the ObjectDetectionDataset class.
configuration_file = "/home/c/workspace/tf_models/research/SteelDetection/impy_train_data/config2.json"
images_output_path = "/home/c/workspace/tf_models/research/SteelDetection/impy_train_data/NHsquare_steel/img_adapted"
annotations_output_path = "/home/c/workspace/tf_models/research/SteelDetection/impy_train_data/NHsquare_steel/annotations_adapted"
imda.applyDataAugmentation(configurationFile = configuration_file, outputImageDirectory = images_output_path, outputAnnotationDirectory = annotations_output_path)
if __name__ == "__main__":
main()
``` |
{
"source": "jinsian/VizAly-Foresight",
"score": 3
} |
#### File: pat/hacc/hacc_query.py
```python
import sys
import argparse
import numpy
from pat.utils import gioSqlite as gio_sqlite
def load_sqlite_data(path, query, sqlite_file):
""" Loads data using SQLite query.
"""
# load file
print("Reading {}...".format(path))
query_mgr = gio_sqlite.GioSqlite3()
query_mgr.loadGIOSqlite(sqlite_file)
# load data
i = 0
table_name = "foo_{}".format(i)
query_mgr.createTable(table_name, (path))
# execute query
query = query.replace("__TABLE__", table_name)
result = query_mgr.runQueryOutputList(query)
# typecast
result = numpy.array(result).flatten()
assert(len(result.shape) == 1)
return result
# parse command line
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--input-file", required=True)
parser.add_argument("--output-file", required=True)
parser.add_argument("--sqlite-file", default="/projects/exasky/visio/genericio/frontend/GenericIOSQLite.so")
parser.add_argument("--query", default="select fof_halo_mass from __TABLE__ ORDER BY fof_halo_mass")
parser.add_argument("--xlabel", default="Halo Mass")
parser.add_argument("--ylabel", default="Counts")
parser.add_argument("--xlim", nargs="+", type=float, default=[])
parser.add_argument("--bins", type=float, default=20)
parser.add_argument("--log-bins", action="store_true")
opts = parser.parse_args()
# read data
data = numpy.array(load_sqlite_data(opts.input_file, opts.query, opts.sqlite_file))
# update ranges
x_min = data.min() if not len(opts.xlim) > 0 else opts.xlim[0]
x_max = data.max() if not len(opts.xlim) > 1 else opts.xlim[1]
# set binning and range of histograms
# can do uniform in linear space or logarithmic space
if opts.log_bins:
bins = numpy.logspace(numpy.log10(x_min), numpy.log10(x_max), num=opts.bins)
bins_range = None
else:
bins = opts.bins
bins_range = (x_min, x_max)
# create histogram
hist, bin_edges = numpy.histogram(data, bins=bins, range=bins_range)
hist = numpy.hstack([(0), numpy.repeat(hist, 2), (0)])
bin_edges = numpy.hstack([(bin_edges[0], bin_edges[0]),
numpy.repeat(bin_edges[1:-1], 2),
(bin_edges[-1], bin_edges[-1])])
# save results
delimiter = ","
results = numpy.column_stack([bin_edges, hist])
header = delimiter.join(map(str, [opts.xlabel, opts.ylabel]))
numpy.savetxt(opts.output_file, results, header=header, delimiter=delimiter)
print("Done!")
```
#### File: pat/nyx/cinema.py
```python
import sys
import argparse
import os
import csv
import operator
from pat.utils import file_utilities as futils
from pat.utils import plot_utilities as putils
from pat.utils import cinema
from pat.utils import job as j
class nyx_cinema(cinema.CinemaWorkflow):
def prepare_cinema(self):
# Open CSV file
if "metrics-csv" in self.json_data["input"]:
metrics_csv = self.json_data["input"]["metrics-csv"]
else:
metrics_csv = self.json_data["project-home"] + self.json_data['wflow-path'] + "/cbench/" + self.json_data['cbench']['output']['metrics-file'] + ".csv"
output_file_name = self.json_data["project-home"] + self.json_data['wflow-path'] + "/cinema/" + "data.csv"
all = []
#reader = futils.open_csv_file(metrics_csv)
with open(metrics_csv,'r') as csvinput:
reader = csv.reader(csvinput)
# Modify Cinema files
row = next(reader)
row.append('FILE_SimStats_Pk')
row.append('FILE_lya_all_axes_x_Pk')
row.append('FILE_lya_all_axes_y_Pk')
row.append('FILE_lya_all_axes_z_Pk')
all.append(row)
values = ["sim_stats_rhob.png", "sim_stats_rhodm.png", "sim_stats_temp.png", "sim_stats_velmag.png", "sim_stats_velmag.png", "sim_stats_vz.png"]
count = 0
for row in reader:
row.append(values[count])
row.append("lya_all_axes_x.png")
row.append("lya_all_axes_y.png")
row.append("lya_all_axes_z.png")
all.append(row)
count = count + 1
if (count == 6):
count = 0
futils.write_csv(output_file_name, all)
# Converts a string to an operator
def validate(self, operand, relate, result):
ops = { "<": operator.lt }
return ops[relate]( abs(operand-1.0), result)
# Process checks found in "cinema-plots" "plotting" "checks"
def is_valid(self, pk_ratio, range_count):
valid = True
for check in self.json_data['cinema-plots']['plotting']['checks']:
count = 0
for item in pk_ratio:
if count > range_count: # we only care about the set range
break
if not self.validate(item, check['operator'], check['result']):
valid = False
break
count = count + 1
return valid
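# Illustrative workflow-JSON snippet for the checks consumed above (keys inferred from this class):
# a curve is kept only if |pk_ratio - 1| satisfies every operator/threshold pair over the x-range.
# "cinema-plots": {
#   "plotting": {
#     "checks": [ { "operator": "<", "result": 0.01 } ]
#   }
# }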
def create_plots(self):
output_path = self.json_data['project-home'] + self.json_data['wflow-path']
output_plot_path = output_path + "/plots"
has_range = False;
if "x-range" in self.json_data['cinema-plots']['plotting']:
x_range = self.json_data['cinema-plots']['plotting']['x-range']
has_range = True
for ana in self.json_data['pat']['analysis']:
plot_title = ana['title']
to_plot = [] # all the items to plot
k_list = []
orig_pk = []
# Find the original file
for file in ana['files']:
if (file['name']=="orig"):
k_list = futils.extract_csv_col(file['path'], ' ', 2)
orig_pk = futils.extract_csv_col(file['path'], ' ', 3)
# Check range limit
# Check range limit: count how many k values fall inside the configured x-range (all of them if no range is set)
range_count = 0
for x in k_list:
if has_range and x > x_range[1]:
break
range_count = range_count + 1
# Process the other files
for file in ana['files']:
if (file['name']!="orig"):
temp_pk = futils.extract_csv_col(file['path'], ' ', 3)
if (temp_pk is not None):
pk_ratio = [i / j for i, j in zip(temp_pk, orig_pk)]
this_tuple = (pk_ratio, file['name']) #array, name
# Check if passes test
if "checks" in self.json_data["cinema-plots"]["plotting"]:
if self.is_valid(pk_ratio, range_count):
to_plot.append(this_tuple)
else:
to_plot.append(this_tuple)
putils.plotScatterGraph(k_list, 'k', 'pk-ratio', plot_title, output_plot_path, x_range, to_plot)
# Parse Input
parser = argparse.ArgumentParser()
parser.add_argument("--input-file")
opts = parser.parse_args()
# Create Cinema DB
cinema = nyx_cinema( opts.input_file )
cinema.create_plots()
cinema.create_cinema()
```
#### File: pat/utils/gioSqlite.py
```python
import sys
import io
dbConnLoaded = ""
try:
import apsw
dbConnLoaded = "apsw"
print("using aspw module for sqlite")
except ImportError:
try:
import sqlite3
dbConnLoaded = "sqlite3"
print("using sqlite3 module for sqlite")
except ImportError:
print ("You need either apsw or sqlite3")
sys.exit(0)
class GioSqlite3:
def __init__(self):
if dbConnLoaded == "sqlite3":
self.conn = sqlite3.connect(":memory:")
else:
self.conn = apsw.Connection(":memory:")
def __del__(self):
self.closeConn()
def loadGIOSqlite(self, sharedObjectPath):
try:
if dbConnLoaded == "sqlite3":
self.conn.enable_load_extension(True)
self.conn.load_extension(sharedObjectPath)
else:
self.conn.enableloadextension(True)
self.conn.loadextension(sharedObjectPath)
except Exception:
print ("Could not load shared object", sharedObjectPath, "!")
return -1
def createTable(self, tableName, inputfile):
query = "CREATE VIRTUAL TABLE " + tableName + " USING GenericIO('"+ inputfile +"')"
try:
self.conn.cursor().execute(query)
except Exception:
print ("Could not create table", tableName, "!")
return -1
def runQueryInteractive(self, queryString):
for row in self.conn.cursor().execute(queryString):
print (row)
def runQueryOutputFile(self, queryString, outputFilename):
target = open(outputFilename, 'w')
for row in self.conn.cursor().execute(queryString):
target.write( str(row) + '\n')
def runQueryOutputString(self, queryString):
outputString = ""
cursor = self.conn.cursor().execute(queryString)
results = cursor.fetchall()
for row in results:
outputString = outputString + str( row ) + " "'\n'
return outputString
def runQueryOutputCSV(self, queryString):
if dbConnLoaded != "apsw":
print ("csv currenly only works with apsw! Running non csv version")
return self.runQueryOutputString(queryString)
else:
output=io.StringIO()
self.shell = apsw.Shell(stdout=output, db=self.conn)
self.shell.process_command(".mode csv")
self.shell.process_command(".headers on")
self.shell.process_sql(queryString)
return output.getvalue()
def runQueryOutputList(self, queryString):
outputString = ""
cursor = self.conn.cursor().execute(queryString)
row = cursor.fetchone()
resultsList = []
while row is not None:
resultsList.append(row)
row = cursor.fetchone()
return resultsList
def getNumRanks(self, tableName):
query = "SELECT MAX(_rank) FROM " + tableName
cursor = self.conn.cursor().execute(query)
return cursor.fetchone()[0]
def closeConn(self):
self.conn.close()
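# Usage sketch (paths are illustrative): load the GenericIO SQLite extension, expose a
# GenericIO file as a virtual table, then query it.
# gio = GioSqlite3()
# gio.loadGIOSqlite("/path/to/GenericIOSQLite.so")
# gio.createTable("halos", "/path/to/m000.full.mpicosmo.499")
# rows = gio.runQueryOutputList("SELECT fof_halo_mass FROM halos LIMIT 10")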
``` |
{
"source": "jinsihou19/E-Paper-UI-Kit",
"score": 3
} |
#### File: jinsihou19/E-Paper-UI-Kit/demo.py
```python
from PIL import ImageFont
from EPUIKit import QuadrantsLayout, VirtualPaper
from EPUIKit.widget import Label
def font():
return ImageFont.load_default()
layout = QuadrantsLayout(border=1)
layout.add(Label('LEFT_TOP', font=font()), QuadrantsLayout.LEFT_TOP)
layout.add(Label('LEFT_BOTTOM', font=font()), QuadrantsLayout.LEFT_BOTTOM)
layout.add(Label('RIGHT_TOP', font=font()), QuadrantsLayout.RIGHT_TOP)
layout.add(Label('RIGHT_BOTTOM', font=font()), QuadrantsLayout.RIGHT_BOTTOM)
paper = VirtualPaper(layout, 144, 500)
paper.show()
```
#### File: E-Paper-UI-Kit/EPUIKit/date.py
```python
import time
from interval import Interval
def get_week_day(day):
week_day_dict = {
1: '一',
2: '二',
3: '三',
4: '四',
5: '五',
6: '六',
0: '日',
}
return week_day_dict[day]
def date_str():
return "{} {}".format(time.strftime("%Y/%m/%d"), get_week_day(int(time.strftime("%w"))))
def is_stock_time():
return time.strftime("%H:%M") in Interval("09:30", "15:00")
def is_screen_work_time():
return time.strftime("%H:%M") in Interval("06:00", "24:00")
``` |