# File: parser/fase2/team20/execution/executeSentence2.py (repo: LopDlMa/tytus)
from .AST.sentence import *
from .AST.expression import *
from .AST.error import *
import sys
sys.path.append("../")
from console import *
def executeSentence2(self, sentence):
if isinstance(sentence, CreateDatabase):
h=0
elif isinstance(sentence, ShowDatabases):
h=0
elif isinstance(sentence, DropDatabase):
h=0
elif isinstance(sentence,Use):
h=0
elif isinstance(sentence,CreateTable):
h=0
elif isinstance(sentence, CreateType):
h=0
elif isinstance(sentence, InsertAll):
h=0
elif isinstance(sentence, Insert):
h=0
elif isinstance(sentence, Delete):
archivo = open("C3D.py", 'a')
archivo.write("\n")
archivo.write("ICreateDatabase("+sentence.name+","+sentence.ifNotExistsFlag+","+sentence.OrReplace+","+sentence.OwnerMode+")")
archivo.close()
elif isinstance(sentence,Select):
print(sentence.columns)
#print(sentence.columns[0].function)
#print(sentence.columns[0].expression)
print(sentence.tables)
print(sentence.options)
elif isinstance(sentence,DropTable):
h=0
elif isinstance(sentence,AlterDatabaseRename):
archivo = open("C3D.py", 'a')
archivo.write("\n")
archivo.write("ICreateDatabase("+sentence.name+","+sentence.ifNotExistsFlag+","+sentence.OrReplace+","+sentence.OwnerMode+")")
archivo.close()
elif isinstance(sentence,Update):
h=0
elif isinstance(sentence,AlterTableDropConstraint):
archivo = open("C3D.py", 'a')
archivo.write("\n")
archivo.write("ICreateDatabase("+sentence.name+","+sentence.ifNotExistsFlag+","+sentence.OrReplace+","+sentence.OwnerMode+")")
archivo.close()
elif isinstance(sentence,AlterTableAlterColumnType):
h=0
elif isinstance(sentence, AlterTableAddColumn):
h=0
elif isinstance(sentence, AlterTableDropColumn):
archivo = open("C3D.py", 'a')
archivo.write("\n")
archivo.write("ICreateDatabase("+sentence.name+","+sentence.ifNotExistsFlag+","+sentence.OrReplace+","+sentence.OwnerMode+")")
archivo.close()
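
# Illustrative sketch (not part of the original module): every Delete/Alter branch
# above repeats the same open/append/close sequence against C3D.py. A helper such
# as the one below could express that pattern once with a context manager; the
# name `emit_c3d` is an assumption added for illustration only.
def emit_c3d(line):
    # Append one line of generated intermediate code to C3D.py.
    with open("C3D.py", "a") as archivo:
        archivo.write("\n")
        archivo.write(line)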
# File: src/test/cli/component.py (repo: huseyinbolt/cord-tester)
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Created on 24-Oct-2012
author:s: <NAME> ( <EMAIL> ),
<NAME>( <EMAIL> )
TestON is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
( at your option ) any later version.
TestON is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with TestON. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
from clicommon import *
class Component( object ):
"""
    This is the template class for components
"""
def __init__( self ):
self.default = ''
self.wrapped = sys.modules[ __name__ ]
self.count = 0
def __getattr__( self, name ):
"""
        Invoked when an attribute isn't found through the usual lookup.
        It will look for assert_attribute and execute it when an
        AttributeError occurs, returning the result of the
        assert_attribute.
"""
try:
return getattr( self.wrapped, name )
except AttributeError as error:
# NOTE: The first time we load a driver module we get this error
            if "'module' object has no attribute '__path__'" in str( error ):
pass
else:
main.log.error( str(error.__class__) + " " + str(error) )
try:
def experimentHandling( *args, **kwargs ):
if main.EXPERIMENTAL_MODE == main.TRUE:
result = self.experimentRun( *args, **kwargs )
main.log.info( "EXPERIMENTAL MODE. API " +
str( name ) +
" not yet implemented. " +
"Returning dummy values" )
return result
else:
return main.FALSE
return experimentHandling
except TypeError as e:
            main.log.error( "Arguments for experimental mode do not" +
                            " have key 'RETURNS': " + str( e ) )
def connect( self ):
vars( main )[ self.name + 'log' ] = logging.getLogger( self.name )
session_file = main.logdir + "/" + self.name + ".session"
self.log_handler = logging.FileHandler( session_file )
self.log_handler.setLevel( logging.DEBUG )
vars( main )[ self.name + 'log' ].setLevel( logging.DEBUG )
_formatter = logging.Formatter(
"%(asctime)s %(name)-10s: %(levelname)-8s: %(message)s" )
self.log_handler.setFormatter( _formatter )
vars( main )[ self.name + 'log' ].addHandler( self.log_handler )
# Adding header for the component log
vars( main )[ self.name + 'log' ].info( main.logHeader )
# Opening the session log to append command's execution output
self.logfile_handler = open( session_file, "w" )
return "Dummy"
def execute( self, cmd ):
return main.TRUE
# import commands
# return commands.getoutput( cmd )
def disconnect( self ):
return main.TRUE
def config( self ):
self = self
# Need to update the configuration code
def cleanup( self ):
return main.TRUE
def log( self, message ):
"""
        Find the logger of the component that this log message belongs
        to, based on the calling child object.
"""
vars( main )[ self.name + 'log' ].info( "\n" + message + "\n" )
def close_log_handles( self ):
vars( main )[ self.name + 'log' ].removeHandler( self.log_handler )
if self.logfile_handler:
self.logfile_handler.close()
def get_version( self ):
return "Version unknown"
def experimentRun( self, *args, **kwargs ):
# FIXME handle *args
args = utilities.parse_args( [ "RETURNS" ], **kwargs )
return args[ "RETURNS" ]
if __name__ != "__main__":
import sys
sys.modules[ __name__ ] = Component()
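
# Illustrative sketch (not part of the original file): TestON replaces this module
# with a Component instance (see above), and concrete drivers are expected to
# subclass Component and rely on the global `main` object for logging and flags.
# The names below are assumptions added for illustration only.
#
#     class ExampleDriver( Component ):
#         def __init__( self ):
#             super( ExampleDriver, self ).__init__()
#             self.name = "example"
#
#     driver = ExampleDriver()
#     driver.connect()                                    # wires up a per-component logger
#     driver.someUnimplementedApi( RETURNS=main.TRUE )    # handled by __getattr__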
# File: python-framework/handlers/base/auth.py (repo: huangxingx/python-framework)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: x.huang
# @date:17-8-4
import logging
from pony.orm import db_session
from handlers.base.base import BaseRequestHandler
class LoginRequireError(Exception):
pass
class AuthBaseHandler(BaseRequestHandler):
    """ Base handler class that requires login authentication. """
def prepare(self):
if not self.current_user and self.request.method.lower() != 'options':
self.render_error('Auth Error.', status_code=401)
super(AuthBaseHandler, self).prepare()
class Authentication(object):
def __init__(self, handler):
self.handler = handler
def admin_auth(self, username, password):
try:
with db_session:
user_obj = self.handler.m_useradmin.get(username=username, is_delete=False)
if user_obj:
is_auth = user_obj.check_password(password)
if is_auth:
user_dict = user_obj.to_dict(exclude=self.handler.m_useradmin.password.column)
user_dict['permission'] = user_obj.role_id.permission if user_obj.role_id else None
return user_dict
else:
return None
except Exception as e:
logging.error(str(e))
return None
def api_auth(self, phone, password, sc_auth=False):
try:
with db_session:
user_obj = self.handler.m_appuser.get(phone=phone, is_delete=False)
if user_obj:
is_auth = False
if password:
is_auth = user_obj.check_password(password)
if sc_auth or is_auth:
user_dict = user_obj.to_dict()
return user_dict
else:
return None
except Exception as e:
logging.error(str(e))
return None
def web_auth(self, username, password):
try:
with db_session:
user_obj = self.handler.m_comuser.get(com_username=username, is_delete=False)
if user_obj:
is_auth = False
if password:
is_auth = user_obj.check_password(password)
if is_auth:
user_dict = user_obj.to_dict()
return user_dict
else:
return None
except Exception as e:
logging.error(str(e))
return None
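
# Illustrative sketch (not part of the original file): a request handler that
# exposes the ORM entities used above (m_useradmin, m_appuser, m_comuser) can
# delegate credential checks to Authentication. Names and values below are
# assumptions added for illustration only.
#
#     auth = Authentication(handler)
#     admin = auth.admin_auth("admin", "secret")     # dict without the password column, or None
#     app_user = auth.api_auth("13800000000", "pw")  # phone login; sc_auth=True skips the password check
#     web_user = auth.web_auth("company", "pw")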
# File: pysol/core/helpers.py (repo: lotfio/pysol)
# -*- coding: utf-8 -*-
#| This file is part of cony
#|
#| @package Pysol python cli application
#| @author <<NAME>>
#| @license MIT
#| @version 0.1.0
#| @copyright 2019 <NAME>
import sys
# load module function
# this function loads a module by string name
def load_module(module):
module_path = module
if module_path in sys.modules:
return sys.modules[module_path]
    return __import__(module_path, fromlist=[module])
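
# Illustrative usage (not part of the original module): load_module resolves a
# module by its dotted name and returns the already-imported module when it is
# present in sys.modules. "json" is just an example module name.
if __name__ == "__main__":
    json_mod = load_module("json")
    print(json_mod.dumps({"loaded": True}))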
# File: autograd_hacks/test_autograd_hacks.py (repo: jusjusjus/autograd-hacks)
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytest
from . import autograd_hacks
class StriddenNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 20, 5, stride=2, padding=2)
self.conv2 = nn.Conv2d(20, 30, 5, stride=2, padding=2)
self.fc1_input_size = 7 * 7 * 30
self.fc1 = nn.Linear(self.fc1_input_size, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
batch_size = x.shape[0]
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = x.view(batch_size, self.fc1_input_size)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
class SimpleNet(nn.Module):
    """Minimal single-linear-layer classifier (flattened 28x28 input, 10 classes)"""
def __init__(self):
super().__init__()
self.linear = nn.Linear(28 * 28, 10)
def forward(self, x):
x = torch.flatten(x, 1)
return self.linear(x)
class Net(nn.Module):
"""Lenet-5 from https://github.com/pytorch/examples/blob/master/mnist/main.py"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 20, 5)
self.conv2 = nn.Conv2d(20, 50, 5)
self.fc1 = nn.Linear(4 * 4 * 50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 4 * 4 * 50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
class TinyNet(nn.Module):
"""Tiny LeNet-5 for Hessian testing"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 2, 2, 1)
self.conv2 = nn.Conv2d(2, 2, 2, 1)
self.fc1 = nn.Linear(2, 2)
self.fc2 = nn.Linear(2, 10)
def forward(self, x): # 28x28
x = F.max_pool2d(x, 4, 4) # 7x7
x = F.relu(self.conv1(x)) # 6x6
x = F.max_pool2d(x, 2, 2) # 3x3
x = F.relu(self.conv2(x)) # 2x2
x = F.max_pool2d(x, 2, 2) # 1x1
x = x.view(-1, 2 * 1 * 1) # C * W * H
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
# Autograd helpers, from https://gist.github.com/apaszke/226abdf867c4e9d6698bd198f3b45fb7
def jacobian(y: torch.Tensor, x: torch.Tensor, create_graph=False):
jac = []
flat_y = y.reshape(-1)
grad_y = torch.zeros_like(flat_y)
for i in range(len(flat_y)):
grad_y[i] = 1.
grad_x, = torch.autograd.grad(flat_y, x, grad_y, retain_graph=True, create_graph=create_graph)
jac.append(grad_x.reshape(x.shape))
grad_y[i] = 0.
return torch.stack(jac).reshape(y.shape + x.shape)
def hessian(y: torch.Tensor, x: torch.Tensor):
return jacobian(jacobian(y, x, create_graph=True), x)
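
# Illustrative check (not part of the original file): for y = sum(x_i^2) the Hessian
# is 2*I, which the jacobian/hessian helpers above should reproduce exactly.
def test_autograd_helpers_on_quadratic():
    x = torch.ones(3, requires_grad=True)
    y = (x * x).sum()
    assert torch.allclose(hessian(y, x), 2 * torch.eye(3))
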
@pytest.mark.parametrize("Net", [Net, TinyNet, SimpleNet, StriddenNet])
def test_grad1(Net):
torch.manual_seed(1)
model = Net()
loss_fn = nn.CrossEntropyLoss()
n = 4
data = torch.rand(n, 1, 28, 28)
targets = torch.LongTensor(n).random_(0, 10)
autograd_hacks.add_hooks(model)
output = model(data)
loss_fn(output, targets).backward(retain_graph=True)
autograd_hacks.compute_grad1(model)
autograd_hacks.disable_hooks()
# Compare values against autograd
losses = torch.stack([loss_fn(output[i:i+1], targets[i:i+1])
for i in range(len(data))])
for layer in model.modules():
if not autograd_hacks.is_supported(layer):
continue
for param in layer.parameters():
assert torch.allclose(param.grad, param.grad1[0].mean(dim=0))
assert torch.allclose(jacobian(losses, param), param.grad1[0])
def test_applying_backwards_twice_fails():
torch.manual_seed(42)
model = Net()
loss_fn = nn.CrossEntropyLoss()
data = torch.rand(5, 1, 28, 28)
targets = torch.LongTensor(5).random_(0, 10)
autograd_hacks.add_hooks(model)
output = model(data)
loss_fn(output, targets).backward()
output = model(data)
with pytest.raises(AssertionError):
loss_fn(output, targets).backward()
def test_grad1_for_multiple_connected_passes():
torch.manual_seed(42)
model = SimpleNet()
loss_fn = nn.CrossEntropyLoss(reduction='sum')
def get_data(batch_size):
return (torch.rand(batch_size, 1, 28, 28),
torch.LongTensor(batch_size).random_(0, 10))
n = 5
autograd_hacks.add_hooks(model)
data, targets = get_data(n)
output = model(data)
loss1 = loss_fn(output, targets)
data, targets = get_data(n)
output = model(data)
loss2 = loss_fn(output, targets)
loss = loss1 - loss2
loss.backward()
autograd_hacks.compute_grad1(model)
autograd_hacks.disable_hooks()
for n, p in model.named_parameters():
grad1 = p.grad1[0] + p.grad1[1]
assert p.grad.shape == grad1.shape[1:]
assert torch.allclose(p.grad, grad1.mean(dim=0), atol=1e-7)
@pytest.mark.parametrize("hess_type", ['CrossEntropy', 'LeastSquares'])
def test_hess(hess_type):
torch.manual_seed(1)
model = TinyNet()
def least_squares_loss(data_, targets_):
assert len(data_) == len(targets_)
err = data_ - targets_
return torch.sum(err * err) / 2 / len(data_)
n = 3
data = torch.rand(n, 1, 28, 28)
autograd_hacks.add_hooks(model)
output = model(data)
if hess_type == 'LeastSquares':
targets = torch.rand(output.shape)
loss_fn = least_squares_loss
elif hess_type == 'CrossEntropy':
targets = torch.LongTensor(n).random_(0, 10)
loss_fn = nn.CrossEntropyLoss()
else:
raise ValueError(f"Unknown hessian type")
autograd_hacks.backprop_hess(output, hess_type)
autograd_hacks.clear_backprops(model)
autograd_hacks.backprop_hess(output, hess_type)
autograd_hacks.compute_hess(model)
autograd_hacks.disable_hooks()
for layer in model.modules():
if not autograd_hacks.is_supported(layer):
continue
for param in layer.parameters():
loss = loss_fn(output, targets)
hess_autograd = hessian(loss, param)
hess = param.hess
assert torch.allclose(hess, hess_autograd.reshape(hess.shape))
# File: tests/test_command.py (repo: paulfurley/Mailpile)
import unittest
import mailpile
from mock import patch
from mailpile.commands import Action as action
from tests import MailPileUnittest
class TestCommands(MailPileUnittest):
def test_index(self):
res = self.mp.rescan()
self.assertEqual(res.as_dict()["status"], 'success')
def test_search(self):
# A random search must return results in less than 0.2 seconds.
res = self.mp.search("foo")
self.assertLess(float(res.as_dict()["elapsed"]), 0.2)
def test_optimize(self):
res = self.mp.optimize()
self.assertEqual(res.as_dict()["result"], True)
def test_set(self):
self.mp.set("prefs.num_results=1")
results = self.mp.search("twitter")
self.assertEqual(results.result['stats']['count'], 1)
def test_unset(self):
self.mp.unset("prefs.num_results")
results = self.mp.search("twitter")
self.assertEqual(results.result['stats']['count'], 3)
def test_add(self):
res = self.mp.add("tests")
self.assertEqual(res.as_dict()["result"], True)
def test_add_mailbox_already_in_pile(self):
res = self.mp.add("tests")
self.assertEqual(res.as_dict()["result"], True)
def test_add_mailbox_no_such_directory(self):
res = self.mp.add("wut?")
self.assertEqual(res.as_dict()["result"], False)
def test_output(self):
res = self.mp.output("json")
self.assertEqual(res.as_dict()["result"], {'output': 'json'})
def test_help(self):
res = self.mp.help()
self.assertEqual(len(res.result), 3)
def test_help_variables(self):
res = self.mp.help_variables()
self.assertGreater(len(res.result['variables']), 1)
def test_help_with_param_search(self):
res = self.mp.help('search')
self.assertEqual(res.result['pre'], 'Search your mail!')
def test_help_urlmap_as_text(self):
res = self.mp.help_urlmap()
self.assertEqual(len(res.result), 1)
self.assertGreater(res.as_text(), 0)
def test_crypto_policy_auto_set_all_action(self):
res = self.mp.crypto_policy_auto_set_all()
self.assertEqual(res.as_dict()["message"], u'Discovered crypto policy')
self.assertEqual(set(), res.as_dict()['result'])
def test_crypto_policy_action(self):
res = self.mp.crypto_policy("foobar")
self.assertEqual(res.as_dict()["message"], u'Crypto policy for foobar is none')
self.assertEqual(res.as_dict()["result"], 'none')
class TestCommandResult(MailPileUnittest):
def test_command_result_as_dict(self):
res = self.mp.help_splash()
self.assertGreater(len(res.as_dict()), 0)
def test_command_result_as_text(self):
res = self.mp.help_splash()
self.assertGreater(res.as_text(), 0)
def test_command_result_as_text_for_boolean_result(self):
res = self.mp.rescan()
self.assertEquals(res.result['messages'], 0)
self.assertEquals(res.result['mailboxes'], 0)
self.assertEquals(res.result['vcards'], 0)
def test_command_result_non_zero(self):
res = self.mp.help_splash()
self.assertTrue(res)
def test_command_result_as_json(self):
res = self.mp.help_splash()
self.assertGreater(res.as_json(), 0)
def test_command_result_as_html(self):
res = self.mp.help_splash()
self.assertGreater(res.as_html(), 0)
class TestTagging(MailPileUnittest):
def test_addtag(self):
pass
class TestGPG(MailPileUnittest):
def test_key_search(self):
gpg_result = {
"D13C70DA": {
"uids": [
{
"email": "<EMAIL>"
}
]
}
}
with patch('mailpile.commands.GnuPG') as gpg_mock:
gpg_mock.return_value.search_key.return_value = gpg_result
res = action(self.mp._session, "crypto/gpg/searchkey", "D13C70DA")
email = res.result["D13C70DA"]["uids"][0]["email"]
self.assertEqual(email, "<EMAIL>")
gpg_mock.return_value.search_key.assert_called_with("D13C70DA")
def test_key_receive(self):
gpg_result = {
"updated": [
{
"fingerprint": "<KEY>"
}
]
}
with patch('mailpile.commands.GnuPG') as gpg_mock:
gpg_mock.return_value.recv_key.return_value = gpg_result
res = action(self.mp._session, "crypto/gpg/receivekey", "D13C70DA")
self.assertEqual(res.result[0]["updated"][0]["fingerprint"],
"<KEY>")
gpg_mock.return_value.recv_key.assert_called_with("D13C70DA")
def test_key_import(self):
res = action(self.mp._session, "crypto/gpg/importkey",
'testing/pub.key')
self.assertEqual(res.result["results"]["count"], 1)
def test_nicknym_get_key(self):
pass
def test_nicknym_refresh_key(self):
pass
if __name__ == '__main__':
unittest.main()
# File: navrep/envs/rosnavtrainencodedenv.py (repo: ReykCS/navrep)
from gym import spaces
import numpy as np
from scipy import interpolate
import yaml
from navrep.envs.navreptrainenv import NavRepTrainEnv
from navrep.rosnav_models.utils.reward import RewardCalculator
class RosnavTrainEncodedEnv(NavRepTrainEnv):
""" takes a (2) action as input
outputs encoded obs (546) """
def __init__(self, roboter_yaml_path, roboter="tb3",
reward_fnc="rule_00", scenario='test',
silent=False, adaptive=True, max_steps_per_episode=500):
super(RosnavTrainEncodedEnv, self).__init__(scenario=scenario, silent=silent, adaptive=adaptive,
legacy_mode=False, collect_statistics=True)
self.setup_by_configuration(roboter_yaml_path)
min, max = self._get_action_space(roboter)
self.action_space = spaces.Box(
low=np.array(min),
high=np.array(max),
dtype=np.float,
)
self.observation_space = spaces.Box(
low=0,
high=np.inf,
shape=(self._laser_num_beams + 2,),
dtype=np.float32,
)
self.reward_calculator = RewardCalculator(
robot_radius=self._robot_radius,
safe_dist=1.6 * self._robot_radius,
goal_radius=0.1,
rule=reward_fnc,
extended_eval=True,
)
self._steps_curr_episode = 0
self._max_steps_per_episode = max_steps_per_episode
self.last_observation = None
self.roboter = roboter
def _get_action_space(self, roboter):
if roboter == "ridgeback":
return [self.linear_range[0], 0, self.angular_range[0]], [self.linear_range[1], 0.5, self.angular_range[1]]
return [self.linear_range[0], self.angular_range[0]], [self.linear_range[1], self.angular_range[1]]
def _get_action(self, action):
if self.roboter == "ridgeback":
return np.array(action)
return np.array([action[0], 0, action[1]])
def _get_observation_from_scan(self, obs):
if self.roboter == "tb3":
lidar_upsampling = 1080 // 360
downsampled_scan = obs.reshape((-1, lidar_upsampling))
downsampled_scan = np.min(downsampled_scan, axis=1)
return downsampled_scan
if self.roboter == "jackal" or self.roboter == "ridgeback":
rotated_scan = np.zeros_like(obs)
rotated_scan[:540] = obs[540:]
rotated_scan[540:] = obs[:540]
downsampled = np.zeros(810)
downsampled[:405] = rotated_scan[135:540]
downsampled[405:] = rotated_scan[540:945]
f = interpolate.interp1d(np.arange(0, 810), downsampled)
upsampled = f(np.linspace(0, 810 - 1, 944))
lidar = upsampled.reshape((-1, 2))
lidar = np.min(lidar, axis=1)
return lidar
if self.roboter == "agv":
rotated_scan = np.zeros_like(obs)
rotated_scan[:540] = obs[540:]
rotated_scan[540:] = obs[:540]
downsampled = np.zeros(540)
downsampled[:270] = rotated_scan[270:540]
downsampled[270:] = rotated_scan[540:810]
f = interpolate.interp1d(np.arange(0, 540), downsampled)
return f(np.linspace(0.0, 540 - 1, 720))
def step(self, action):
self._steps_curr_episode += 1
action_encoded = self._get_action(action)
obs, reward, done, info = super(RosnavTrainEncodedEnv, self).step(action_encoded)
lidar, rho, theta = self._encode_obs(obs)
# reward, reward_info = self.reward_calculator.get_reward(
# np.array(lidar),
# (rho, theta),
# action=np.array([action_encoded[0], action_encoded[2]]),
# global_plan=None,
# robot_pose=None
# )
# done = reward_info["is_done"]
print(reward)
# done = reward_info["is_done"]
observation = np.hstack([lidar, np.array([rho, theta])])
# if done:
# info["done_reason"] = reward_info["done_reason"]
# info["is_success"] = reward_info["is_success"]
# if self._steps_curr_episode > self._max_steps_per_episode:
# done = True
# info["done_reason"] = 0
# info["is_success"] = 0
# if done:
# observation = self.reset()
return observation, 100, done, info
def reset(self, *args, **kwargs):
self.reward_calculator.reset()
self._steps_curr_episode = 0
obs = super(RosnavTrainEncodedEnv, self).reset(*args, **kwargs)
observation, rho, theta = self._encode_obs(obs)
return np.hstack([observation, np.array([rho, theta])])
def _encode_obs(self, obs):
scan, robotstate = obs
lidar = [np.min([self.laser_range, i]) for i in self._get_observation_from_scan(scan)]
self.last_rosnav_scan = lidar
rho, theta = self._get_goal_pose_in_robot_frame(robotstate[:2])
return lidar, rho, theta
def close(self):
super(RosnavTrainEncodedEnv, self).close()
def render(self, mode="human", close=False, save_to_file=False,
robocentric=False, render_decoded_scan=True):
#super(RosnavTrainEncodedEnv, self).render(
# mode=mode, close=close, lidar_scan_override=self.last_rosnav_scan, save_to_file=save_to_file,
# robocentric=robocentric)
pass
def _get_goal_pose_in_robot_frame(self, goal_pos):
y_relative = goal_pos[1]
x_relative = goal_pos[0]
rho = (x_relative ** 2 + y_relative ** 2) ** 0.5
theta = (np.arctan2(y_relative, x_relative) + 4 * np.pi) % (2 * np.pi) - np.pi
return rho, theta
def setup_by_configuration(
self, robot_yaml_path
):
"""get the configuration from the yaml file, including robot radius, discrete action space and continuous action space.
Args: linear_range
linear_ranger): [description]
"""
with open(robot_yaml_path, "r") as fd:
robot_data = yaml.safe_load(fd)
# get robot radius
for body in robot_data["bodies"]:
if body["name"] == "base_footprint":
for footprint in body["footprints"]:
if footprint["radius"]:
self._robot_radius = footprint["radius"] * 1.05
# get laser related information
for plugin in robot_data["plugins"]:
if plugin["type"] == "Laser":
laser_angle_min = plugin["angle"]["min"]
laser_angle_max = plugin["angle"]["max"]
laser_angle_increment = plugin["angle"]["increment"]
self.laser_range = plugin["range"]
self._laser_num_beams = int(
round(
(laser_angle_max - laser_angle_min)
/ laser_angle_increment
)
+ 1
)
self._laser_max_range = plugin["range"]
self.linear_range = robot_data["robot"]["continuous_actions"]["linear_range"]
self.angular_range = robot_data["robot"]["continuous_actions"]["angular_range"]
@staticmethod
def _stack_spaces(ss):
low = []
high = []
for space in ss:
low.extend(space.low.tolist())
high.extend(space.high.tolist())
        return spaces.Box(np.array(low).flatten(), np.array(high).flatten())
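
# Illustrative sketch (not part of the original file): constructing the wrapper and
# stepping it with the 2-element action described in the class docstring. The yaml
# path below is an assumption added for illustration only.
#
#     env = RosnavTrainEncodedEnv("configs/tb3.model.yaml", roboter="tb3")
#     obs = env.reset()                                  # lidar beams + [rho, theta]
#     obs, reward, done, info = env.step(np.array([0.3, 0.0]))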
# File: pdm/models/repositories.py (repo: gaojiuli/pdm)
from __future__ import annotations
import sys
from functools import wraps
from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Tuple
from pdm._types import CandidateInfo, Source
from pdm.context import context
from pdm.exceptions import CandidateInfoNotFound, CorruptedCacheError
from pdm.models.candidates import Candidate
from pdm.models.requirements import (
Requirement,
filter_requirements_with_extras,
parse_requirement,
)
from pdm.models.specifiers import PySpecSet, SpecifierSet
from pdm.utils import allow_all_wheels
if TYPE_CHECKING:
from pdm.models.environment import Environment
def cache_result(
func: Callable[["BaseRepository", Candidate], CandidateInfo]
) -> Callable[["BaseRepository", Candidate], CandidateInfo]:
@wraps(func)
def wrapper(self, candidate: Candidate) -> CandidateInfo:
result = func(self, candidate)
self._candidate_info_cache.set(candidate, result)
return result
return wrapper
class BaseRepository:
"""A Repository acts as the source of packages and metadata."""
def __init__(self, sources: List[Source], environment: Environment) -> None:
"""
:param sources: a list of sources to download packages from.
:param environment: the bound environment instance.
"""
self.sources = sources
self.environment = environment
self._candidate_info_cache = context.make_candidate_info_cache()
self._hash_cache = context.make_hash_cache()
def get_filtered_sources(self, req: Requirement) -> List[Source]:
"""Get matching sources based on the index attribute."""
if not req.index:
return self.sources
return [source for source in self.sources if source["name"] == req.index]
def get_dependencies(
self, candidate: Candidate
) -> Tuple[List[Requirement], PySpecSet, str]:
"""Get (dependencies, python_specifier, summary) of the candidate."""
requirements, requires_python, summary = [], "", ""
last_ext_info = None
for getter in self.dependency_generators():
try:
requirements, requires_python, summary = getter(candidate)
except CandidateInfoNotFound:
last_ext_info = sys.exc_info()
continue
break
else:
if last_ext_info is not None:
raise last_ext_info[1].with_traceback(last_ext_info[2])
requirements = [parse_requirement(line) for line in requirements]
if candidate.req.extras:
# HACK: If this candidate has extras, add the original candidate
# (same pinned version, no extras) as its dependency. This ensures
# the same package with different extras (treated as distinct by
# the resolver) have the same version.
self_req = candidate.req.copy()
self_req.extras = None
requirements.append(self_req)
return requirements, PySpecSet(requires_python), summary
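
    # Illustrative note (not part of the original file): with the extras handling
    # above, resolving e.g. "requests[socks]" also adds a plain "requests" node
    # pinned to the same version, so the extra-ed and plain candidates cannot
    # diverge. The package name is an assumption used for illustration only.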
def find_matches(
self,
requirement: Requirement,
requires_python: PySpecSet = PySpecSet(),
allow_prereleases: Optional[bool] = None,
allow_all: bool = False,
) -> List[Candidate]:
"""Find matching candidates of a requirement.
:param requirement: the given requirement.
:param requires_python: the Python version constraint.
:param allow_prereleases: whether allow prerelease versions, or let us determine
if not given. If no non-prerelease is available, prereleases will be used.
:param allow_all: whether allow all wheels.
:returns: a list of candidates.
"""
if requirement.is_named:
return self._find_named_matches(
requirement, requires_python, allow_prereleases, allow_all
)
else:
# Fetch metadata so that resolver can know the candidate's name.
can = Candidate(requirement, self.environment)
can.get_metadata()
return [can]
def _find_named_matches(
self,
requirement: Requirement,
requires_python: PySpecSet = PySpecSet(),
allow_prereleases: Optional[bool] = None,
allow_all: bool = False,
) -> List[Candidate]:
"""Find candidates of the given NamedRequirement. Let it to be implemented in
subclasses.
"""
raise NotImplementedError
def _get_dependencies_from_cache(self, candidate: Candidate) -> CandidateInfo:
try:
result = self._candidate_info_cache.get(candidate)
except CorruptedCacheError:
self._candidate_info_cache.clear()
raise CandidateInfoNotFound(candidate)
except KeyError:
raise CandidateInfoNotFound(candidate)
return result
@cache_result
def _get_dependencies_from_metadata(self, candidate: Candidate) -> CandidateInfo:
deps = candidate.get_dependencies_from_metadata()
requires_python = candidate.requires_python
summary = candidate.metadata.summary
return deps, requires_python, summary
def get_hashes(self, candidate: Candidate) -> Optional[Dict[str, str]]:
"""Get hashes of all possible installable candidates of a given package version.
"""
if (
candidate.req.is_vcs
or candidate.req.is_file_or_url
and candidate.req.is_local_dir
):
return
if candidate.hashes:
return candidate.hashes
req = candidate.req.copy()
req.specifier = SpecifierSet(f"=={candidate.version}")
matching_candidates = self.find_matches(req, allow_all=True)
with self.environment.get_finder(self.sources) as finder:
self._hash_cache.session = finder.session
return {
c.link.filename: self._hash_cache.get_hash(c.link)
for c in matching_candidates
}
def dependency_generators(self) -> Iterable[Callable[[Candidate], CandidateInfo]]:
"""Return an iterable of getter functions to get dependencies, which will be
called one by one.
"""
raise NotImplementedError
class PyPIRepository(BaseRepository):
"""Get package and metadata from PyPI source."""
@cache_result
def _get_dependencies_from_json(self, candidate: Candidate) -> CandidateInfo:
if not candidate.name or not candidate.version:
# Only look for json api for named requirements.
raise CandidateInfoNotFound(candidate)
sources = self.get_filtered_sources(candidate.req)
url_prefixes = [
proc_url[:-7] # Strip "/simple".
for proc_url in (
raw_url.rstrip("/")
for raw_url in (source.get("url", "") for source in sources)
)
if proc_url.endswith("/simple")
]
with self.environment.get_finder(sources) as finder:
session = finder.session
for prefix in url_prefixes:
json_url = f"{prefix}/pypi/{candidate.name}/{candidate.version}/json"
resp = session.get(json_url)
if not resp.ok:
continue
info = resp.json()["info"]
requires_python = info["requires_python"] or ""
summary = info["summary"] or ""
try:
requirement_lines = info["requires_dist"] or []
except KeyError:
requirement_lines = info["requires"] or []
requirements = filter_requirements_with_extras(
requirement_lines, candidate.req.extras or ()
)
return requirements, requires_python, summary
raise CandidateInfoNotFound(candidate)
def dependency_generators(self) -> Iterable[Callable[[Candidate], CandidateInfo]]:
return (
self._get_dependencies_from_cache,
self._get_dependencies_from_json,
self._get_dependencies_from_metadata,
)
def _find_named_matches(
self,
requirement: Requirement,
requires_python: PySpecSet = PySpecSet(),
allow_prereleases: Optional[bool] = None,
allow_all: bool = False,
) -> List[Candidate]:
sources = self.get_filtered_sources(requirement)
# `allow_prereleases` is None means leave it to specifier to decide whether to
# include prereleases
if allow_prereleases is None:
allow_prereleases = requirement.allow_prereleases
with self.environment.get_finder(sources) as finder, allow_all_wheels():
cans = [
Candidate.from_installation_candidate(c, requirement, self.environment)
for c in finder.find_all_candidates(requirement.project_name)
]
sorted_cans = sorted(
(
c
for c in cans
if requirement.specifier.contains(c.version, allow_prereleases)
and (allow_all or requires_python.is_subset(c.requires_python))
),
key=lambda c: (c.version, c.link.is_wheel),
)
if not sorted_cans and allow_prereleases is None:
# No non-pre-releases is found, force pre-releases now
sorted_cans = sorted(
(
c
for c in cans
if requirement.specifier.contains(c.version, True)
and (allow_all or requires_python.is_subset(c.requires_python))
),
key=lambda c: c.version,
)
return sorted_cans
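# --- Editor's illustrative sketch (not part of pdm): the fallback pattern used
# --- by BaseRepository.get_dependencies() above. Each getter is tried in order,
# --- the last failure is remembered, and it is re-raised only when every getter
# --- fails (the for/else branch). All names below are invented for the demo.
import sys

class InfoNotFound(Exception):
    pass

def first_successful(getters, candidate):
    result, last_exc_info = None, None
    for getter in getters:
        try:
            result = getter(candidate)
        except InfoNotFound:
            last_exc_info = sys.exc_info()
            continue
        break
    else:
        if last_exc_info is not None:
            raise last_exc_info[1].with_traceback(last_exc_info[2])
    return result

def _always_missing(candidate):
    raise InfoNotFound(candidate)

# first_successful([_always_missing, str.upper], "pdm") -> "PDM"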
tests/backends/test_cookie.py | euri10/starsessions | 31 | 10008 | import pytest
from starsessions import SessionBackend
@pytest.mark.asyncio
async def test_cookie_read_write(cookie: SessionBackend, session_payload: dict) -> None:
new_id = await cookie.write(session_payload, "session_id")
assert await cookie.read(new_id) == session_payload
@pytest.mark.asyncio
async def test_cookie_remove(cookie: SessionBackend) -> None:
await cookie.remove("session_id")
@pytest.mark.asyncio
async def test_cookie_exists(cookie: SessionBackend) -> None:
assert await cookie.exists("session_id") is False
@pytest.mark.asyncio
async def test_cookie_generate_id(cookie: SessionBackend) -> None:
new_id = await cookie.generate_id()
assert isinstance(new_id, str)
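# --- Editor's illustrative sketch (not from the starsessions repo): fixtures
# --- the tests above could run against. The real suite presumably builds a
# --- CookieBackend in its conftest; here an in-memory double implements only
# --- the calls the tests exercise (write/read/remove/exists/generate_id).
import uuid
import pytest
from starsessions import SessionBackend

class InMemoryBackend(SessionBackend):
    def __init__(self) -> None:
        self.store = {}
    async def read(self, session_id: str) -> dict:
        return self.store.get(session_id, {})
    async def write(self, data: dict, session_id: str = None) -> str:
        session_id = session_id or await self.generate_id()
        self.store[session_id] = data
        return session_id
    async def remove(self, session_id: str) -> None:
        self.store.pop(session_id, None)
    async def exists(self, session_id: str) -> bool:
        return session_id in self.store
    async def generate_id(self) -> str:
        return str(uuid.uuid4())

@pytest.fixture()
def cookie() -> SessionBackend:
    return InMemoryBackend()

@pytest.fixture()
def session_payload() -> dict:
    return {"user_id": 42, "theme": "dark"}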
sqlalchemy_dremio/db.py | thbeh/sqlalchemy_dremio | 14 | 10009 | <gh_stars>10-100
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from pyarrow import flight
from sqlalchemy_dremio.exceptions import Error, NotSupportedError
from sqlalchemy_dremio.flight_auth import HttpDremioClientAuthHandler
from sqlalchemy_dremio.query import execute
logger = logging.getLogger(__name__)
paramstyle = 'qmark'
def connect(c):
return Connection(c)
def check_closed(f):
"""Decorator that checks if connection/cursor is closed."""
def g(self, *args, **kwargs):
if self.closed:
raise Error(
'{klass} already closed'.format(klass=self.__class__.__name__))
return f(self, *args, **kwargs)
return g
def check_result(f):
"""Decorator that checks if the cursor has results from `execute`."""
def d(self, *args, **kwargs):
if self._results is None:
raise Error('Called before `execute`')
return f(self, *args, **kwargs)
return d
class Connection(object):
def __init__(self, connection_string):
# TODO: Find a better way to extend to additional flight parameters
splits = connection_string.split(";")
client = flight.FlightClient('grpc+tcp://{0}:{1}'.format(splits[2].split("=")[1], splits[3].split("=")[1]))
client.authenticate(HttpDremioClientAuthHandler(splits[0].split("=")[1], splits[1].split("=")[1]))
self.flightclient = client
self.closed = False
self.cursors = []
@check_closed
def rollback(self):
pass
@check_closed
def close(self):
"""Close the connection now."""
self.closed = True
for cursor in self.cursors:
try:
cursor.close()
except Error:
pass # already closed
@check_closed
def commit(self):
pass
@check_closed
def cursor(self):
"""Return a new Cursor Object using the connection."""
cursor = Cursor(self.flightclient)
self.cursors.append(cursor)
return cursor
@check_closed
def execute(self, query):
cursor = self.cursor()
return cursor.execute(query)
def __enter__(self):
return self
def __exit__(self, *exc):
self.commit() # no-op
self.close()
class Cursor(object):
"""Connection cursor."""
def __init__(self, flightclient=None):
self.flightclient = flightclient
# This read/write attribute specifies the number of rows to fetch at a
# time with .fetchmany(). It defaults to 1 meaning to fetch a single
# row at a time.
self.arraysize = 1
self.closed = False
# this is updated only after a query
self.description = None
# this is set to a list of rows after a successful query
self._results = None
@property
@check_result
@check_closed
def rowcount(self):
return len(self._results)
@check_closed
def close(self):
"""Close the cursor."""
self.closed = True
@check_closed
def execute(self, query, params=None):
self.description = None
self._results, self.description = execute(
query, self.flightclient)
return self
@check_closed
def executemany(self, query):
raise NotSupportedError(
'`executemany` is not supported, use `execute` instead')
@check_result
@check_closed
def fetchone(self):
"""
Fetch the next row of a query result set, returning a single sequence,
or `None` when no more data is available.
"""
try:
return self._results.pop(0)
except IndexError:
return None
@check_result
@check_closed
def fetchmany(self, size=None):
"""
Fetch the next set of rows of a query result, returning a sequence of
sequences (e.g. a list of tuples). An empty sequence is returned when
no more rows are available.
"""
size = size or self.arraysize
out = self._results[:size]
self._results = self._results[size:]
return out
@check_result
@check_closed
def fetchall(self):
"""
Fetch all (remaining) rows of a query result, returning them as a
sequence of sequences (e.g. a list of tuples). Note that the cursor's
arraysize attribute can affect the performance of this operation.
"""
out = self._results[:]
self._results = []
return out
@check_closed
def setinputsizes(self, sizes):
# not supported
pass
@check_closed
def setoutputsizes(self, sizes):
# not supported
pass
@check_closed
def __iter__(self):
return iter(self._results)
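# --- Editor's illustrative sketch (not part of sqlalchemy_dremio): typical use
# --- of the DB-API style objects above. The connection string is four
# --- "key=value" pairs joined with ";" (user, password, host, port in that
# --- order); the key names and values shown here are placeholders.
def _demo(connection_string="UID=dremio;PWD=changeme;HOST=localhost;PORT=32010"):
    conn = connect(connection_string)   # wraps a pyarrow FlightClient
    try:
        cur = conn.execute("SELECT 1")  # Connection.execute() opens a cursor for you
        return cur.fetchall()           # results are buffered on the cursor
    finally:
        conn.close()                    # also closes every cursor it created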
docker-images/rasa2/snips_services/tts_server.py | sanyaade-machine-learning/opensnips_original | 57 | 10010
#!/opt/rasa/anaconda/bin/python
# -*-: coding utf-8 -*-
""" Snips core and nlu server. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import time
import os
from socket import error as socket_error
from SnipsMqttServer import SnipsMqttServer
import paho.mqtt.client as mqtt
from thread_handler import ThreadHandler
import sys,warnings
# apt-get install sox libsox-fmt-all
import sox
class SnipsTTSServer(SnipsMqttServer):
def __init__(self,
mqtt_hostname='mosquitto',
mqtt_port=1883,
):
SnipsMqttServer.__init__(self,mqtt_hostname,mqtt_port)
self.subscribe_to='hermes/tts/say'
def on_message(self, client, userdata, msg):
#print("MESSAGEtts: {}".format(msg.topic))
if msg.topic is not None and msg.topic=="hermes/tts/say":
print("MESSAGE OK: {}".format(msg.topic))
payload = json.loads(msg.payload)
# .decode('utf-8')
sessionId = payload.get('sessionId')
siteId = payload.get('siteId','default')
lang = payload.get('lang','en-GB')
theId = sessionId
fileName = '/tmp/speaking.wav'
os.system('/usr/bin/pico2wave -w=' + fileName + ' "{}" '.format(payload.get('text')))
#pubCommand = "mosquitto_pub -h " +self.mqtt_hostname+" -t 'hermes/audioServer/default/playBytes/0049a91e-8449-4398-9752-07c858234' -f '" + fileName + "'"
#print(pubCommand)
#os.system(pubCommand)
fp = open(fileName, 'rb')
f = fp.read()
fp.close()
topic = 'hermes/audioServer/{}/playBytes'.format(siteId)
if theId is not None:
topic = topic + '/{}'.format(theId[::-1])
self.client.publish(topic, payload=bytes(f),qos=0)
#print("PUBLISHED on " + topic)
os.remove(fileName)
server = SnipsTTSServer()
server.start()
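# --- Editor's illustrative sketch (not part of the original service): sending a
# --- request the server above understands. It listens on hermes/tts/say and
# --- answers with hermes/audioServer/<siteId>/playBytes/<id>. The broker host
# --- below is an assumption (the service defaults to "mosquitto").
import json
import paho.mqtt.publish as publish

def say(text, site_id="default", session_id="demo-session", host="mosquitto"):
    payload = {"text": text, "siteId": site_id, "sessionId": session_id, "lang": "en-GB"}
    publish.single("hermes/tts/say", payload=json.dumps(payload), hostname=host)

# say("Hello from the TTS server")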
gtd/migrations/0018_context_color.py | jimbofreedman/naggingnelly-api | 0 | 10011
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-08-02 17:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gtd', '0017_auto_20180108_1508'),
]
operations = [
migrations.AddField(
model_name='context',
name='color',
field=models.CharField(default='ffffff', max_length=6),
),
]
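# --- Editor's note (not part of the generated migration): this adds a
# --- 6-character colour column with a white default. It is applied or rolled
# --- back with the usual management commands, for example:
# ---   python manage.py migrate gtd 0018_context_color
# ---   python manage.py migrate gtd 0017_auto_20180108_1508   # undo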
neuralintents/main.py | nitori/neuralintents | 0 | 10012 | from abc import ABCMeta, abstractmethod
import random
import json
import pickle
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import nltk
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.models import load_model
nltk.download('punkt', quiet=True)
nltk.download('wordnet', quiet=True)
class IAssistant(metaclass=ABCMeta):
@abstractmethod
def train_model(self):
""" Implemented in child class """
@abstractmethod
def request_tag(self, message):
""" Implemented in child class """
@abstractmethod
def get_tag_by_id(self, id):
""" Implemented in child class """
@abstractmethod
def request_method(self, message):
""" Implemented in child class """
@abstractmethod
def request(self, message):
""" Implemented in child class """
class GenericAssistant(IAssistant):
def __init__(self, intents, intent_methods={}, model_name="assistant_model", *, json_encoding='utf-8'):
self.intents = intents
self.intent_methods = intent_methods
self.model_name = model_name
self.json_encoding = json_encoding
if intents.endswith(".json"):
self.load_json_intents(intents)
self.lemmatizer = WordNetLemmatizer()
def load_json_intents(self, intents):
with open(intents, encoding=self.json_encoding) as f:
self.intents = json.load(f)
def train_model(self):
self.words = []
self.classes = []
documents = []
ignore_letters = ['!', '?', ',', '.']
for intent in self.intents['intents']:
for pattern in intent['patterns']:
word = nltk.word_tokenize(pattern)
self.words.extend(word)
documents.append((word, intent['tag']))
if intent['tag'] not in self.classes:
self.classes.append(intent['tag'])
self.words = [self.lemmatizer.lemmatize(w.lower()) for w in self.words if w not in ignore_letters]
self.words = sorted(list(set(self.words)))
self.classes = sorted(list(set(self.classes)))
training = []
output_empty = [0] * len(self.classes)
for doc in documents:
bag = []
word_patterns = doc[0]
word_patterns = [self.lemmatizer.lemmatize(word.lower()) for word in word_patterns]
for word in self.words:
bag.append(1) if word in word_patterns else bag.append(0)
output_row = list(output_empty)
output_row[self.classes.index(doc[1])] = 1
training.append([bag, output_row])
random.shuffle(training)
training = np.array(training)
train_x = list(training[:, 0])
train_y = list(training[:, 1])
self.model = Sequential()
self.model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
self.model.add(Dropout(0.5))
self.model.add(Dense(64, activation='relu'))
self.model.add(Dropout(0.5))
self.model.add(Dense(len(train_y[0]), activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
self.hist = self.model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
def save_model(self, model_name=None):
if model_name is None:
self.model.save(f"{self.model_name}.h5", self.hist)
with open(f'{self.model_name}_words.pkl', 'wb') as f:
pickle.dump(self.words, f)
with open(f'{self.model_name}_classes.pkl', 'wb') as f:
pickle.dump(self.classes, f)
else:
self.model.save(f"{model_name}.h5", self.hist)
with open(f'{model_name}_words.pkl', 'wb') as f:
pickle.dump(self.words, f)
with open(f'{model_name}_classes.pkl', 'wb') as f:
pickle.dump(self.classes, f)
def load_model(self, model_name=None):
if model_name is None:
with open(f'{self.model_name}_words.pkl', 'rb') as f:
self.words = pickle.load(f)
with open(f'{self.model_name}_classes.pkl', 'rb') as f:
self.classes = pickle.load(f)
self.model = load_model(f'{self.model_name}.h5')
else:
with open(f'{model_name}_words.pkl', 'rb') as f:
self.words = pickle.load(f)
with open(f'{model_name}_classes.pkl', 'rb') as f:
self.classes = pickle.load(f)
self.model = load_model(f'{model_name}.h5')
def _clean_up_sentence(self, sentence):
sentence_words = nltk.word_tokenize(sentence)
sentence_words = [self.lemmatizer.lemmatize(word.lower()) for word in sentence_words]
return sentence_words
def _bag_of_words(self, sentence, words):
sentence_words = self._clean_up_sentence(sentence)
bag = [0] * len(words)
for s in sentence_words:
for i, word in enumerate(words):
if word == s:
bag[i] = 1
return np.array(bag)
def _predict_class(self, sentence):
p = self._bag_of_words(sentence, self.words)
res = self.model.predict(np.array([p]))[0]
ERROR_THRESHOLD = 0.1
results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD]
results.sort(key=lambda x: x[1], reverse=True)
return_list = []
for r in results:
return_list.append({'intent': self.classes[r[0]], 'probability': str(r[1])})
return return_list
def _get_response(self, ints, intents_json):
try:
tag = ints[0]['intent']
list_of_intents = intents_json['intents']
for i in list_of_intents:
if i['tag'] == tag:
result = random.choice(i['responses'])
break
except IndexError:
result = "I don't understand!"
return result
def request_tag(self, message):
pass
def get_tag_by_id(self, id):
pass
def request_method(self, message):
pass
def request(self, message):
ints = self._predict_class(message)
if ints[0]['intent'] in self.intent_methods.keys():
self.intent_methods[ints[0]['intent']]()
else:
return self._get_response(ints, self.intents)
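# --- Editor's illustrative sketch (not shipped with the package): minimal
# --- end-to-end use of GenericAssistant above. The intents.json path, tag name
# --- and handler are made up; the file must follow the
# --- {"intents": [{"tag", "patterns", "responses"}, ...]} layout train_model() expects.
if __name__ == "__main__":
    def _on_greeting():
        print("custom handler for the 'greeting' intent")

    assistant = GenericAssistant(
        "intents.json",
        intent_methods={"greeting": _on_greeting},
        model_name="demo_assistant",
    )
    assistant.train_model()
    assistant.save_model()
    print(assistant.request("hello there"))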
cogs/TieThePie.py | Engineer152/Engineer-Bot | 0 | 10013
import discord
from discord.ext import commands
client = commands.Bot(command_prefix='your prefix',owner_ids = {your user id},case_insensitive=True )
class TieThePie(commands.Cog):
def __init__(self,client):
self.client=client
@commands.command()
async def tiethepie(self,ctx):
embed=discord.Embed(title="**Tie The Pie**",color=0x46e2ec,description='Subscribe to Dude Perfect to see the reveal of Panda\n**[Details](https://youtu.be/bFUZ5gruc0E)**ㅤㅤㅤㅤ**[Subscribe](http://bit.ly/SubDudePerfect)**')
await ctx.send(embed=embed)
def setup(client):
client.add_cog(TieThePie(client))
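# --- Editor's note (not part of the cog): a cog module like this is normally
# --- registered from the main bot file via its setup() hook, e.g.
# ---   client.load_extension("cogs.TieThePie")        # discord.py 1.x
# ---   await client.load_extension("cogs.TieThePie")  # discord.py 2.x (async)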
python/lsst/eotest/simulation/generate_Fe55_images.py | tguillemLSST/eotest | 3 | 10014 | """
@brief Generate Fe55 images and associated darks and bias images
according to section 5.4 of the E/O document (Dec 19, 2012 version).
@author <NAME> <<EMAIL>>
"""
import os
import numpy as np
from sim_inputs import *
from sim_tools import *
def generate_Fe55_images(exptimes, nxrays, outdir, sensorid, gain=gain,
bias_level=bias_level, sys_noise=sys_noise,
dark_current=dark_current):
nexp = len(exptimes)
for i, exptime, nxray in zip(list(range(nexp)), exptimes, nxrays):
#
# Bias images
#
outfile = "Fe55_bias_%s_%02i.fits" % (sensorid, i)
bias_file = os.path.join(outdir, outfile)
bias_segs = []
for hdu in range(nhdu):
seg = SegmentExposure(exptime=0, gain=gain)
seg.add_bias(level=bias_level, sigma=sys_noise) # electronics
seg.add_bias(level=0, sigma=read_noise) # read noise
bias_segs.append(seg)
bias_output = fitsFile(bias_segs)
bias_output[0].header['GAIN'] = gain
bias_output[0].header['BIASLVL'] = bias_level
bias_output[0].header['SYSNOISE'] = sys_noise
bias_output[0].header['RDNOISE'] = read_noise
bias_output.writeto(bias_file, overwrite=True)
#
# Dark images
#
outfile = "Fe55_dark_%s_%02i.fits" % (sensorid, i)
dark_file = os.path.join(outdir, outfile)
dark_segs = []
for hdu in range(nhdu):
seg = SegmentExposure(exptime=exptime, gain=gain)
seg.add_bias(level=bias_level, sigma=sys_noise) # electronics
seg.add_bias(level=0, sigma=read_noise) # read noise
seg.add_dark_current(level=dark_current) # dark current
dark_segs.append(seg)
dark_output = fitsFile(dark_segs)
dark_output[0].header['GAIN'] = gain
dark_output[0].header['BIASLVL'] = bias_level
dark_output[0].header['SYSNOISE'] = sys_noise
dark_output[0].header['RDNOISE'] = read_noise
dark_output[0].header['DARKCURR'] = dark_current
dark_output.writeto(dark_file, overwrite=True)
#
# Fe55 exposures
#
outfile = "Fe55_exp_%s_%02i.fits" % (sensorid, i)
Fe55_file = os.path.join(outdir, outfile)
fe55_segs = []
for hdu in range(nhdu):
seg = SegmentExposure(exptime=exptime, gain=gain)
seg.add_bias(level=bias_level, sigma=sys_noise) # electronics
seg.add_bias(level=0, sigma=read_noise) # read noise
seg.add_dark_current(level=dark_current) # dark current
seg.add_Fe55_hits(nxrays=nxray)
fe55_segs.append(seg)
fe55_output = fitsFile(fe55_segs)
fe55_output[0].header['GAIN'] = gain
fe55_output[0].header['BIASLVL'] = bias_level
fe55_output[0].header['SYSNOISE'] = sys_noise
fe55_output[0].header['RDNOISE'] = read_noise
fe55_output[0].header['DARKCURR'] = dark_current
fe55_output[0].header['FE55HITS'] = nxray
fe55_output.writeto(Fe55_file, overwrite=True)
if __name__ == '__main__':
nexp = 10
exptimes = np.linspace(1, 5, nexp)
nxrays = [int(x*1000) for x in exptimes]
generate_Fe55_images(exptimes, nxrays, '.', 'xxx-xx')
| """
@brief Generate Fe55 images and associated darks and bias images
according to section 5.4 of the E/O document (Dec 19, 2012 version).
@author <NAME> <<EMAIL>>
"""
import os
import numpy as np
from sim_inputs import *
from sim_tools import *
def generate_Fe55_images(exptimes, nxrays, outdir, sensorid, gain=gain,
bias_level=bias_level, sys_noise=sys_noise,
dark_current=dark_current):
nexp = len(exptimes)
for i, exptime, nxray in zip(list(range(nexp)), exptimes, nxrays):
#
# Bias images
#
outfile = "Fe55_bias_%s_%02i.fits" % (sensorid, i)
bias_file = os.path.join(outdir, outfile)
bias_segs = []
for hdu in range(nhdu):
seg = SegmentExposure(exptime=0, gain=gain)
seg.add_bias(level=bias_level, sigma=sys_noise) # electronics
seg.add_bias(level=0, sigma=read_noise) # read noise
bias_segs.append(seg)
bias_output = fitsFile(bias_segs)
bias_output[0].header['GAIN'] = gain
bias_output[0].header['BIASLVL'] = bias_level
bias_output[0].header['SYSNOISE'] = sys_noise
bias_output[0].header['RDNOISE'] = read_noise
bias_output.writeto(bias_file, overwrite=True)
#
# Dark images
#
outfile = "Fe55_dark_%s_%02i.fits" % (sensorid, i)
dark_file = os.path.join(outdir, outfile)
dark_segs = []
for hdu in range(nhdu):
seg = SegmentExposure(exptime=exptime, gain=gain)
seg.add_bias(level=bias_level, sigma=sys_noise) # electronics
seg.add_bias(level=0, sigma=read_noise) # read noise
seg.add_dark_current(level=dark_current) # dark current
dark_segs.append(seg)
dark_output = fitsFile(dark_segs)
dark_output[0].header['GAIN'] = gain
dark_output[0].header['BIASLVL'] = bias_level
dark_output[0].header['SYSNOISE'] = sys_noise
dark_output[0].header['RDNOISE'] = read_noise
dark_output[0].header['DARKCURR'] = dark_current
dark_output.writeto(dark_file, overwrite=True)
#
# Fe55 exposures
#
outfile = "Fe55_exp_%s_%02i.fits" % (sensorid, i)
Fe55_file = os.path.join(outdir, outfile)
fe55_segs = []
for hdu in range(nhdu):
seg = SegmentExposure(exptime=exptime, gain=gain)
seg.add_bias(level=bias_level, sigma=sys_noise) # electronics
seg.add_bias(level=0, sigma=read_noise) # read noise
seg.add_dark_current(level=dark_current) # dark current
seg.add_Fe55_hits(nxrays=nxray)
fe55_segs.append(seg)
fe55_output = fitsFile(fe55_segs)
fe55_output[0].header['GAIN'] = gain
fe55_output[0].header['BIASLVL'] = bias_level
fe55_output[0].header['SYSNOISE'] = sys_noise
fe55_output[0].header['RDNOISE'] = read_noise
fe55_output[0].header['DARKCURR'] = dark_current
fe55_output[0].header['FE55HITS'] = nxray
fe55_output.writeto(Fe55_file, overwrite=True)
if __name__ == '__main__':
nexp = 10
exptimes = np.linspace(1, 5, nexp)
nxrays = [int(x*1000) for x in exptimes]
generate_Fe55_images(exptimes, nxrays, '.', 'xxx-xx')
| en | 0.74068 | @brief Generate Fe55 images and associated darks and bias images according to section 5.4 of the E/O document (Dec 19, 2012 version). @author <NAME> <<EMAIL>> # # Bias images # # electronics # read noise # # Dark images # # electronics # read noise # dark current # # Fe55 exposures # # electronics # read noise # dark current | 2.151539 | 2 |
pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py | rayguan97/M3DETR | 21 | 10015 | import math
import numpy as np
import torch
import torch.nn as nn
from ....ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ....ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_stack_utils
from ....utils import common_utils
from ...backbones_2d.transformer import TransformerEncoderLayer3D, TransformerEncoder
from ...roi_heads.target_assigner.proposal_target_layer import ProposalTargetLayer
from ...model_utils.model_nms_utils import class_agnostic_nms
def bilinear_interpolate_torch(im, x, y):
"""
Args:
im: (H, W, C) [y, x]
x: (N)
y: (N)
Returns:
"""
x0 = torch.floor(x).long()
x1 = x0 + 1
y0 = torch.floor(y).long()
y1 = y0 + 1
x0 = torch.clamp(x0, 0, im.shape[1] - 1)
x1 = torch.clamp(x1, 0, im.shape[1] - 1)
y0 = torch.clamp(y0, 0, im.shape[0] - 1)
y1 = torch.clamp(y1, 0, im.shape[0] - 1)
Ia = im[y0, x0]
Ib = im[y1, x0]
Ic = im[y0, x1]
Id = im[y1, x1]
wa = (x1.type_as(x) - x) * (y1.type_as(y) - y)
wb = (x1.type_as(x) - x) * (y - y0.type_as(y))
wc = (x - x0.type_as(x)) * (y1.type_as(y) - y)
wd = (x - x0.type_as(x)) * (y - y0.type_as(y))
ans = torch.t((torch.t(Ia) * wa)) + torch.t(torch.t(Ib) * wb) + torch.t(torch.t(Ic) * wc) + torch.t(torch.t(Id) * wd)
return ans
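# --- Editor's illustrative sketch (not part of the original file): how
# --- bilinear_interpolate_torch() is used. It samples a (H, W, C) feature map
# --- at fractional (x, y) positions and returns one C-dim vector per point;
# --- interpolate_from_bev_features() below relies on it to lift BEV features
# --- onto sampled keypoints.
def _demo_bilinear_interpolate():
    bev = torch.arange(2 * 3 * 4, dtype=torch.float32).reshape(2, 3, 4)  # (H=2, W=3, C=4)
    x = torch.tensor([0.5, 1.25])
    y = torch.tensor([0.0, 0.75])
    return bilinear_interpolate_torch(bev, x, y)  # -> tensor of shape (2, 4)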
def sample_points_with_roi(rois, points, sample_radius_with_roi, num_max_points_of_part=200000):
"""
Args:
rois: (M, 7 + C)
points: (N, 3)
sample_radius_with_roi:
num_max_points_of_part:
Returns:
sampled_points: (N_out, 3)
"""
if points.shape[0] < num_max_points_of_part:
distance = (points[:, None, :] - rois[None, :, 0:3]).norm(dim=-1)
min_dis, min_dis_roi_idx = distance.min(dim=-1)
roi_max_dim = (rois[min_dis_roi_idx, 3:6] / 2).norm(dim=-1)
point_mask = min_dis < roi_max_dim + sample_radius_with_roi
else:
start_idx = 0
point_mask_list = []
while start_idx < points.shape[0]:
distance = (points[start_idx:start_idx + num_max_points_of_part, None, :] - rois[None, :, 0:3]).norm(dim=-1)
min_dis, min_dis_roi_idx = distance.min(dim=-1)
roi_max_dim = (rois[min_dis_roi_idx, 3:6] / 2).norm(dim=-1)
cur_point_mask = min_dis < roi_max_dim + sample_radius_with_roi
point_mask_list.append(cur_point_mask)
start_idx += num_max_points_of_part
point_mask = torch.cat(point_mask_list, dim=0)
sampled_points = points[:1] if point_mask.sum() == 0 else points[point_mask, :]
return sampled_points, point_mask
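# --- Editor's illustrative sketch (not part of the original file): calling
# --- sample_points_with_roi() on random data. A point survives if its nearest
# --- ROI centre lies within half that ROI's diagonal plus the extra margin;
# --- the chunked branch only bounds memory for very large clouds.
def _demo_sample_points_with_roi():
    rois = torch.tensor([[0.0, 0.0, 0.0, 4.0, 4.0, 2.0, 0.0]])  # one box: centre, dims, yaw
    points = torch.randn(1024, 3) * 5.0
    sampled, mask = sample_points_with_roi(rois, points, sample_radius_with_roi=1.6)
    return sampled.shape[0], int(mask.sum())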
def sector_fps(points, num_sampled_points, num_sectors):
"""
Args:
points: (N, 3)
num_sampled_points: int
num_sectors: int
Returns:
sampled_points: (N_out, 3)
"""
sector_size = np.pi * 2 / num_sectors
point_angles = torch.atan2(points[:, 1], points[:, 0]) + np.pi
sector_idx = (point_angles / sector_size).floor().clamp(min=0, max=num_sectors)
xyz_points_list = []
xyz_batch_cnt = []
num_sampled_points_list = []
for k in range(num_sectors):
mask = (sector_idx == k)
cur_num_points = mask.sum().item()
if cur_num_points > 0:
xyz_points_list.append(points[mask])
xyz_batch_cnt.append(cur_num_points)
ratio = cur_num_points / points.shape[0]
num_sampled_points_list.append(
min(cur_num_points, math.ceil(ratio * num_sampled_points))
)
if len(xyz_batch_cnt) == 0:
xyz_points_list.append(points)
xyz_batch_cnt.append(len(points))
num_sampled_points_list.append(num_sampled_points)
print(f'Warning: empty sector points detected in SectorFPS: points.shape={points.shape}')
xyz = torch.cat(xyz_points_list, dim=0)
xyz_batch_cnt = torch.tensor(xyz_batch_cnt, device=points.device).int()
sampled_points_batch_cnt = torch.tensor(num_sampled_points_list, device=points.device).int()
sampled_pt_idxs = pointnet2_stack_utils.stack_farthest_point_sample(
xyz.contiguous(), xyz_batch_cnt, sampled_points_batch_cnt
).long()
sampled_points = xyz[sampled_pt_idxs]
return sampled_points
class VoxelSetAbstractionTransFusionv5(nn.Module):
def __init__(self, model_cfg, voxel_size, point_cloud_range, num_bev_features=None,
num_rawpoint_features=None, **kwargs):
super().__init__()
self.model_cfg = model_cfg
self.voxel_size = voxel_size
self.point_cloud_range = point_cloud_range
SA_cfg = self.model_cfg.SA_LAYER
self.SA_layers = nn.ModuleList()
self.linears_in = nn.ModuleList()
self.linears_out = nn.ModuleList()
self.fusion_channel = sum([x[-1] for x in SA_cfg[self.model_cfg.FEATURES_SOURCE[-2]].MLPS])
# self.fusion_channel = 16
self.SA_layer_names = []
self.downsample_times_map = {}
c_in = 0
if 'bev' in self.model_cfg.FEATURES_SOURCE:
c_bev = num_bev_features
c_in += c_bev
if c_bev == self.fusion_channel:
self.linears_in.append(nn.Identity())
self.linears_out.append(nn.Identity())
else:
self.linears_in.append(nn.Sequential(
nn.Linear(c_bev, self.fusion_channel, bias=False),
nn.BatchNorm1d(self.fusion_channel)))
self.linears_out.append(nn.Sequential(
nn.Linear(self.fusion_channel, c_bev, bias=False),
nn.BatchNorm1d(c_bev)))
if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
mlps = SA_cfg['raw_points'].MLPS
for k in range(len(mlps)):
mlps[k] = [num_rawpoint_features - 3] + mlps[k]
self.SA_rawpoints = pointnet2_stack_modules.StackSAModuleMSG(
radii=SA_cfg['raw_points'].POOL_RADIUS,
nsamples=SA_cfg['raw_points'].NSAMPLE,
mlps=mlps,
use_xyz=True,
pool_method='max_pool'
)
cur = sum([x[-1] for x in mlps])
if cur == self.fusion_channel:
self.linears_in.append(nn.Identity())
self.linears_out.append(nn.Identity())
else:
self.linears_in.append(nn.Sequential(
nn.Linear(cur, self.fusion_channel, bias=False),
nn.BatchNorm1d(self.fusion_channel)))
self.linears_out.append(nn.Sequential(
nn.Linear(self.fusion_channel, cur, bias=False),
nn.BatchNorm1d(cur)))
c_in += cur
for src_name in self.model_cfg.FEATURES_SOURCE:
if src_name in ['bev', 'raw_points']:
continue
self.downsample_times_map[src_name] = SA_cfg[src_name].DOWNSAMPLE_FACTOR
mlps = SA_cfg[src_name].MLPS
for k in range(len(mlps)):
mlps[k] = [mlps[k][0]] + mlps[k]
cur_layer = pointnet2_stack_modules.StackSAModuleMSG(
radii=SA_cfg[src_name].POOL_RADIUS,
nsamples=SA_cfg[src_name].NSAMPLE,
mlps=mlps,
use_xyz=True,
pool_method='max_pool',
)
self.SA_layers.append(cur_layer)
cur = sum([x[-1] for x in mlps])
if cur == self.fusion_channel:
self.linears_in.append(nn.Identity())
self.linears_out.append(nn.Identity())
else:
self.linears_in.append(nn.Sequential(
nn.Linear(cur, self.fusion_channel, bias=False),
nn.BatchNorm1d(self.fusion_channel)))
self.linears_out.append(nn.Sequential(
nn.Linear(self.fusion_channel, cur, bias=False),
nn.BatchNorm1d(cur)))
self.SA_layer_names.append(src_name)
c_in += cur
self.vsa_point_feature_fusion = nn.Sequential(
nn.Linear(c_in, self.model_cfg.NUM_OUTPUT_FEATURES, bias=False),
nn.BatchNorm1d(self.model_cfg.NUM_OUTPUT_FEATURES),
nn.ReLU(),
)
self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES
self.num_point_features_before_fusion = c_in
if self.model_cfg.NORM:
self.transnorm = nn.LayerNorm(c_in)
else:
self.transnorm = None
if self.model_cfg.NORM2:
self.transnorm2 = nn.LayerNorm(self.fusion_channel)
else:
self.transnorm2 = None
# multi_location
self.trans_layer = TransformerEncoder(TransformerEncoderLayer3D(c_in, self.model_cfg.FUSION_HEAD), self.model_cfg.NUM_LAYERS, self.transnorm)
# have multi-modality + multi-scale
self.trans_fusion_layer = TransformerEncoder(TransformerEncoderLayer3D(self.fusion_channel, self.model_cfg.FUSION2_HEAD), self.model_cfg.NUM_LAYERS2, self.transnorm2)
self.reduce_radius = self.model_cfg.REDUCE_RADIUS**2
self.topks = self.model_cfg.NMS_CONFIG.TOPK
self.max_keypoints = self.model_cfg.NMS_CONFIG.MAX_POINTS
self.res1_actn_1 = nn.Sequential(
nn.LayerNorm(c_in),
nn.ReLU())
self.res1_actn_2 = nn.Sequential(
nn.LayerNorm(c_in),
nn.ReLU())
def interpolate_from_bev_features(self, keypoints, bev_features, batch_size, bev_stride):
x_idxs = (keypoints[:, :, 0] - self.point_cloud_range[0]) / self.voxel_size[0]
y_idxs = (keypoints[:, :, 1] - self.point_cloud_range[1]) / self.voxel_size[1]
x_idxs = x_idxs / bev_stride
y_idxs = y_idxs / bev_stride
point_bev_features_list = []
for k in range(batch_size):
cur_x_idxs = x_idxs[k]
cur_y_idxs = y_idxs[k]
cur_bev_features = bev_features[k].permute(1, 2, 0) # (H, W, C)
point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs)
point_bev_features_list.append(point_bev_features.unsqueeze(dim=0))
point_bev_features = torch.cat(point_bev_features_list, dim=0) # (B, N, C0)
return point_bev_features
def get_sampled_points(self, batch_dict):
batch_size = batch_dict['batch_size']
if self.model_cfg.POINT_SOURCE == 'raw_points':
src_points = batch_dict['points'][:, 1:4]
batch_indices = batch_dict['points'][:, 0].long()
elif self.model_cfg.POINT_SOURCE == 'voxel_centers':
src_points = common_utils.get_voxel_centers(
batch_dict['voxel_coords'][:, 1:4],
downsample_times=1,
voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
batch_indices = batch_dict['voxel_coords'][:, 0].long()
else:
raise NotImplementedError
keypoints_list = []
for bs_idx in range(batch_size):
bs_mask = (batch_indices == bs_idx)
sampled_points = src_points[bs_mask].unsqueeze(dim=0) # (1, N, 3)
if self.model_cfg.SAMPLE_METHOD == 'FPS':
cur_pt_idxs = pointnet2_stack_utils.furthest_point_sample(
sampled_points[:, :, 0:3].contiguous(), self.model_cfg.NUM_KEYPOINTS
).long()
if sampled_points.shape[1] < self.model_cfg.NUM_KEYPOINTS:
empty_num = self.model_cfg.NUM_KEYPOINTS - sampled_points.shape[1]
cur_pt_idxs[0, -empty_num:] = cur_pt_idxs[0, :empty_num]
keypoints = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
elif self.model_cfg.SAMPLE_METHOD == 'FastFPS':
raise NotImplementedError
else:
raise NotImplementedError
keypoints_list.append(keypoints)
keypoints = torch.cat(keypoints_list, dim=0) # (B, M, 3)
return keypoints
def get_sampled_points_post(self, batch_dict, keypoints):
batch_size = batch_dict['batch_size']
src_points = keypoints
keypoints_list = []
for bs_idx in range(batch_size):
sampled_points = src_points[bs_idx].unsqueeze(dim=0) # (1, N, 3)
if sampled_points.shape[1] < self.max_keypoints:
cur_count = sampled_points.shape[1]
cur_pt_idxs = torch.arange(0, self.max_keypoints)
empty_num = self.max_keypoints - cur_count
while empty_num >= cur_count:
cur_pt_idxs[cur_count:cur_count * 2] = cur_pt_idxs[:cur_count]
empty_num -= cur_count
cur_count *= 2
if cur_count < self.max_keypoints:
assert empty_num == self.max_keypoints - cur_count
cur_pt_idxs[-empty_num:] = cur_pt_idxs[:empty_num]
keypoint = sampled_points[0][cur_pt_idxs].unsqueeze(dim=0)
else:
cur_pt_idxs = pointnet2_stack_utils.furthest_point_sample(
sampled_points[:, :, 0:3].contiguous(), self.max_keypoints
).long()
if sampled_points.shape[1] < self.max_keypoints:
empty_num = self.max_keypoints - sampled_points.shape[1]
cur_pt_idxs[0, -empty_num:] = cur_pt_idxs[0, :empty_num]
keypoint = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
keypoints_list.append(keypoint)
keypoint = torch.cat(keypoints_list, dim=0) # (B, M, 3)
return keypoint
def reduce_points(self, batch_dict):
batch_indices = batch_dict['points'][:, 0].long()
masks = []
for bs_idx, roi in enumerate(batch_dict['batch_cls_preds']):
bs_mask = (batch_indices == bs_idx)
pts = batch_dict['points'][bs_mask].unsqueeze(dim=1)[:, :, 1: 4] # (N, 1, 3)
s, _ = torch.max(batch_dict['batch_cls_preds'][bs_idx], dim=1)
top, idx = torch.topk(s, self.topks)
c = batch_dict['batch_box_preds'][bs_idx][idx][:, :3].unsqueeze(dim=0)
dist = (pts - c)**2
dist, _ = dist.sum(dim=-1).min(dim=1)
mask = (dist <= self.reduce_radius)
masks.extend(mask)
batch_dict['points'] = batch_dict['points'][masks]
return batch_dict
def reduce_points_post(self, keypoints, batch_dict):
keypoints_list = []
for bs_idx, roi in enumerate(batch_dict['batch_cls_preds']):
pts = keypoints[bs_idx].unsqueeze(dim=1)
s, _ = torch.max(batch_dict['batch_cls_preds'][bs_idx], dim=1)
top, idx = torch.topk(s, self.topks)
c = batch_dict['batch_box_preds'][bs_idx][idx][:, :3].unsqueeze(dim=0)
dist = (pts - c)**2
dist, _ = dist.sum(dim=-1).min(dim=1)
mask = (dist <= self.reduce_radius)
keypoints_list.append(keypoints[bs_idx][mask])
return keypoints_list
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
keypoints: (B, num_keypoints, 3)
multi_scale_3d_features: {
'x_conv4': ...
}
points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
spatial_features: optional
spatial_features_stride: optional
Returns:
point_features: (N, C)
point_coords: (N, 4)
"""
if self.model_cfg.POINT_SOURCE == 'raw_points' and self.reduce_radius > 0:
# batch_dict = self.reduce_points(batch_dict)
keypoints = self.get_sampled_points(batch_dict)
keypoint_lst = self.reduce_points_post(keypoints, batch_dict)
keypoints = self.get_sampled_points_post(batch_dict, keypoint_lst)
else:
keypoints = self.get_sampled_points(batch_dict)
point_features_list = []
if 'bev' in self.model_cfg.FEATURES_SOURCE:
point_bev_features = self.interpolate_from_bev_features(
keypoints, batch_dict['spatial_features'], batch_dict['batch_size'],
bev_stride=batch_dict['spatial_features_stride']
)
point_features_list.append(point_bev_features)
batch_size, num_keypoints, _ = keypoints.shape
new_xyz = keypoints.view(-1, 3)
new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int().fill_(num_keypoints)
if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
raw_points = batch_dict['points']
xyz = raw_points[:, 1:4]
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
for bs_idx in range(batch_size):
xyz_batch_cnt[bs_idx] = (raw_points[:, 0] == bs_idx).sum()
point_features = raw_points[:, 4:].contiguous() if raw_points.shape[1] > 4 else None
pooled_points, pooled_features = self.SA_rawpoints(
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=point_features,
)
point_features_list.append(pooled_features.view(batch_size, num_keypoints, -1))
for k, src_name in enumerate(self.SA_layer_names):
cur_coords = batch_dict['multi_scale_3d_features'][src_name].indices
xyz = common_utils.get_voxel_centers(
cur_coords[:, 1:4],
downsample_times=self.downsample_times_map[src_name],
voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
for bs_idx in range(batch_size):
xyz_batch_cnt[bs_idx] = (cur_coords[:, 0] == bs_idx).sum()
pooled_points, pooled_features = self.SA_layers[k](
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=batch_dict['multi_scale_3d_features'][src_name].features.contiguous(),
)
point_features_list.append(pooled_features.view(batch_size, num_keypoints, -1))
point_features_list_new = []
for i, x in enumerate(point_features_list):
feat = self.linears_in[i](x.view(batch_size * num_keypoints, -1))
point_features_list_new.append(feat.view(1, batch_size * num_keypoints, -1))
fusion_feat = torch.cat(point_features_list_new, dim=0)
# have multi-modality + multi-scale
trans1_feat_list = self.trans_fusion_layer(fusion_feat).view(len(fusion_feat), batch_size, num_keypoints, -1)
trans1_feat_projected_list = []
for i, x in enumerate(trans1_feat_list):
feat = self.linears_out[i](x.view(batch_size * num_keypoints, -1))
trans1_feat_projected_list.append(feat.view(batch_size, num_keypoints, -1))
# multi_location
point_features_main1 = torch.cat(point_features_list, dim=2)
point_features_res1 = self.res1_actn_1(torch.cat(trans1_feat_projected_list, dim=2))
point_features_main2 = point_features_res1 + point_features_main1
point_features_res2 = self.res1_actn_2(self.trans_layer(point_features_main2.permute(1, 0, 2)).permute(1, 0, 2))
point_features = point_features_main2 + point_features_res2
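# Descriptive comment on the fusion above (no behavioural change implied):
# each source's pooled features are projected to fusion_channel by linears_in,
# stacked so the source index becomes the sequence dimension, mixed across
# sources by trans_fusion_layer, projected back by linears_out and concatenated
# into a first residual (res1) on top of the plain concatenation (main1);
# trans_layer then attends across the keypoint dimension of main2 = main1 + res1
# to produce the second residual before vsa_point_feature_fusion is applied below.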
batch_idx = torch.arange(batch_size, device=keypoints.device).view(-1, 1).repeat(1, keypoints.shape[1]).view(-1)
point_coords = torch.cat((batch_idx.view(-1, 1).float(), keypoints.view(-1, 3)), dim=1)
batch_dict['point_features_before_fusion'] = point_features.reshape(-1, point_features.shape[-1])
point_features = self.vsa_point_feature_fusion(point_features.reshape(-1, point_features.shape[-1]))
batch_dict['point_features'] = point_features # (BxN, C)
batch_dict['point_coords'] = point_coords # (BxN, 4)
return batch_dict
class VoxelSetAbstraction(nn.Module):
def __init__(self, model_cfg, voxel_size, point_cloud_range, num_bev_features=None,
num_rawpoint_features=None, **kwargs):
super().__init__()
self.model_cfg = model_cfg
self.voxel_size = voxel_size
self.point_cloud_range = point_cloud_range
SA_cfg = self.model_cfg.SA_LAYER
self.SA_layers = nn.ModuleList()
self.SA_layer_names = []
self.downsample_times_map = {}
c_in = 0
for src_name in self.model_cfg.FEATURES_SOURCE:
if src_name in ['bev', 'raw_points']:
continue
self.downsample_times_map[src_name] = SA_cfg[src_name].DOWNSAMPLE_FACTOR
if SA_cfg[src_name].get('INPUT_CHANNELS', None) is None:
input_channels = SA_cfg[src_name].MLPS[0][0] \
if isinstance(SA_cfg[src_name].MLPS[0], list) else SA_cfg[src_name].MLPS[0]
else:
input_channels = SA_cfg[src_name]['INPUT_CHANNELS']
cur_layer, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
input_channels=input_channels, config=SA_cfg[src_name]
)
self.SA_layers.append(cur_layer)
self.SA_layer_names.append(src_name)
c_in += cur_num_c_out
if 'bev' in self.model_cfg.FEATURES_SOURCE:
c_bev = num_bev_features
c_in += c_bev
if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
self.SA_rawpoints, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
input_channels=num_rawpoint_features - 3, config=SA_cfg['raw_points']
)
c_in += cur_num_c_out
self.vsa_point_feature_fusion = nn.Sequential(
nn.Linear(c_in, self.model_cfg.NUM_OUTPUT_FEATURES, bias=False),
nn.BatchNorm1d(self.model_cfg.NUM_OUTPUT_FEATURES),
nn.ReLU(),
)
self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES
self.num_point_features_before_fusion = c_in
def interpolate_from_bev_features(self, keypoints, bev_features, batch_size, bev_stride):
"""
Args:
keypoints: (N1 + N2 + ..., 4)
bev_features: (B, C, H, W)
batch_size:
bev_stride:
Returns:
point_bev_features: (N1 + N2 + ..., C)
"""
x_idxs = (keypoints[:, 1] - self.point_cloud_range[0]) / self.voxel_size[0]
y_idxs = (keypoints[:, 2] - self.point_cloud_range[1]) / self.voxel_size[1]
x_idxs = x_idxs / bev_stride
y_idxs = y_idxs / bev_stride
point_bev_features_list = []
for k in range(batch_size):
bs_mask = (keypoints[:, 0] == k)
cur_x_idxs = x_idxs[bs_mask]
cur_y_idxs = y_idxs[bs_mask]
cur_bev_features = bev_features[k].permute(1, 2, 0) # (H, W, C)
point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs)
point_bev_features_list.append(point_bev_features)
point_bev_features = torch.cat(point_bev_features_list, dim=0) # (N1 + N2 + ..., C)
return point_bev_features
def sectorized_proposal_centric_sampling(self, roi_boxes, points):
"""
Args:
roi_boxes: (M, 7 + C)
points: (N, 3)
Returns:
sampled_points: (N_out, 3)
"""
sampled_points, _ = sample_points_with_roi(
rois=roi_boxes, points=points,
sample_radius_with_roi=self.model_cfg.SPC_SAMPLING.SAMPLE_RADIUS_WITH_ROI,
num_max_points_of_part=self.model_cfg.SPC_SAMPLING.get('NUM_POINTS_OF_EACH_SAMPLE_PART', 200000)
)
sampled_points = sector_fps(
points=sampled_points, num_sampled_points=self.model_cfg.NUM_KEYPOINTS,
num_sectors=self.model_cfg.SPC_SAMPLING.NUM_SECTORS
)
return sampled_points
def get_sampled_points(self, batch_dict):
"""
Args:
batch_dict:
Returns:
keypoints: (N1 + N2 + ..., 4), where 4 indicates [bs_idx, x, y, z]
"""
batch_size = batch_dict['batch_size']
if self.model_cfg.POINT_SOURCE == 'raw_points':
src_points = batch_dict['points'][:, 1:4]
batch_indices = batch_dict['points'][:, 0].long()
elif self.model_cfg.POINT_SOURCE == 'voxel_centers':
src_points = common_utils.get_voxel_centers(
batch_dict['voxel_coords'][:, 1:4],
downsample_times=1,
voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
batch_indices = batch_dict['voxel_coords'][:, 0].long()
else:
raise NotImplementedError
keypoints_list = []
for bs_idx in range(batch_size):
bs_mask = (batch_indices == bs_idx)
sampled_points = src_points[bs_mask].unsqueeze(dim=0) # (1, N, 3)
if self.model_cfg.SAMPLE_METHOD == 'FPS':
cur_pt_idxs = pointnet2_stack_utils.farthest_point_sample(
sampled_points[:, :, 0:3].contiguous(), self.model_cfg.NUM_KEYPOINTS
).long()
if sampled_points.shape[1] < self.model_cfg.NUM_KEYPOINTS:
times = int(self.model_cfg.NUM_KEYPOINTS / sampled_points.shape[1]) + 1
non_empty = cur_pt_idxs[0, :sampled_points.shape[1]]
cur_pt_idxs[0] = non_empty.repeat(times)[:self.model_cfg.NUM_KEYPOINTS]
keypoints = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
elif self.model_cfg.SAMPLE_METHOD == 'SPC':
cur_keypoints = self.sectorized_proposal_centric_sampling(
roi_boxes=batch_dict['rois'][bs_idx], points=sampled_points[0]
)
bs_idxs = cur_keypoints.new_ones(cur_keypoints.shape[0]) * bs_idx
keypoints = torch.cat((bs_idxs[:, None], cur_keypoints), dim=1)
else:
raise NotImplementedError
keypoints_list.append(keypoints)
keypoints = torch.cat(keypoints_list, dim=0) # (B, M, 3) or (N1 + N2 + ..., 4)
if len(keypoints.shape) == 3:
batch_idx = torch.arange(batch_size, device=keypoints.device).view(-1, 1).repeat(1, keypoints.shape[1]).view(-1, 1)
keypoints = torch.cat((batch_idx.float(), keypoints.view(-1, 3)), dim=1)
return keypoints
@staticmethod
def aggregate_keypoint_features_from_one_source(
batch_size, aggregate_func, xyz, xyz_features, xyz_bs_idxs, new_xyz, new_xyz_batch_cnt,
filter_neighbors_with_roi=False, radius_of_neighbor=None, num_max_points_of_part=200000, rois=None
):
"""
Args:
aggregate_func:
xyz: (N, 3)
xyz_features: (N, C)
xyz_bs_idxs: (N)
new_xyz: (M, 3)
new_xyz_batch_cnt: (batch_size), [N1, N2, ...]
filter_neighbors_with_roi: True/False
radius_of_neighbor: float
num_max_points_of_part: int
rois: (batch_size, num_rois, 7 + C)
Returns:
"""
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
if filter_neighbors_with_roi:
point_features = torch.cat((xyz, xyz_features), dim=-1) if xyz_features is not None else xyz
point_features_list = []
for bs_idx in range(batch_size):
bs_mask = (xyz_bs_idxs == bs_idx)
_, valid_mask = sample_points_with_roi(
rois=rois[bs_idx], points=xyz[bs_mask],
sample_radius_with_roi=radius_of_neighbor, num_max_points_of_part=num_max_points_of_part,
)
point_features_list.append(point_features[bs_mask][valid_mask])
xyz_batch_cnt[bs_idx] = valid_mask.sum()
valid_point_features = torch.cat(point_features_list, dim=0)
xyz = valid_point_features[:, 0:3]
xyz_features = valid_point_features[:, 3:] if xyz_features is not None else None
else:
for bs_idx in range(batch_size):
xyz_batch_cnt[bs_idx] = (xyz_bs_idxs == bs_idx).sum()
pooled_points, pooled_features = aggregate_func(
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=xyz_features.contiguous(),
)
return pooled_features
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
keypoints: (B, num_keypoints, 3)
multi_scale_3d_features: {
'x_conv4': ...
}
points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
spatial_features: optional
spatial_features_stride: optional
Returns:
point_features: (N, C)
point_coords: (N, 4)
"""
keypoints = self.get_sampled_points(batch_dict)
point_features_list = []
if 'bev' in self.model_cfg.FEATURES_SOURCE:
point_bev_features = self.interpolate_from_bev_features(
keypoints, batch_dict['spatial_features'], batch_dict['batch_size'],
bev_stride=batch_dict['spatial_features_stride']
)
point_features_list.append(point_bev_features)
batch_size = batch_dict['batch_size']
new_xyz = keypoints[:, 1:4].contiguous()
new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int()
for k in range(batch_size):
new_xyz_batch_cnt[k] = (keypoints[:, 0] == k).sum()
if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
raw_points = batch_dict['points']
pooled_features = self.aggregate_keypoint_features_from_one_source(
batch_size=batch_size, aggregate_func=self.SA_rawpoints,
xyz=raw_points[:, 1:4],
xyz_features=raw_points[:, 4:].contiguous() if raw_points.shape[1] > 4 else None,
xyz_bs_idxs=raw_points[:, 0],
new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt,
filter_neighbors_with_roi=self.model_cfg.SA_LAYER['raw_points'].get('FILTER_NEIGHBOR_WITH_ROI', False),
radius_of_neighbor=self.model_cfg.SA_LAYER['raw_points'].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),
rois=batch_dict.get('rois', None)
)
point_features_list.append(pooled_features)
for k, src_name in enumerate(self.SA_layer_names):
cur_coords = batch_dict['multi_scale_3d_features'][src_name].indices
cur_features = batch_dict['multi_scale_3d_features'][src_name].features.contiguous()
xyz = common_utils.get_voxel_centers(
cur_coords[:, 1:4], downsample_times=self.downsample_times_map[src_name],
voxel_size=self.voxel_size, point_cloud_range=self.point_cloud_range
)
pooled_features = self.aggregate_keypoint_features_from_one_source(
batch_size=batch_size, aggregate_func=self.SA_layers[k],
xyz=xyz.contiguous(), xyz_features=cur_features, xyz_bs_idxs=cur_coords[:, 0],
new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt,
filter_neighbors_with_roi=self.model_cfg.SA_LAYER[src_name].get('FILTER_NEIGHBOR_WITH_ROI', False),
radius_of_neighbor=self.model_cfg.SA_LAYER[src_name].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),
rois=batch_dict.get('rois', None)
)
point_features_list.append(pooled_features)
point_features = torch.cat(point_features_list, dim=-1)
batch_dict['point_features_before_fusion'] = point_features.view(-1, point_features.shape[-1])
point_features = self.vsa_point_feature_fusion(point_features.view(-1, point_features.shape[-1]))
batch_dict['point_features'] = point_features # (BxN, C)
batch_dict['point_coords'] = keypoints # (BxN, 4)
return batch_dict
| import math
import numpy as np
import torch
import torch.nn as nn
from ....ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ....ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_stack_utils
from ....utils import common_utils
from ...backbones_2d.transformer import TransformerEncoderLayer3D, TransformerEncoder
from ...roi_heads.target_assigner.proposal_target_layer import ProposalTargetLayer
from ...model_utils.model_nms_utils import class_agnostic_nms
def bilinear_interpolate_torch(im, x, y):
"""
Args:
im: (H, W, C) [y, x]
x: (N)
y: (N)
Returns:
"""
x0 = torch.floor(x).long()
x1 = x0 + 1
y0 = torch.floor(y).long()
y1 = y0 + 1
x0 = torch.clamp(x0, 0, im.shape[1] - 1)
x1 = torch.clamp(x1, 0, im.shape[1] - 1)
y0 = torch.clamp(y0, 0, im.shape[0] - 1)
y1 = torch.clamp(y1, 0, im.shape[0] - 1)
Ia = im[y0, x0]
Ib = im[y1, x0]
Ic = im[y0, x1]
Id = im[y1, x1]
wa = (x1.type_as(x) - x) * (y1.type_as(y) - y)
wb = (x1.type_as(x) - x) * (y - y0.type_as(y))
wc = (x - x0.type_as(x)) * (y1.type_as(y) - y)
wd = (x - x0.type_as(x)) * (y - y0.type_as(y))
ans = torch.t((torch.t(Ia) * wa)) + torch.t(torch.t(Ib) * wb) + torch.t(torch.t(Ic) * wc) + torch.t(torch.t(Id) * wd)
return ans
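# Illustrative usage (hypothetical values, not part of the original file):
#
#     im = torch.arange(4.0).view(2, 2, 1)     # (H=2, W=2, C=1): [[0, 1], [2, 3]]
#     x = torch.tensor([0.5])
#     y = torch.tensor([0.5])
#     bilinear_interpolate_torch(im, x, y)     # -> tensor([[1.5]]), mean of the 4 corners
#
# The keypoint BEV lookup further down passes fractional pixel indices computed as
# (coord - range_min) / voxel_size / bev_stride; indices falling outside the map
# are clamped to the border by the torch.clamp calls above.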
def sample_points_with_roi(rois, points, sample_radius_with_roi, num_max_points_of_part=200000):
"""
Args:
rois: (M, 7 + C)
points: (N, 3)
sample_radius_with_roi:
num_max_points_of_part:
Returns:
sampled_points: (N_out, 3)
"""
if points.shape[0] < num_max_points_of_part:
distance = (points[:, None, :] - rois[None, :, 0:3]).norm(dim=-1)
min_dis, min_dis_roi_idx = distance.min(dim=-1)
roi_max_dim = (rois[min_dis_roi_idx, 3:6] / 2).norm(dim=-1)
point_mask = min_dis < roi_max_dim + sample_radius_with_roi
else:
start_idx = 0
point_mask_list = []
while start_idx < points.shape[0]:
distance = (points[start_idx:start_idx + num_max_points_of_part, None, :] - rois[None, :, 0:3]).norm(dim=-1)
min_dis, min_dis_roi_idx = distance.min(dim=-1)
roi_max_dim = (rois[min_dis_roi_idx, 3:6] / 2).norm(dim=-1)
cur_point_mask = min_dis < roi_max_dim + sample_radius_with_roi
point_mask_list.append(cur_point_mask)
start_idx += num_max_points_of_part
point_mask = torch.cat(point_mask_list, dim=0)
sampled_points = points[:1] if point_mask.sum() == 0 else points[point_mask, :]
return sampled_points, point_mask
def sector_fps(points, num_sampled_points, num_sectors):
"""
Args:
points: (N, 3)
num_sampled_points: int
num_sectors: int
Returns:
sampled_points: (N_out, 3)
"""
sector_size = np.pi * 2 / num_sectors
point_angles = torch.atan2(points[:, 1], points[:, 0]) + np.pi
sector_idx = (point_angles / sector_size).floor().clamp(min=0, max=num_sectors)
xyz_points_list = []
xyz_batch_cnt = []
num_sampled_points_list = []
for k in range(num_sectors):
mask = (sector_idx == k)
cur_num_points = mask.sum().item()
if cur_num_points > 0:
xyz_points_list.append(points[mask])
xyz_batch_cnt.append(cur_num_points)
ratio = cur_num_points / points.shape[0]
num_sampled_points_list.append(
min(cur_num_points, math.ceil(ratio * num_sampled_points))
)
if len(xyz_batch_cnt) == 0:
xyz_points_list.append(points)
xyz_batch_cnt.append(len(points))
num_sampled_points_list.append(num_sampled_points)
print(f'Warning: empty sector points detected in SectorFPS: points.shape={points.shape}')
xyz = torch.cat(xyz_points_list, dim=0)
xyz_batch_cnt = torch.tensor(xyz_batch_cnt, device=points.device).int()
sampled_points_batch_cnt = torch.tensor(num_sampled_points_list, device=points.device).int()
sampled_pt_idxs = pointnet2_stack_utils.stack_farthest_point_sample(
xyz.contiguous(), xyz_batch_cnt, sampled_points_batch_cnt
).long()
sampled_points = xyz[sampled_pt_idxs]
return sampled_points
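# Sketch of the sector split used above (descriptive comment only): points are
# binned by their polar angle around the origin and farthest point sampling runs
# per sector, which keeps the sampled keypoints spread over all directions, e.g.
#
#     angle  = torch.atan2(points[:, 1], points[:, 0]) + np.pi     # in [0, 2*pi]
#     sector = (angle / (2 * np.pi / num_sectors)).floor()         # bin id per point
#
# Each non-empty sector contributes roughly ceil(share_of_points * num_sampled_points)
# keypoints (capped by its own point count) via stack_farthest_point_sample on the
# concatenated per-sector batches.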
class VoxelSetAbstractionTransFusionv5(nn.Module):
def __init__(self, model_cfg, voxel_size, point_cloud_range, num_bev_features=None,
num_rawpoint_features=None, **kwargs):
super().__init__()
self.model_cfg = model_cfg
self.voxel_size = voxel_size
self.point_cloud_range = point_cloud_range
SA_cfg = self.model_cfg.SA_LAYER
self.SA_layers = nn.ModuleList()
self.linears_in = nn.ModuleList()
self.linears_out = nn.ModuleList()
self.fusion_channel = sum([x[-1] for x in SA_cfg[self.model_cfg.FEATURES_SOURCE[-2]].MLPS])
# self.fusion_channel = 16
self.SA_layer_names = []
self.downsample_times_map = {}
c_in = 0
if 'bev' in self.model_cfg.FEATURES_SOURCE:
c_bev = num_bev_features
c_in += c_bev
if c_bev == self.fusion_channel:
self.linears_in.append(nn.Identity())
self.linears_out.append(nn.Identity())
else:
self.linears_in.append(nn.Sequential(
nn.Linear(c_bev, self.fusion_channel, bias=False),
nn.BatchNorm1d(self.fusion_channel)))
self.linears_out.append(nn.Sequential(
nn.Linear(self.fusion_channel, c_bev, bias=False),
nn.BatchNorm1d(c_bev)))
if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
mlps = SA_cfg['raw_points'].MLPS
for k in range(len(mlps)):
mlps[k] = [num_rawpoint_features - 3] + mlps[k]
self.SA_rawpoints = pointnet2_stack_modules.StackSAModuleMSG(
radii=SA_cfg['raw_points'].POOL_RADIUS,
nsamples=SA_cfg['raw_points'].NSAMPLE,
mlps=mlps,
use_xyz=True,
pool_method='max_pool'
)
cur = sum([x[-1] for x in mlps])
if cur == self.fusion_channel:
self.linears_in.append(nn.Identity())
self.linears_out.append(nn.Identity())
else:
self.linears_in.append(nn.Sequential(
nn.Linear(cur, self.fusion_channel, bias=False),
nn.BatchNorm1d(self.fusion_channel)))
self.linears_out.append(nn.Sequential(
nn.Linear(self.fusion_channel, cur, bias=False),
nn.BatchNorm1d(cur)))
c_in += cur
for src_name in self.model_cfg.FEATURES_SOURCE:
if src_name in ['bev', 'raw_points']:
continue
self.downsample_times_map[src_name] = SA_cfg[src_name].DOWNSAMPLE_FACTOR
mlps = SA_cfg[src_name].MLPS
for k in range(len(mlps)):
mlps[k] = [mlps[k][0]] + mlps[k]
cur_layer = pointnet2_stack_modules.StackSAModuleMSG(
radii=SA_cfg[src_name].POOL_RADIUS,
nsamples=SA_cfg[src_name].NSAMPLE,
mlps=mlps,
use_xyz=True,
pool_method='max_pool',
)
self.SA_layers.append(cur_layer)
cur = sum([x[-1] for x in mlps])
if cur == self.fusion_channel:
self.linears_in.append(nn.Identity())
self.linears_out.append(nn.Identity())
else:
self.linears_in.append(nn.Sequential(
nn.Linear(cur, self.fusion_channel, bias=False),
nn.BatchNorm1d(self.fusion_channel)))
self.linears_out.append(nn.Sequential(
nn.Linear(self.fusion_channel, cur, bias=False),
nn.BatchNorm1d(cur)))
self.SA_layer_names.append(src_name)
c_in += cur
self.vsa_point_feature_fusion = nn.Sequential(
nn.Linear(c_in, self.model_cfg.NUM_OUTPUT_FEATURES, bias=False),
nn.BatchNorm1d(self.model_cfg.NUM_OUTPUT_FEATURES),
nn.ReLU(),
)
self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES
self.num_point_features_before_fusion = c_in
if self.model_cfg.NORM:
self.transnorm = nn.LayerNorm(c_in)
else:
self.transnorm = None
if self.model_cfg.NORM2:
self.transnorm2 = nn.LayerNorm(self.fusion_channel)
else:
self.transnorm2 = None
# multi_location
self.trans_layer = TransformerEncoder(TransformerEncoderLayer3D(c_in, self.model_cfg.FUSION_HEAD), self.model_cfg.NUM_LAYERS, self.transnorm)
# have multi-modality + multi-scale
self.trans_fusion_layer = TransformerEncoder(TransformerEncoderLayer3D(self.fusion_channel, self.model_cfg.FUSION2_HEAD), self.model_cfg.NUM_LAYERS2, self.transnorm2)
self.reduce_radius = self.model_cfg.REDUCE_RADIUS**2
self.topks = self.model_cfg.NMS_CONFIG.TOPK
self.max_keypoints = self.model_cfg.NMS_CONFIG.MAX_POINTS
self.res1_actn_1 = nn.Sequential(
nn.LayerNorm(c_in),
nn.ReLU())
self.res1_actn_2 = nn.Sequential(
nn.LayerNorm(c_in),
nn.ReLU())
def interpolate_from_bev_features(self, keypoints, bev_features, batch_size, bev_stride):
x_idxs = (keypoints[:, :, 0] - self.point_cloud_range[0]) / self.voxel_size[0]
y_idxs = (keypoints[:, :, 1] - self.point_cloud_range[1]) / self.voxel_size[1]
x_idxs = x_idxs / bev_stride
y_idxs = y_idxs / bev_stride
point_bev_features_list = []
for k in range(batch_size):
cur_x_idxs = x_idxs[k]
cur_y_idxs = y_idxs[k]
cur_bev_features = bev_features[k].permute(1, 2, 0) # (H, W, C)
point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs)
point_bev_features_list.append(point_bev_features.unsqueeze(dim=0))
point_bev_features = torch.cat(point_bev_features_list, dim=0) # (B, N, C0)
return point_bev_features
def get_sampled_points(self, batch_dict):
batch_size = batch_dict['batch_size']
if self.model_cfg.POINT_SOURCE == 'raw_points':
src_points = batch_dict['points'][:, 1:4]
batch_indices = batch_dict['points'][:, 0].long()
elif self.model_cfg.POINT_SOURCE == 'voxel_centers':
src_points = common_utils.get_voxel_centers(
batch_dict['voxel_coords'][:, 1:4],
downsample_times=1,
voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
batch_indices = batch_dict['voxel_coords'][:, 0].long()
else:
raise NotImplementedError
keypoints_list = []
for bs_idx in range(batch_size):
bs_mask = (batch_indices == bs_idx)
sampled_points = src_points[bs_mask].unsqueeze(dim=0) # (1, N, 3)
if self.model_cfg.SAMPLE_METHOD == 'FPS':
cur_pt_idxs = pointnet2_stack_utils.furthest_point_sample(
sampled_points[:, :, 0:3].contiguous(), self.model_cfg.NUM_KEYPOINTS
).long()
if sampled_points.shape[1] < self.model_cfg.NUM_KEYPOINTS:
empty_num = self.model_cfg.NUM_KEYPOINTS - sampled_points.shape[1]
cur_pt_idxs[0, -empty_num:] = cur_pt_idxs[0, :empty_num]
keypoints = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
elif self.model_cfg.SAMPLE_METHOD == 'FastFPS':
raise NotImplementedError
else:
raise NotImplementedError
keypoints_list.append(keypoints)
keypoints = torch.cat(keypoints_list, dim=0) # (B, M, 3)
return keypoints
def get_sampled_points_post(self, batch_dict, keypoints):
batch_size = batch_dict['batch_size']
src_points = keypoints
keypoints_list = []
for bs_idx in range(batch_size):
sampled_points = src_points[bs_idx].unsqueeze(dim=0) # (1, N, 3)
if sampled_points.shape[1] < self.max_keypoints:
cur_count = sampled_points.shape[1]
cur_pt_idxs = torch.arange(0, self.max_keypoints)
empty_num = self.max_keypoints - cur_count
while empty_num >= cur_count:
cur_pt_idxs[cur_count:cur_count * 2] = cur_pt_idxs[:cur_count]
empty_num -= cur_count
cur_count *= 2
if cur_count < self.max_keypoints:
assert empty_num == self.max_keypoints - cur_count
cur_pt_idxs[-empty_num:] = cur_pt_idxs[:empty_num]
keypoint = sampled_points[0][cur_pt_idxs].unsqueeze(dim=0)
else:
cur_pt_idxs = pointnet2_stack_utils.furthest_point_sample(
sampled_points[:, :, 0:3].contiguous(), self.max_keypoints
).long()
if sampled_points.shape[1] < self.max_keypoints:
empty_num = self.max_keypoints - sampled_points.shape[1]
cur_pt_idxs[0, -empty_num:] = cur_pt_idxs[0, :empty_num]
keypoint = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
keypoints_list.append(keypoint)
keypoint = torch.cat(keypoints_list, dim=0) # (B, M, 3)
return keypoint
def reduce_points(self, batch_dict):
batch_indices = batch_dict['points'][:, 0].long()
masks = []
for bs_idx, roi in enumerate(batch_dict['batch_cls_preds']):
bs_mask = (batch_indices == bs_idx)
pts = batch_dict['points'][bs_mask].unsqueeze(dim=1)[:, :, 1: 4] # (N, 1, 3)
s, _ = torch.max(batch_dict['batch_cls_preds'][bs_idx], dim=1)
top, idx = torch.topk(s, self.topks)
c = batch_dict['batch_box_preds'][bs_idx][idx][:, :3].unsqueeze(dim=0)
dist = (pts - c)**2
dist, _ = dist.sum(dim=-1).min(dim=1)
mask = (dist <= self.reduce_radius)
masks.extend(mask)
batch_dict['points'] = batch_dict['points'][masks]
return batch_dict
def reduce_points_post(self, keypoints, batch_dict):
keypoints_list = []
for bs_idx, roi in enumerate(batch_dict['batch_cls_preds']):
pts = keypoints[bs_idx].unsqueeze(dim=1)
s, _ = torch.max(batch_dict['batch_cls_preds'][bs_idx], dim=1)
top, idx = torch.topk(s, self.topks)
c = batch_dict['batch_box_preds'][bs_idx][idx][:, :3].unsqueeze(dim=0)
dist = (pts - c)**2
dist, _ = dist.sum(dim=-1).min(dim=1)
mask = (dist <= self.reduce_radius)
keypoints_list.append(keypoints[bs_idx][mask])
return keypoints_list
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
keypoints: (B, num_keypoints, 3)
multi_scale_3d_features: {
'x_conv4': ...
}
points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
spatial_features: optional
spatial_features_stride: optional
Returns:
point_features: (N, C)
point_coords: (N, 4)
"""
if self.model_cfg.POINT_SOURCE == 'raw_points' and self.reduce_radius > 0:
# batch_dict = self.reduce_points(batch_dict)
keypoints = self.get_sampled_points(batch_dict)
keypoint_lst = self.reduce_points_post(keypoints, batch_dict)
keypoints = self.get_sampled_points_post(batch_dict, keypoint_lst)
else:
keypoints = self.get_sampled_points(batch_dict)
point_features_list = []
if 'bev' in self.model_cfg.FEATURES_SOURCE:
point_bev_features = self.interpolate_from_bev_features(
keypoints, batch_dict['spatial_features'], batch_dict['batch_size'],
bev_stride=batch_dict['spatial_features_stride']
)
point_features_list.append(point_bev_features)
batch_size, num_keypoints, _ = keypoints.shape
new_xyz = keypoints.view(-1, 3)
new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int().fill_(num_keypoints)
if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
raw_points = batch_dict['points']
xyz = raw_points[:, 1:4]
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
for bs_idx in range(batch_size):
xyz_batch_cnt[bs_idx] = (raw_points[:, 0] == bs_idx).sum()
point_features = raw_points[:, 4:].contiguous() if raw_points.shape[1] > 4 else None
pooled_points, pooled_features = self.SA_rawpoints(
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=point_features,
)
point_features_list.append(pooled_features.view(batch_size, num_keypoints, -1))
for k, src_name in enumerate(self.SA_layer_names):
cur_coords = batch_dict['multi_scale_3d_features'][src_name].indices
xyz = common_utils.get_voxel_centers(
cur_coords[:, 1:4],
downsample_times=self.downsample_times_map[src_name],
voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
for bs_idx in range(batch_size):
xyz_batch_cnt[bs_idx] = (cur_coords[:, 0] == bs_idx).sum()
pooled_points, pooled_features = self.SA_layers[k](
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=batch_dict['multi_scale_3d_features'][src_name].features.contiguous(),
)
point_features_list.append(pooled_features.view(batch_size, num_keypoints, -1))
point_features_list_new = []
for i, x in enumerate(point_features_list):
feat = self.linears_in[i](x.view(batch_size * num_keypoints, -1))
point_features_list_new.append(feat.view(1, batch_size * num_keypoints, -1))
fusion_feat = torch.cat(point_features_list_new, dim=0)
# have multi-modality + multi-scale
trans1_feat_list = self.trans_fusion_layer(fusion_feat).view(len(fusion_feat), batch_size, num_keypoints, -1)
trans1_feat_projected_list = []
for i, x in enumerate(trans1_feat_list):
feat = self.linears_out[i](x.view(batch_size * num_keypoints, -1))
trans1_feat_projected_list.append(feat.view(batch_size, num_keypoints, -1))
# multi_location
point_features_main1 = torch.cat(point_features_list, dim=2)
point_features_res1 = self.res1_actn_1(torch.cat(trans1_feat_projected_list, dim=2))
point_features_main2 = point_features_res1 + point_features_main1
point_features_res2 = self.res1_actn_2(self.trans_layer(point_features_main2.permute(1, 0, 2)).permute(1, 0, 2))
point_features = point_features_main2 + point_features_res2
batch_idx = torch.arange(batch_size, device=keypoints.device).view(-1, 1).repeat(1, keypoints.shape[1]).view(-1)
point_coords = torch.cat((batch_idx.view(-1, 1).float(), keypoints.view(-1, 3)), dim=1)
batch_dict['point_features_before_fusion'] = point_features.reshape(-1, point_features.shape[-1])
point_features = self.vsa_point_feature_fusion(point_features.reshape(-1, point_features.shape[-1]))
batch_dict['point_features'] = point_features # (BxN, C)
batch_dict['point_coords'] = point_coords # (BxN, 4)
return batch_dict
class VoxelSetAbstraction(nn.Module):
def __init__(self, model_cfg, voxel_size, point_cloud_range, num_bev_features=None,
num_rawpoint_features=None, **kwargs):
super().__init__()
self.model_cfg = model_cfg
self.voxel_size = voxel_size
self.point_cloud_range = point_cloud_range
SA_cfg = self.model_cfg.SA_LAYER
self.SA_layers = nn.ModuleList()
self.SA_layer_names = []
self.downsample_times_map = {}
c_in = 0
for src_name in self.model_cfg.FEATURES_SOURCE:
if src_name in ['bev', 'raw_points']:
continue
self.downsample_times_map[src_name] = SA_cfg[src_name].DOWNSAMPLE_FACTOR
if SA_cfg[src_name].get('INPUT_CHANNELS', None) is None:
input_channels = SA_cfg[src_name].MLPS[0][0] \
if isinstance(SA_cfg[src_name].MLPS[0], list) else SA_cfg[src_name].MLPS[0]
else:
input_channels = SA_cfg[src_name]['INPUT_CHANNELS']
cur_layer, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
input_channels=input_channels, config=SA_cfg[src_name]
)
self.SA_layers.append(cur_layer)
self.SA_layer_names.append(src_name)
c_in += cur_num_c_out
if 'bev' in self.model_cfg.FEATURES_SOURCE:
c_bev = num_bev_features
c_in += c_bev
if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
self.SA_rawpoints, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
input_channels=num_rawpoint_features - 3, config=SA_cfg['raw_points']
)
c_in += cur_num_c_out
self.vsa_point_feature_fusion = nn.Sequential(
nn.Linear(c_in, self.model_cfg.NUM_OUTPUT_FEATURES, bias=False),
nn.BatchNorm1d(self.model_cfg.NUM_OUTPUT_FEATURES),
nn.ReLU(),
)
self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES
self.num_point_features_before_fusion = c_in
def interpolate_from_bev_features(self, keypoints, bev_features, batch_size, bev_stride):
"""
Args:
keypoints: (N1 + N2 + ..., 4)
bev_features: (B, C, H, W)
batch_size:
bev_stride:
Returns:
point_bev_features: (N1 + N2 + ..., C)
"""
x_idxs = (keypoints[:, 1] - self.point_cloud_range[0]) / self.voxel_size[0]
y_idxs = (keypoints[:, 2] - self.point_cloud_range[1]) / self.voxel_size[1]
x_idxs = x_idxs / bev_stride
y_idxs = y_idxs / bev_stride
point_bev_features_list = []
for k in range(batch_size):
bs_mask = (keypoints[:, 0] == k)
cur_x_idxs = x_idxs[bs_mask]
cur_y_idxs = y_idxs[bs_mask]
cur_bev_features = bev_features[k].permute(1, 2, 0) # (H, W, C)
point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs)
point_bev_features_list.append(point_bev_features)
point_bev_features = torch.cat(point_bev_features_list, dim=0) # (N1 + N2 + ..., C)
return point_bev_features
def sectorized_proposal_centric_sampling(self, roi_boxes, points):
"""
Args:
roi_boxes: (M, 7 + C)
points: (N, 3)
Returns:
sampled_points: (N_out, 3)
"""
sampled_points, _ = sample_points_with_roi(
rois=roi_boxes, points=points,
sample_radius_with_roi=self.model_cfg.SPC_SAMPLING.SAMPLE_RADIUS_WITH_ROI,
num_max_points_of_part=self.model_cfg.SPC_SAMPLING.get('NUM_POINTS_OF_EACH_SAMPLE_PART', 200000)
)
sampled_points = sector_fps(
points=sampled_points, num_sampled_points=self.model_cfg.NUM_KEYPOINTS,
num_sectors=self.model_cfg.SPC_SAMPLING.NUM_SECTORS
)
return sampled_points
def get_sampled_points(self, batch_dict):
"""
Args:
batch_dict:
Returns:
keypoints: (N1 + N2 + ..., 4), where 4 indicates [bs_idx, x, y, z]
"""
batch_size = batch_dict['batch_size']
if self.model_cfg.POINT_SOURCE == 'raw_points':
src_points = batch_dict['points'][:, 1:4]
batch_indices = batch_dict['points'][:, 0].long()
elif self.model_cfg.POINT_SOURCE == 'voxel_centers':
src_points = common_utils.get_voxel_centers(
batch_dict['voxel_coords'][:, 1:4],
downsample_times=1,
voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
batch_indices = batch_dict['voxel_coords'][:, 0].long()
else:
raise NotImplementedError
keypoints_list = []
for bs_idx in range(batch_size):
bs_mask = (batch_indices == bs_idx)
sampled_points = src_points[bs_mask].unsqueeze(dim=0) # (1, N, 3)
if self.model_cfg.SAMPLE_METHOD == 'FPS':
cur_pt_idxs = pointnet2_stack_utils.farthest_point_sample(
sampled_points[:, :, 0:3].contiguous(), self.model_cfg.NUM_KEYPOINTS
).long()
if sampled_points.shape[1] < self.model_cfg.NUM_KEYPOINTS:
times = int(self.model_cfg.NUM_KEYPOINTS / sampled_points.shape[1]) + 1
non_empty = cur_pt_idxs[0, :sampled_points.shape[1]]
cur_pt_idxs[0] = non_empty.repeat(times)[:self.model_cfg.NUM_KEYPOINTS]
keypoints = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
elif self.model_cfg.SAMPLE_METHOD == 'SPC':
cur_keypoints = self.sectorized_proposal_centric_sampling(
roi_boxes=batch_dict['rois'][bs_idx], points=sampled_points[0]
)
bs_idxs = cur_keypoints.new_ones(cur_keypoints.shape[0]) * bs_idx
keypoints = torch.cat((bs_idxs[:, None], cur_keypoints), dim=1)
else:
raise NotImplementedError
keypoints_list.append(keypoints)
keypoints = torch.cat(keypoints_list, dim=0) # (B, M, 3) or (N1 + N2 + ..., 4)
if len(keypoints.shape) == 3:
batch_idx = torch.arange(batch_size, device=keypoints.device).view(-1, 1).repeat(1, keypoints.shape[1]).view(-1, 1)
keypoints = torch.cat((batch_idx.float(), keypoints.view(-1, 3)), dim=1)
return keypoints
@staticmethod
def aggregate_keypoint_features_from_one_source(
batch_size, aggregate_func, xyz, xyz_features, xyz_bs_idxs, new_xyz, new_xyz_batch_cnt,
filter_neighbors_with_roi=False, radius_of_neighbor=None, num_max_points_of_part=200000, rois=None
):
"""
Args:
aggregate_func:
xyz: (N, 3)
xyz_features: (N, C)
xyz_bs_idxs: (N)
new_xyz: (M, 3)
new_xyz_batch_cnt: (batch_size), [N1, N2, ...]
filter_neighbors_with_roi: True/False
radius_of_neighbor: float
num_max_points_of_part: int
rois: (batch_size, num_rois, 7 + C)
Returns:
"""
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
if filter_neighbors_with_roi:
point_features = torch.cat((xyz, xyz_features), dim=-1) if xyz_features is not None else xyz
point_features_list = []
for bs_idx in range(batch_size):
bs_mask = (xyz_bs_idxs == bs_idx)
_, valid_mask = sample_points_with_roi(
rois=rois[bs_idx], points=xyz[bs_mask],
sample_radius_with_roi=radius_of_neighbor, num_max_points_of_part=num_max_points_of_part,
)
point_features_list.append(point_features[bs_mask][valid_mask])
xyz_batch_cnt[bs_idx] = valid_mask.sum()
valid_point_features = torch.cat(point_features_list, dim=0)
xyz = valid_point_features[:, 0:3]
xyz_features = valid_point_features[:, 3:] if xyz_features is not None else None
else:
for bs_idx in range(batch_size):
xyz_batch_cnt[bs_idx] = (xyz_bs_idxs == bs_idx).sum()
pooled_points, pooled_features = aggregate_func(
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=xyz_features.contiguous(),
)
return pooled_features
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
keypoints: (B, num_keypoints, 3)
multi_scale_3d_features: {
'x_conv4': ...
}
points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
spatial_features: optional
spatial_features_stride: optional
Returns:
point_features: (N, C)
point_coords: (N, 4)
"""
keypoints = self.get_sampled_points(batch_dict)
point_features_list = []
if 'bev' in self.model_cfg.FEATURES_SOURCE:
point_bev_features = self.interpolate_from_bev_features(
keypoints, batch_dict['spatial_features'], batch_dict['batch_size'],
bev_stride=batch_dict['spatial_features_stride']
)
point_features_list.append(point_bev_features)
batch_size = batch_dict['batch_size']
new_xyz = keypoints[:, 1:4].contiguous()
new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int()
for k in range(batch_size):
new_xyz_batch_cnt[k] = (keypoints[:, 0] == k).sum()
if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
raw_points = batch_dict['points']
pooled_features = self.aggregate_keypoint_features_from_one_source(
batch_size=batch_size, aggregate_func=self.SA_rawpoints,
xyz=raw_points[:, 1:4],
xyz_features=raw_points[:, 4:].contiguous() if raw_points.shape[1] > 4 else None,
xyz_bs_idxs=raw_points[:, 0],
new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt,
filter_neighbors_with_roi=self.model_cfg.SA_LAYER['raw_points'].get('FILTER_NEIGHBOR_WITH_ROI', False),
radius_of_neighbor=self.model_cfg.SA_LAYER['raw_points'].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),
rois=batch_dict.get('rois', None)
)
point_features_list.append(pooled_features)
for k, src_name in enumerate(self.SA_layer_names):
cur_coords = batch_dict['multi_scale_3d_features'][src_name].indices
cur_features = batch_dict['multi_scale_3d_features'][src_name].features.contiguous()
xyz = common_utils.get_voxel_centers(
cur_coords[:, 1:4], downsample_times=self.downsample_times_map[src_name],
voxel_size=self.voxel_size, point_cloud_range=self.point_cloud_range
)
pooled_features = self.aggregate_keypoint_features_from_one_source(
batch_size=batch_size, aggregate_func=self.SA_layers[k],
xyz=xyz.contiguous(), xyz_features=cur_features, xyz_bs_idxs=cur_coords[:, 0],
new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt,
filter_neighbors_with_roi=self.model_cfg.SA_LAYER[src_name].get('FILTER_NEIGHBOR_WITH_ROI', False),
radius_of_neighbor=self.model_cfg.SA_LAYER[src_name].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),
rois=batch_dict.get('rois', None)
)
point_features_list.append(pooled_features)
point_features = torch.cat(point_features_list, dim=-1)
batch_dict['point_features_before_fusion'] = point_features.view(-1, point_features.shape[-1])
point_features = self.vsa_point_feature_fusion(point_features.view(-1, point_features.shape[-1]))
batch_dict['point_features'] = point_features # (BxN, C)
batch_dict['point_coords'] = keypoints # (BxN, 4)
return batch_dict
| en | 0.693647 | Args: im: (H, W, C) [y, x] x: (N) y: (N) Returns: Args: rois: (M, 7 + C) points: (N, 3) sample_radius_with_roi: num_max_points_of_part: Returns: sampled_points: (N_out, 3) Args: points: (N, 3) num_sampled_points: int num_sectors: int Returns: sampled_points: (N_out, 3) # self.fusion_channel = 16 # multi_location # have multi-modality + multi-scale # (H, W, C) # (B, N, C0) # (1, N, 3) # (B, M, 3) # (1, N, 3) # (B, M, 3) # (N, 1, 3) Args: batch_dict: batch_size: keypoints: (B, num_keypoints, 3) multi_scale_3d_features: { 'x_conv4': ... } points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...] spatial_features: optional spatial_features_stride: optional Returns: point_features: (N, C) point_coords: (N, 4) # batch_dict = self.reduce_points(batch_dict) # have multi-modality + multi-scale # multi_location # (BxN, C) # (BxN, 4) Args: keypoints: (N1 + N2 + ..., 4) bev_features: (B, C, H, W) batch_size: bev_stride: Returns: point_bev_features: (N1 + N2 + ..., C) # (H, W, C) # (N1 + N2 + ..., C) Args: roi_boxes: (M, 7 + C) points: (N, 3) Returns: sampled_points: (N_out, 3) Args: batch_dict: Returns: keypoints: (N1 + N2 + ..., 4), where 4 indicates [bs_idx, x, y, z] # (1, N, 3) # (B, M, 3) or (N1 + N2 + ..., 4) Args: aggregate_func: xyz: (N, 3) xyz_features: (N, C) xyz_bs_idxs: (N) new_xyz: (M, 3) new_xyz_batch_cnt: (batch_size), [N1, N2, ...] filter_neighbors_with_roi: True/False radius_of_neighbor: float num_max_points_of_part: int rois: (batch_size, num_rois, 7 + C) Returns: Args: batch_dict: batch_size: keypoints: (B, num_keypoints, 3) multi_scale_3d_features: { 'x_conv4': ... } points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...] spatial_features: optional spatial_features_stride: optional Returns: point_features: (N, C) point_coords: (N, 4) # (BxN, C) # (BxN, 4) | 1.860207 | 2 |
metabot2txt/display.py | HeitorBoschirolli/metabot2txt | 0 | 10016 | <filename>metabot2txt/display.py<gh_stars>0
import os
def display_on_editor(text):
with open('.metabot2txt', 'w') as f:
f.write(text)
os.system('gedit .metabot2txt')
def display_list_on_editor(texts):
if os.path.isfile('.metabot2txt'):
os.remove('.metabot2txt')
for text in texts:
with open('.metabot2txt', 'a') as f:
f.write(text)
f.write('\n=====================================\n')
os.system('gedit .metabot2txt')
| <filename>metabot2txt/display.py<gh_stars>0
import os
def display_on_editor(text):
with open('.metabot2txt', 'w') as f:
f.write(text)
os.system('gedit .metabot2txt')
def display_list_on_editor(texts):
if os.path.isfile('.metabot2txt'):
os.remove('.metabot2txt')
for text in texts:
with open('.metabot2txt', 'a') as f:
f.write(text)
f.write('\n=====================================\n')
os.system('gedit .metabot2txt')
| none | 1 | 2.849291 | 3 |
|
cogs/errors.py | i1470s/IVRY | 3 | 10017 | #PRIMARY IMPORTS
import discord, os, datetime, sys, json, traceback, logging
#SECONDARY IMPORTS
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from discord.ext import commands
from data import config
#LOGGING
logger = logging.getLogger("ivry")
logger.debug("errors.py Started")
class Errors(commands.Cog):
def __init__(self, client):
self.client = client
#ERROR MESSAGES
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
if hasattr(ctx.command, 'on_error'):
return
cog = ctx.cog
if cog:
if cog._get_overridden_method(cog.cog_command_error) is not None:
return
ignored = (commands.CommandNotFound)
error = getattr(error, 'original', error)
if isinstance(error, ignored):
return
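# The branches below all build a near-identical embed and differ only in the label
# text; a small helper along these lines (hypothetical, not used by the cog as
# written) could factor the repetition out:
#
#     async def send_error_embed(self, ctx, label, error):
#         embed = discord.Embed(title=f"Type = `Fatal`", color=0x9B59B6,
#                               timestamp=ctx.message.created_at)
#         embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
#         embed.add_field(name="Error", value=f"`{label}`", inline=True)
#         embed.add_field(name="Error Point", value=f"`{ctx.command}`", inline=True)
#         embed.add_field(name="Trace Back", value=f"```CSS\n{error}```", inline=False)
#         embed.set_footer(text=f"{config.version} | {config.shards}")
#         await ctx.send(embed=embed)
#         logger.debug(f"[ERROR] {ctx.command} | {error}")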
#COMMAND ERROR
elif isinstance(error, commands.CommandError):
embed = discord.Embed(title=f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Command Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
print(f'[WARNING] A Fatal internal Command Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#CONVERSION ERROR
elif isinstance(error, commands.ConversionError):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Command Conversion Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
print(f'[WARNING] A Fatal internal Conversion Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#USER INPUT ERROR
elif isinstance(error, commands.UserInputError):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal User Input Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
print(f'[WARNING] A Fatal internal User Input Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#MISSING REQUIRED ARGUMENT
elif isinstance(error, commands.MissingRequiredArgument):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Command Conversion Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
print(f'[WARNING] A Fatal internal Missing Required Argument Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#TOO MANY ARGUMENTS
elif isinstance(error, commands.TooManyArguments):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Command Conversion Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
print(f'[WARNING] A Fatal internal Too Many Arguments Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#BAD ARGUMENT
elif isinstance(error, commands.BadArgument):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Bad Argument Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
print(f'[WARNING] A Fatal internal Bad Argument Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#MESSAGE NOT FOUND
elif isinstance(error, commands.MessageNotFound):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Message Not Found Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
print(f'[WARNING] A Fatal internal Message Not Found Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#MEMBER NOT FOUND
elif isinstance(error, commands.MemberNotFound):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Member Not Found Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
print(f'[WARNING] A Fatal internal Member Not Found Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#USER NOT FOUND
elif isinstance(error, commands.UserNotFound):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal User Not Found Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
print(f'[WARNING] A Fatal internal User Not Found Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#CHANNEL NOT FOUND
elif isinstance(error, commands.ChannelNotFound):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Channel Not Found Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
print(f'[WARNING] A Fatal internal Channel Not Found Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#CHANNEL NOT READABLE
elif isinstance(error, commands.ChannelNotReadable):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Channel Not Readable Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
print(f'[WARNING] A Fatal internal Channel Not Readable Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#BAD COLOR ARGUMENT
elif isinstance(error, commands.BadColourArgument):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Bad Colour Argument Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
print(f'[WARNING] A Fatal internal Bad Colour Argument Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#ROLE NOT FOUND
elif isinstance(error, commands.RoleNotFound):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Role Not Found Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
print(f'[WARNING] A Fatal internal Role Not Found Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#BAD INVITE ARGUMENT
elif isinstance(error, commands.BadInviteArgument):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Command Conversion Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
print(f'[WARNING] A Fatal internal Bad Invite Argument Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#EMOJI NOT FOUND
elif isinstance(error, commands.EmojiNotFound):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Emoji Not Found Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
print(f'[WARNING] A Fatal internal Emoji Not Found Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#PARTIAL EMOJI CONVERSION FAILURE
elif isinstance(error, commands.PartialEmojiConversionFailure):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Partial Emoji Conversion Failure Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
print(f'[WARNING] A Fatal internal Partial Emoji Conversion Failure Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#BAD BOOL ARGUMENT
elif isinstance(error, commands.BadBoolArgument):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Bad Bool Argument Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
print(f'[WARNING] A Fatal internal Bad Bool Argument Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#BAD UNION ARGUMENT
elif isinstance(error, commands.BadUnionArgument):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Bad Union Argument Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Bad Union Argument Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#ARGUMENT PARSING ERROR
elif isinstance(error, commands.ArgumentParsingError):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Argument Parsing Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Argument Parsing Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#UNEXPECTED QUOTE ERROR
elif isinstance(error, commands.UnexpectedQuoteError):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Unexpected Quote Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Unexpected Quote Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#INVALID END OF QUOTED STRING
elif isinstance(error, commands.InvalidEndOfQuotedStringError):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Invalid End Of Quoted String Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Invalid End Of Quoted String Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#EXPECTED CLOSING QUOTE ERROR
elif isinstance(error, commands.ExpectedClosingQuoteError):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Expected Closing Quote Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Expected Closing Quote Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#COMMAND NOT FOUND
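        #NOTE: this branch appears unreachable in practice, since CommandNotFound is
        #already returned early by the `ignored` check at the top of this listener.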
elif isinstance(error, commands.CommandNotFound):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Command Not Found Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Command Not Found Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#CHECK FAILURE
elif isinstance(error, commands.CheckFailure):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Check Failure Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Check Failure Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#CHECK ANY FAILURE
elif isinstance(error, commands.CheckAnyFailure):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Check Any Failure Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Check Any Failure Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#PRIVATE MESSAGE ONLY
elif isinstance(error, commands.PrivateMessageOnly):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Private Message Only Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Private Message Only Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#NO PRIVATE MESSAGE
elif isinstance(error, commands.NoPrivateMessage):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal No Private Message Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal No Private Message Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#NOT OWNER
elif isinstance(error, commands.NotOwner):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Not Owner Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Not Owner Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#MISSING PERMISSIONS
elif isinstance(error, commands.MissingPermissions):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Missing Permissions Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Missing Permissions Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#MISSING ROLE
elif isinstance(error, commands.MissingRole):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Missing Role Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Missing Role Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#BOT MISSING ROLE
elif isinstance(error, commands.BotMissingRole):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Bot Missing Role Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Bot Missing Role Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#MISSING ANY ROLE
elif isinstance(error, commands.MissingAnyRole):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Missing Any Role Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Missing Any Role Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#BOT MISSING ANY ROLE
elif isinstance(error, commands.BotMissingAnyRole):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Bot Missing Any Role Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Bot Missing Any Role Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#NSFW CHANNEL REQUIRED
elif isinstance(error, commands.NSFWChannelRequired):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal NSFW Channel Required Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal NSFW Channel Required Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#DISABLED COMMAND
elif isinstance(error, commands.DisabledCommand):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Disabled Command Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Disabled Command Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#COMMAND INVOKE ERROR
elif isinstance(error, commands.CommandInvokeError):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Command Invoke Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Command Invoke Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#COMMAND ON COOLDOWN
elif isinstance(error, commands.CommandOnCooldown):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Command On Cooldown Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Command On Cooldown Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#MAX CONCURRENCY REACHED
elif isinstance(error, commands.MaxConcurrencyReached):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Max Concurrency Reached Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Max Concurrency Reached Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#EXTENSION ERROR
elif isinstance(error, commands.ExtensionError):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal EXT Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Extension Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#EXTENSION ALREADY LOADED
elif isinstance(error, commands.ExtensionAlreadyLoaded):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal EXT Already Loaded Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Extension Already Loaded Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#EXTENSION NOT LOADED
elif isinstance(error, commands.ExtensionNotLoaded):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal EXT Not Loaded Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Extension Not Loaded Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#NO ENTRY POINT ERROR
elif isinstance(error, commands.NoEntryPointError):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal No Entry Point Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal No Entry Point Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#EXTENSION FAILED
elif isinstance(error, commands.ExtensionFailed):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal EXT Failed Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Extension Failed Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#EXTENSION NOT FOUND
elif isinstance(error, commands.ExtensionNotFound):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal EXT Not Found Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Extension Not Found Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
#CLIENT EXCEPTION
#COMMAND REGISTRATION ERROR
elif isinstance(error, commands.CommandRegistrationError):
embed = discord.Embed(title = f"Type = `Fatal`",color=0x9B59B6, timestamp=ctx.message.created_at)
embed.set_author(name="IVRY Error", icon_url=self.client.user.avatar_url)
embed.add_field(name = "Error", value="`Internal Command Registration Error`", inline=True)
embed.add_field(name = "Error Point", value=f"`{ctx.command}`", inline=True)
embed.add_field(name = "Trace Back", value=f"```CSS\n{error}```", inline=False)
embed.set_footer(text=f"{config.version} | {config.shards}")
await ctx.send(embed=embed)
            print(f'[WARNING] A Fatal internal Command Registration Error occurred in execution of {ctx.command}')
logger.debug(f"[ERROR] {ctx.command} | {error}")
else:
print('Ignoring exception in command {}:'.format(ctx.command), file=sys.stderr)
traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)
def setup(client):
    client.add_cog(Errors(client))
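
# --------------------------------------------------------------------------
# Illustrative sketch (not wired into the cog above): the elif chain in
# on_command_error repeats the same embed for every exception type and only
# the label text changes, so a table-driven variant keeps a single code path.
# The mapping below lists only a few entries as an example; extending it to
# the remaining commands.* exceptions is an assumption, not original code.
ERROR_LABELS = {
    commands.MissingPermissions: "Internal Missing Permissions Error",
    commands.CommandOnCooldown: "Internal Command On Cooldown Error",
    commands.RoleNotFound: "Internal Role Not Found Error",
}

def build_error_embed(ctx, error, label):
    """Build the standard IVRY error embed used by the handlers above."""
    embed = discord.Embed(title="Type = `Fatal`", color=0x9B59B6, timestamp=ctx.message.created_at)
    embed.add_field(name="Error", value=f"`{label}`", inline=True)
    embed.add_field(name="Error Point", value=f"`{ctx.command}`", inline=True)
    embed.add_field(name="Trace Back", value=f"```CSS\n{error}```", inline=False)
    embed.set_footer(text=f"{config.version} | {config.shards}")
    return embed

# Example use inside on_command_error:
#     label = ERROR_LABELS.get(type(error))
#     if label:
#         await ctx.send(embed=build_error_embed(ctx, error, label))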
Using Python to Access Web Data/Problem 6_Extracting Data from JSON 5.py | Karoline0097/University-of-Michigan-Python-for-Everybody | 0 | 10018 | <reponame>Karoline0097/University-of-Michigan-Python-for-Everybody
## Problem 5: Extracting Data from JSON
# Example: http://py4e-data.dr-chuck.net/comments_42.json
# data consists of a number of names and comment counts in JSON
# {
# comments: [
# {
# name: "Matthias"
# count: 97
# },
# {
# name: "Geomer"
# count: 97
# }
# ...
# ]
# }
import urllib.request, urllib.parse, urllib.error
import json
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
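# Note: turning off hostname checking and certificate verification is insecure;
# it is kept here only so the coursework URL also works on machines whose local
# certificate store cannot validate the py4e server.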
# prompt for a URL
url = input('Enter URL: ')
# handle for data
data_handle = urllib.request.urlopen(url, context=ctx)
# read the JSON data from that URL using urllib
# decode UTF-8 byte array to Unicode string
data = data_handle.read().decode()
# parse string containing json into structured object (-> JSON object / Python dictionary)
# data_js is dictionary
data_js = json.loads(data)
# compute the sum of the numbers in the file
number_sum = 0
# parse and extract the comment counts from the JSON data,
# data_js['comments'] is list of dictionaries
# print(data_js['comments'])
for user in data_js['comments']:
print('Name:', user['name'])
print('Count:', user['count'])
number_sum = number_sum + user['count']
# Example: Total count 2553
print('Total Count:', number_sum)
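
# Optional local check (not part of the original exercise): the same counting
# logic can be exercised without a network call by parsing an inline sample
# that mirrors the structure documented at the top of this script.
sample = '{"comments": [{"name": "Matthias", "count": 97}, {"name": "Geomer", "count": 97}]}'
sample_total = sum(item['count'] for item in json.loads(sample)['comments'])
print('Sample Total (expected 194):', sample_total)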
# Example: http://py4e-data.dr-chuck.net/comments_42.json
# data consists of a number of names and comment counts in JSON
# {
# comments: [
# {
# name: "Matthias"
# count: 97
# },
# {
# name: "Geomer"
# count: 97
# }
# ...
# ]
# }
import urllib.request, urllib.parse, urllib.error
import json
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# prompt for a URL
url = input('Enter URL: ')
# handle for data
data_handle = urllib.request.urlopen(url, context=ctx)
# read the JSON data from that URL using urllib
# decode UTF 8 byte array to Unicode string
data = data_handle.read().decode()
# parse string containing json into structured object (-> JSON object / Python dictionary)
# data_js is dictionary
data_js = json.loads(data)
# compute the sum of the numbers in the file
number_sum = 0
# parse and extract the comment counts from the JSON data,
# data_js['comments'] is list of dictionaries
# print(data_js['comments'])
for user in data_js['comments']:
print('Name:', user['name'])
print('Count:', user['count'])
number_sum = number_sum + user['count']
# Example: Total count 2553
print('Total Count:', number_sum) | en | 0.639481 | ## Problem 5: Extracting Data from JSON # Example: http://py4e-data.dr-chuck.net/comments_42.json # data consists of a number of names and comment counts in JSON # { # comments: [ # { # name: "Matthias" # count: 97 # }, # { # name: "Geomer" # count: 97 # } # ... # ] # } # Ignore SSL certificate errors # prompt for a URL # handle for data # read the JSON data from that URL using urllib # decode UTF 8 byte array to Unicode string # parse string containing json into structured object (-> JSON object / Python dictionary) # data_js is dictionary # compute the sum of the numbers in the file # parse and extract the comment counts from the JSON data, # data_js['comments'] is list of dictionaries # print(data_js['comments']) # Example: Total count 2553 | 3.82395 | 4 |
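# A quick offline check of the same parsing logic, using the inline sample
# document from the comments above instead of fetching a URL (values taken
# from that sample, so no network access is assumed).
import json
doc = '{"comments": [{"name": "Matthias", "count": 97}, {"name": "Geomer", "count": 97}]}'
total = sum(item["count"] for item in json.loads(doc)["comments"])
print('Total Count:', total)   # 194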
lessons/day_05/python/app.py | jiaguilera/a-walk-in-graphql | 16 | 10019 | from ariadne import make_executable_schema, load_schema_from_path
from ariadne.asgi import GraphQL
from resolvers import query, skill, person, eye_color, mutation
# import schema from GraphQL file
type_defs = load_schema_from_path("./schema.gql")
schema = make_executable_schema(
type_defs, query, skill, person, eye_color, mutation
)
app = GraphQL(schema, debug=True)
| from ariadne import make_executable_schema, load_schema_from_path
from ariadne.asgi import GraphQL
from resolvers import query, skill, person, eye_color, mutation
# import schema from GraphQL file
type_defs = load_schema_from_path("./schema.gql")
schema = make_executable_schema(
type_defs, query, skill, person, eye_color, mutation
)
app = GraphQL(schema, debug=True)
| en | 0.782967 | # import schema from GraphQL file | 1.698703 | 2 |
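# One way to serve the ASGI app defined above during development; uvicorn is
# an assumption here, any ASGI server would work just as well.
import uvicorn
from app import app
uvicorn.run(app, host="127.0.0.1", port=8000)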
src/__main__.py | andreaswatch/piTomation | 0 | 10020 | import importlib
import time
from pathlib import Path
import os
import sys
def import_plugins():
#find actual path
realpath = os.path.realpath(__file__)
dirname = os.path.dirname(realpath)
#add modules & plugins
plugin_path = os.path.join(dirname, "plugins")
for dir_path in Path(plugin_path).rglob('*.py'):
dp = str(dir_path)
if dp.lower().endswith("__init__.py"):
continue
path = dp[len(dirname)+1:-3].replace(os.sep,".")
if len(path.split('.')) < 4:
'''only import the top level plugin directory, so that potential submodules are
only imported if they are imported by the plugins.'''
print(" > " + path)
importlib.import_module(path)
print("Import plugins ..")
import_plugins()
print("Import app ..")
import modules.app.App as piTomation
app: piTomation.App
print("Start app ..")
app = piTomation.App()
#try:
# app = piTomation.App()
#except Exception as ex:
# print(ex)
# exit()
try:
while not app.is_disposed:
time.sleep(1)
except Exception as ex:
print(ex)
| import importlib
import time
from pathlib import Path
import os
import sys
def import_plugins():
#find actual path
realpath = os.path.realpath(__file__)
dirname = os.path.dirname(realpath)
#add modules & plugins
plugin_path = os.path.join(dirname, "plugins")
for dir_path in Path(plugin_path).rglob('*.py'):
dp = str(dir_path)
if dp.lower().endswith("__init__.py"):
continue
path = dp[len(dirname)+1:-3].replace(os.sep,".")
if len(path.split('.')) < 4:
'''only import the top level plugin directory, so that potential submodules are
only imported if they are imported by the plugins.'''
print(" > " + path)
importlib.import_module(path)
print("Import plugins ..")
import_plugins()
print("Import app ..")
import modules.app.App as piTomation
app: piTomation.App
print("Start app ..")
app = piTomation.App()
#try:
# app = piTomation.App()
#except Exception as ex:
# print(ex)
# exit()
try:
while not app.is_disposed:
time.sleep(1)
except Exception as ex:
print(ex)
| en | 0.734025 | #find actual path #add modules & plugins only import the top level plugin directory, so that potential submodules are only imported if they are imported by the plugins. #try: # app = piTomation.App() #except Exception as ex: # print(ex) # exit() | 2.539009 | 3 |
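# Sketch of the path-to-module-name conversion performed in import_plugins(),
# using a made-up install path.
import os
dirname = "/opt/piTomation/src"
dp = "/opt/piTomation/src/plugins/gpio/button.py"
path = dp[len(dirname) + 1:-3].replace(os.sep, ".")
print(path)                      # plugins.gpio.button
print(len(path.split(".")) < 4)  # True, so this module would be imported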
src/decanter/core/extra/utils.py | MatthewK3023/decanter-ai-core-sdk | 0 | 10021 | """
Functions support other modules.
"""
import uuid
def check_response(response, key=None):
"""CHeck the api response.
Make sure the status call is successful and the response have specific key.
Return:
class: `Response <Response>`
"""
code = response.status_code
if not 200 <= code < 300:
raise Exception('[Decanter Core response Error] Request Error')
if key is not None and key not in response.json():
raise KeyError('[Decanter Core response Error] No key value')
return response
def gen_id(type_, name):
"""Generate a random UUID if name isn't given.
Returns:
string
"""
if name is None:
rand_id = uuid.uuid4()
rand_id = str(rand_id)[:8]
name = type_ + '_' + rand_id
return name
def isnotebook():
"""Return True if SDK is running on Jupyter Notebook."""
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
if shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
return False
except NameError:
return False
| """
Functions support other modules.
"""
import uuid
def check_response(response, key=None):
"""CHeck the api response.
Make sure the status call is successful and the response have specific key.
Return:
class: `Response <Response>`
"""
code = response.status_code
if not 200 <= code < 300:
raise Exception('[Decanter Core response Error] Request Error')
if key is not None and key not in response.json():
raise KeyError('[Decanter Core response Error] No key value')
return response
def gen_id(type_, name):
"""Generate a random UUID if name isn't given.
Returns:
string
"""
if name is None:
rand_id = uuid.uuid4()
rand_id = str(rand_id)[:8]
name = type_ + '_' + rand_id
return name
def isnotebook():
"""Return True if SDK is running on Jupyter Notebook."""
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
if shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
return False
except NameError:
return False
| en | 0.743243 | Functions support other modules. CHeck the api response. Make sure the status call is successful and the response have specific key. Return: class: `Response <Response>` Generate a random UUID if name isn't given. Returns: string Return True if SDK is running on Jupyter Notebook. # Jupyter notebook or qtconsole # Terminal running IPython | 3.05427 | 3 |
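# Minimal sketch of how these helpers compose; `requests` and the URL are
# assumptions, and the module path is inferred from the file location above.
import requests
from decanter.core.extra.utils import check_response, gen_id
resp = requests.get("https://example.com/api/status")
resp = check_response(resp, key="status")   # raises unless 2xx and key present
print(gen_id("experiment", None))           # e.g. "experiment_1a2b3c4d"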
03/03.py | stevenpclark/aoc2021 | 1 | 10022 | <reponame>stevenpclark/aoc2021<gh_stars>1-10
import numpy as np
def filter_data(data, use_most_common):
_, nc = data.shape
for c in range(nc):
nr, _ = data.shape
if nr <= 1:
break
col_score = sum(data[:,c])/nr
if use_most_common:
keep_val = col_score >= 0.5
else:
keep_val = col_score < 0.5
mask = data[:,c] == keep_val
data = data[mask, :]
x = 0
for n in data[0,:]:
x = (x << 1) + n
return x
def main():
fn = 'input.txt'
#fn = 'test.txt'
lines = np.loadtxt(fn, dtype=str)
num_lines = len(lines)
data = np.array([[int(c) for c in s] for s in lines])
gamma_list = (np.sum(data, axis=0)/num_lines > 0.5).astype(int)
gamma = 0
epsilon = 0
for n in gamma_list:
gamma = (gamma << 1) + n
epsilon = (epsilon << 1) + (1-n)
print(gamma*epsilon)
rating1 = filter_data(data, use_most_common=True)
rating2 = filter_data(data, use_most_common=False)
print(rating1*rating2)
if __name__ == '__main__':
main()
| import numpy as np
def filter_data(data, use_most_common):
_, nc = data.shape
for c in range(nc):
nr, _ = data.shape
if nr <= 1:
break
col_score = sum(data[:,c])/nr
if use_most_common:
keep_val = col_score >= 0.5
else:
keep_val = col_score < 0.5
mask = data[:,c] == keep_val
data = data[mask, :]
x = 0
for n in data[0,:]:
x = (x << 1) + n
return x
def main():
fn = 'input.txt'
#fn = 'test.txt'
lines = np.loadtxt(fn, dtype=str)
num_lines = len(lines)
data = np.array([[int(c) for c in s] for s in lines])
gamma_list = (np.sum(data, axis=0)/num_lines > 0.5).astype(int)
gamma = 0
epsilon = 0
for n in gamma_list:
gamma = (gamma << 1) + n
epsilon = (epsilon << 1) + (1-n)
print(gamma*epsilon)
rating1 = filter_data(data, use_most_common=True)
rating2 = filter_data(data, use_most_common=False)
print(rating1*rating2)
if __name__ == '__main__':
main() | ko | 0.338478 | #fn = 'test.txt' | 2.934243 | 3 |
distillation/build_student.py | fengxiaoshuai/CNN_model_optimizer | 0 | 10023 | <filename>distillation/build_student.py
import tensorflow as tf
import numpy as np
with tf.variable_scope("student"):
input_label = tf.placeholder(dtype=tf.float32, shape=[10, 10], name="label")
input_image = tf.placeholder(dtype=tf.float32, shape=[10, 224, 224, 3], name="input")
conv1 = tf.layers.conv2d(inputs=input_image, filters=64, kernel_size=[3, 3], padding='same')
conv2 = tf.layers.conv2d(conv1, filters=64, kernel_size=[3, 3], padding='same')
conv3 = tf.layers.conv2d(conv2, filters=64, kernel_size=[3, 3], padding='same')
shape = int(np.prod(conv3.get_shape()[1:]))
flat = tf.reshape(conv3, [-1, shape])
fc1 = tf.layers.dense(flat, units=100)
fc2 = tf.layers.dense(fc1, units=10, name="logit")
probability = tf.nn.softmax(fc2)
loss = tf.losses.softmax_cross_entropy(input_label, fc2)
print(input_label)
image = np.ones(shape=[10, 224, 224, 3])
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
saver = tf.train.Saver()
saver.save(sess, "./student/student")
print(sess.run(probability, feed_dict={input_image: image}))
| <filename>distillation/build_student.py
import tensorflow as tf
import numpy as np
with tf.variable_scope("student"):
input_label = tf.placeholder(dtype=tf.float32, shape=[10, 10], name="label")
input_image = tf.placeholder(dtype=tf.float32, shape=[10, 224, 224, 3], name="input")
conv1 = tf.layers.conv2d(inputs=input_image, filters=64, kernel_size=[3, 3], padding='same')
conv2 = tf.layers.conv2d(conv1, filters=64, kernel_size=[3, 3], padding='same')
conv3 = tf.layers.conv2d(conv2, filters=64, kernel_size=[3, 3], padding='same')
shape = int(np.prod(conv3.get_shape()[1:]))
flat = tf.reshape(conv3, [-1, shape])
fc1 = tf.layers.dense(flat, units=100)
fc2 = tf.layers.dense(fc1, units=10, name="logit")
probability = tf.nn.softmax(fc2)
loss = tf.losses.softmax_cross_entropy(input_label, fc2)
print(input_label)
image = np.ones(shape=[10, 224, 224, 3])
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
saver = tf.train.Saver()
saver.save(sess, "./student/student")
print(sess.run(probability, feed_dict={input_image: image}))
| none | 1 | 2.420336 | 2 |
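# Quick sanity check on the checkpoint written above: list the variables that
# were saved and their shapes (TF1-style API, path taken from the script).
import tensorflow as tf
for name, shape in tf.train.list_variables("./student/student"):
    print(name, shape)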
code/statistical_tests.py | ChamiLamelas/Math36B_FinalProject | 0 | 10024 | import scipy.stats
import numpy as np
def f_test(sample_x, sample_y, larger_varx_alt):
"""
Computes the F-value and corresponding p-value for a pair of samples and alternative hypothesis.
Parameters
----------
sample_x : list
A random sample x1,...,xnx. Let its (underlying) variance be ox^2 and its sample variance Sx^2.
sample_y : list
A random sample y1,...,yny. Let its (underlying) variance be oy^2 and its sample variance Sy^2.
larger_varx_alt : bool
True if alternative hypothesis is ox^2 > oy^2. False if ox^2 < oy^2.
Returns
-------
f_value : float
Sx^2 / Sy^2 as defined in 'A Quick, Compact, Two-Sample Dispersion Test: Count Five'.
p_value : float
Let F be the F-distribution with nx, ny df. 1 - P(F < f_value) if larger_varx_alt = True, P(F < f_value) otherwise. More extreme F = Sx^2 / Sy^2 values for alternative ox^2 > oy^2 are to the right. More extreme F values for ox^2 < oy^2 are to the left.
"""
# calculate unbiased sample variances (n-1 in the denominator)
sample_var_x = np.var(sample_x, ddof=1)
sample_var_y = np.var(sample_y, ddof=1)
f_value = sample_var_x/sample_var_y
nx = len(sample_x)
ny = len(sample_y)
# compute P(F < f_value) with nx-1, ny-1 df
cdf = scipy.stats.f.cdf(f_value, nx-1, ny-1)
# More extreme f_value = Sx^2 / Sy^2 values for alternative ox^2 > oy^2. ox^2 being even bigger would be represented by larger quotient Sx^2 / Sy^2.
# More extreme f_value for ox^2 < oy^2 are to the left. ox^2 being even smaller would be represented by smaller quotient.
p_value = 1 - cdf if larger_varx_alt else cdf
return f_value, p_value
def f1_test(sample_x, sample_y, larger_varx_alt):
"""
Computes the F1-value as defined in 'Fixing the F Test for Equal Variances' and corresponding p-value for a pair of samples and alternative hypothesis.
Parameters
----------
sample_x : list
A random sample x1,...,xnx. Let its (underlying) variance be ox^2 and its sample variance Sx^2.
sample_y : list
A random sample y1,...,yny. Let its (underlying) variance be oy^2 and its sample variance Sy^2.
larger_varx_alt : bool
True if alternative hypothesis is ox^2 > oy^2. False if ox^2 < oy^2.
Returns
-------
p_value : float
Let F be the F-distribution with rx, ry df as specified in equation (1) of 'Fixing the F Test for Equal Variances'. 1 - P(F < f_value) if larger_varx_alt = True, P(F < f_value) otherwise.
"""
# calculate unbiased sample variances (n-1 in the denominator)
sample_var_x = np.var(sample_x, ddof=1)
sample_var_y = np.var(sample_y, ddof=1)
f_value = sample_var_x/sample_var_y
nx = len(sample_x)
ny = len(sample_y)
xmean = np.mean(sample_x)
ymean = np.mean(sample_y)
# compute moment, variance below equatio (1) of Shoemaker paper
fourth_moment = (np.sum((sample_x - xmean)**4) +
np.sum((sample_y - ymean)**4))/(nx + ny)
pooled_var = ((nx-1)*sample_var_x + (ny-1)*sample_var_y)/(nx + ny)
# see equation (1) of Shoemaker paper
rx = 2*nx / ((fourth_moment/pooled_var**2) - ((nx - 3)/(nx - 1)))
ry = 2*ny / ((fourth_moment/pooled_var**2) - ((ny - 3)/(ny - 1)))
# compute P(F < f_value) with rx-1, ry-1 df
cdf = scipy.stats.f.cdf(f_value, rx-1, ry-1)
# More extreme f_value = Sx^2 / Sy^2 values for alternative ox^2 > oy^2. ox^2 being even bigger would be represented by larger quotient Sx^2 / Sy^2.
# More extreme f_value for ox^2 < oy^2 are to the left. ox^2 being even smaller would be represented by smaller quotient.
p_value = 1 - cdf if larger_varx_alt else cdf
return p_value
def count_five(sample_x, sample_y, center):
"""
Computes the extreme counts for samples x and y as defined in 'A Quick, Compact, Two-Sample Dispersion Test: Count Five'.
Parameters
----------
sample_x : list
A random sample x1,...,xn.
sample_y : list
A random sample y1,...,ym.
center : str
Whether to use 'mean' or 'median' for centering.
Returns
-------
extreme_count_x : int
C_x computed with centering mu being sample mean if center = 'mean' and sample median if center = 'median' as defined in equation (1) of 'A Quick, Compact, Two-Sample Dispersion Test: Count Five'.
extreme_count_y : int
C_y defined analogously to C_x above.
Raises
------
ValueError
If center is neither 'mean' or 'median'.
"""
if center not in {'mean', 'median'}:
raise ValueError('Invalid center %s' % (center))
if center == 'mean':
centering_x = np.mean(sample_x)
centering_y = np.mean(sample_y)
else:
centering_x = np.median(sample_x)
centering_y = np.median(sample_y)
# compute absolute deviations from centering for x, y samples
abs_dev_x = np.abs(np.array(sample_x) - centering_x)
abs_dev_y = np.abs(np.array(sample_y) - centering_y)
# count number of X deviations greater than max Y deviation and vice versa
# see equation (1) of Count Five paper
extreme_count_x = np.sum(np.where(abs_dev_x > np.max(abs_dev_y), 1, 0))
extreme_count_y = np.sum(np.where(abs_dev_y > np.max(abs_dev_x), 1, 0))
return extreme_count_x, extreme_count_y
| import scipy.stats
import numpy as np
def f_test(sample_x, sample_y, larger_varx_alt):
"""
Computes the F-value and corresponding p-value for a pair of samples and alternative hypothesis.
Parameters
----------
sample_x : list
A random sample x1,...,xnx. Let its (underlying) variance be ox^2 and its sample variance Sx^2.
sample_y : list
A random sample y1,...,yny. Let its (underlying) variance be oy^2 and its sample variance Sy^2.
larger_varx_alt : bool
True if alternative hypothesis is ox^2 > oy^2. False if ox^2 < oy^2.
Returns
-------
f_value : float
Sx^2 / Sy^2 as defined in 'A Quick, Compact, Two-Sample Dispersion Test: Count Five'.
p_value : float
Let F be the F-distribution with nx, ny df. 1 - P(F < f_value) if larger_varx_alt = True, P(F < f_value) otherwise. More extreme F = Sx^2 / Sy^2 values for alternative ox^2 > oy^2 are to the right. More extreme F values for ox^2 < oy^2 are to the left.
"""
# calculate unbiased sample variances (n-1 in the denominator)
sample_var_x = np.var(sample_x, ddof=1)
sample_var_y = np.var(sample_y, ddof=1)
f_value = sample_var_x/sample_var_y
nx = len(sample_x)
ny = len(sample_y)
# compute P(F < f_value) with nx-1, ny-1 df
cdf = scipy.stats.f.cdf(f_value, nx-1, ny-1)
# More extreme f_value = Sx^2 / Sy^2 values for alternative ox^2 > oy^2. ox^2 being even bigger would be represented by larger quotient Sx^2 / Sy^2.
# More extreme f_value for ox^2 < oy^2 are to the left. ox^2 being even smaller would be represented by smaller quotient.
p_value = 1 - cdf if larger_varx_alt else cdf
return f_value, p_value
def f1_test(sample_x, sample_y, larger_varx_alt):
"""
Computes the F1-value as defined in 'Fixing the F Test for Equal Variances' and corresponding p-value for a pair of samples and alternative hypothesis.
Parameters
----------
sample_x : list
A random sample x1,...,xnx. Let its (underlying) variance be ox^2 and its sample variance Sx^2.
sample_y : list
A random sample y1,...,yny. Let its (underlying) variance be oy^2 and its sample variance Sy^2.
larger_varx_alt : bool
True if alternative hypothesis is ox^2 > oy^2. False if ox^2 < oy^2.
Returns
-------
p_value : float
Let F be the F-distribution with rx, ry df as specified in equation (1) of 'Fixing the F Test for Equal Variances'. 1 - P(F < f_value) if larger_varx_alt = True, P(F < f_value) otherwise.
"""
# calculate unbiased sample variances (n-1 in the denominator)
sample_var_x = np.var(sample_x, ddof=1)
sample_var_y = np.var(sample_y, ddof=1)
f_value = sample_var_x/sample_var_y
nx = len(sample_x)
ny = len(sample_y)
xmean = np.mean(sample_x)
ymean = np.mean(sample_y)
    # compute moment, variance below equation (1) of Shoemaker paper
fourth_moment = (np.sum((sample_x - xmean)**4) +
np.sum((sample_y - ymean)**4))/(nx + ny)
pooled_var = ((nx-1)*sample_var_x + (ny-1)*sample_var_y)/(nx + ny)
# see equation (1) of Shoemaker paper
rx = 2*nx / ((fourth_moment/pooled_var**2) - ((nx - 3)/(nx - 1)))
ry = 2*ny / ((fourth_moment/pooled_var**2) - ((ny - 3)/(ny - 1)))
# compute P(F < f_value) with rx-1, ry-1 df
cdf = scipy.stats.f.cdf(f_value, rx-1, ry-1)
# More extreme f_value = Sx^2 / Sy^2 values for alternative ox^2 > oy^2. ox^2 being even bigger would be represented by larger quotient Sx^2 / Sy^2.
# More extreme f_value for ox^2 < oy^2 are to the left. ox^2 being even smaller would be represented by smaller quotient.
p_value = 1 - cdf if larger_varx_alt else cdf
return p_value
def count_five(sample_x, sample_y, center):
"""
Computes the extreme counts for samples x and y as defined in 'A Quick, Compact, Two-Sample Dispersion Test: Count Five'.
Parameters
----------
sample_x : list
A random sample x1,...,xn.
sample_y : list
A random sample y1,...,ym.
center : str
Whether to use 'mean' or 'median' for centering.
Returns
-------
extreme_count_x : int
C_x computed with centering mu being sample mean if center = 'mean' and sample median if center = 'median' as defined in equation (1) of 'A Quick, Compact, Two-Sample Dispersion Test: Count Five'.
extreme_count_y : int
C_y defined analogously to C_x above.
Raises
------
ValueError
If center is neither 'mean' or 'median'.
"""
if center not in {'mean', 'median'}:
raise ValueError('Invalid center %s' % (center))
if center == 'mean':
centering_x = np.mean(sample_x)
centering_y = np.mean(sample_y)
else:
centering_x = np.median(sample_x)
centering_y = np.median(sample_y)
# compute absolute deviations from centering for x, y samples
abs_dev_x = np.abs(np.array(sample_x) - centering_x)
abs_dev_y = np.abs(np.array(sample_y) - centering_y)
# count number of X deviations greater than max Y deviation and vice versa
# see equation (1) of Count Five paper
extreme_count_x = np.sum(np.where(abs_dev_x > np.max(abs_dev_y), 1, 0))
extreme_count_y = np.sum(np.where(abs_dev_y > np.max(abs_dev_x), 1, 0))
return extreme_count_x, extreme_count_y
| en | 0.805749 | Computes the F-value and corresponding p-value for a pair of samples and alternative hypothesis. Parameters ---------- sample_x : list A random sample x1,...,xnx. Let its (underlying) variance be ox^2 and its sample variance Sx^2. sample_y : list A random sample y1,...,yny. Let its (underlying) variance be oy^2 and its sample variance Sy^2. larger_varx_alt : bool True if alternative hypothesis is ox^2 > oy^2. False if ox^2 < oy^2. Returns ------- f_value : float Sx^2 / Sy^2 as defined in 'A Quick, Compact, Two-Sample Dispersion Test: Count Five'. p_value : float Let F be the F-distribution with nx, ny df. 1 - P(F < f_value) if larger_varx_alt = True, P(F < f_value) otherwise. More extreme F = Sx^2 / Sy^2 values for alternative ox^2 > oy^2 are to the right. More extreme F values for ox^2 < oy^2 are to the left. # calculate unbiased sample variances (n-1 in the denominator) # compute P(F < f_value) with nx-1, ny-1 df # More extreme f_value = Sx^2 / Sy^2 values for alternative ox^2 > oy^2. ox^2 being even bigger would be represented by larger quotient Sx^2 / Sy^2. # More extreme f_value for ox^2 < oy^2 are to the left. ox^2 being even smaller would be represented by smaller quotient. Computes the F1-value as defined in 'Fixing the F Test for Equal Variances' and corresponding p-value for a pair of samples and alternative hypothesis. Parameters ---------- sample_x : list A random sample x1,...,xnx. Let its (underlying) variance be ox^2 and its sample variance Sx^2. sample_y : list A random sample y1,...,yny. Let its (underlying) variance be oy^2 and its sample variance Sy^2. larger_varx_alt : bool True if alternative hypothesis is ox^2 > oy^2. False if ox^2 < oy^2. Returns ------- p_value : float Let F be the F-distribution with rx, ry df as specified in equation (1) of 'Fixing the F Test for Equal Variances'. 1 - P(F < f_value) if larger_varx_alt = True, P(F < f_value) otherwise. # calculate unbiased sample variances (n-1 in the denominator) # compute moment, variance below equatio (1) of Shoemaker paper # see equation (1) of Shoemaker paper # compute P(F < f_value) with rx-1, ry-1 df # More extreme f_value = Sx^2 / Sy^2 values for alternative ox^2 > oy^2. ox^2 being even bigger would be represented by larger quotient Sx^2 / Sy^2. # More extreme f_value for ox^2 < oy^2 are to the left. ox^2 being even smaller would be represented by smaller quotient. Computes the extreme counts for samples x and y as defined in 'A Quick, Compact, Two-Sample Dispersion Test: Count Five'. Parameters ---------- sample_x : list A random sample x1,...,xn. sample_y : list A random sample y1,...,ym. center : str Whether to use 'mean' or 'median' for centering. Returns ------- extreme_count_x : int C_x computed with centering mu being sample mean if center = 'mean' and sample median if center = 'median' as defined in equation (1) of 'A Quick, Compact, Two-Sample Dispersion Test: Count Five'. extreme_count_y : int C_y defined analogously to C_x above. Raises ------ ValueError If center is neither 'mean' or 'median'. # compute absolute deviations from centering for x, y samples # count number of X deviations greater than max Y deviation and vice versa # see equation (1) of Count Five paper | 3.263476 | 3 |
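# Small usage sketch for the three dispersion tests above; assumes the file
# is importable as statistical_tests.py, and the samples are synthetic.
import numpy as np
from statistical_tests import count_five, f1_test, f_test
rng = np.random.default_rng(0)
x = rng.normal(0, 2.0, size=30)   # larger spread
y = rng.normal(0, 1.0, size=30)
f_val, p = f_test(x, y, larger_varx_alt=True)
print("F test:      F = %.3f, p = %.4f" % (f_val, p))
print("F1 test:     p = %.4f" % f1_test(x, y, larger_varx_alt=True))
print("Count five:  Cx, Cy =", count_five(x, y, center="median"))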
pawpyseed/compiler.py | akashkumarsingh612/pawpyseed | 0 | 10025 | <gh_stars>0
import os, subprocess
def compile_core(comp, scilib):
"""
ATTENTION, NOT FINISHED
"""
subprocess.call(("make pawpy_%s"%comp).split())
def compile_core(comp, scilib):
"""
ATTENTION, NOT FINISHED
"""
subprocess.call("make hfc".split()) | import os, subprocess
def compile_core(comp, scilib):
"""
ATTENTION, NOT FINISHED
"""
subprocess.call(("make pawpy_%s"%comp).split())
def compile_core(comp, scilib):
"""
ATTENTION, NOT FINISHED
"""
subprocess.call("make hfc".split()) | en | 0.600874 | ATTENTION, NOT FINISHED ATTENTION, NOT FINISHED | 2.351658 | 2 |
finnhub_python/socket.py | humdings/finnhub-python | 0 | 10026 | <reponame>humdings/finnhub-python<filename>finnhub_python/socket.py
"""
Example usage of Finnhub socket API.
"""
from __future__ import print_function # Py2 compat
import websocket
from finnhub_python.utils import get_finnhub_api_key
def write_line(data, fname):
with open(fname, 'a+') as f:
f.write(data + '\n')
def on_message(ws, message):
write_line(message, tick_file)
def on_error(ws, error):
print(error)
def on_close(ws):
print("### closed ###")
def on_open(ws):
for symbol in SYMBOLS:
subscribe(ws, symbol)
def subscribe(ws, symbol):
template = '{"type":"subscribe","symbol":"X"}'
req = template.replace('X', symbol.upper())
ws.send(req)
tick_file = 'raw_ticks.txt'
token = get_finnhub_api_key()
SYMBOLS = [
"AAPL",
"SPY",
"VXX",
"BINANCE:ETHUSDT",
"BINANCE:BTCUSDT"
]
if __name__ == "__main__":
websocket.enableTrace(True)
ws = websocket.WebSocketApp("wss://ws.finnhub.io?token=" + token,
on_message=on_message,
on_error=on_error,
on_close=on_close)
ws.on_open = on_open
ws.run_forever() | """
Example usage of Finnhub socket API.
"""
from __future__ import print_function # Py2 compat
import websocket
from finnhub_python.utils import get_finnhub_api_key
def write_line(data, fname):
with open(fname, 'a+') as f:
f.write(data + '\n')
def on_message(ws, message):
write_line(message, tick_file)
def on_error(ws, error):
print(error)
def on_close(ws):
print("### closed ###")
def on_open(ws):
for symbol in SYMBOLS:
subscribe(ws, symbol)
def subscribe(ws, symbol):
template = '{"type":"subscribe","symbol":"X"}'
req = template.replace('X', symbol.upper())
ws.send(req)
tick_file = 'raw_ticks.txt'
token = get_finnhub_api_key()
SYMBOLS = [
"AAPL",
"SPY",
"VXX",
"BINANCE:ETHUSDT",
"BINANCE:BTCUSDT"
]
if __name__ == "__main__":
websocket.enableTrace(True)
ws = websocket.WebSocketApp("wss://ws.finnhub.io?token=" + token,
on_message=on_message,
on_error=on_error,
on_close=on_close)
ws.on_open = on_open
ws.run_forever() | en | 0.594886 | Example usage of Finnhub socket API. # Py2 compat ## closed ###") | 3.216424 | 3 |
pycovjson/cli/convert.py | RileyWilliams/pycovjson | 10 | 10027 | """
Pycovjson - Command line interface
Author: rileywilliams
Version: 0.1.0
"""
import argparse
from pycovjson.write import Writer
from pycovjson.read_netcdf import NetCDFReader as Reader
def main():
"""
Command line interface for pycovjson - Converts Scientific Data Formats into CovJSON and saves to disk.
:argument -i: Input file path.
:argument -o: Output file name.
:argument -t: Use Tiling.
:argument -v: Which variable to populate coverage with.
:argument -s: [tile shape]: Tile shape.
:argument -n: Use interactive mode.
:argument -u: MongoDB URL
"""
parser = argparse.ArgumentParser(
description='Convert Scientific Data Formats into CovJSON.')
parser.add_argument('-i', '--input', dest='inputfile',
help='Name of input file', required=True)
parser.add_argument('-o', '--output', dest='outputfile',
help='Name and location of output file', default='coverage.covjson')
parser.add_argument('-t', '--tiled', action='store_true', help='Apply tiling')
parser.add_argument('-s', '--shape', nargs='+',
help='Tile shape, list', type=int)
parser.add_argument('-v', dest='variable',
help='Variable to populate coverage with', required=True)
parser.add_argument('-n', '--interactive', action='store_true', help='Enter interactive mode')
parser.add_argument('-u', '--endpoint_url', dest='endpoint_url', nargs=1,
help='MongoDB endpoint for CovJSON persistence')
args = parser.parse_args()
inputfile = args.inputfile
outputfile = args.outputfile
variable = args.variable
tiled = args.tiled
tile_shape = args.shape
interactive = args.interactive
endpoint_url = args.endpoint_url
if interactive:
axis = input('Which Axis?', Reader.get_axis(variable))
if tiled and len(tile_shape) == 0:
reader = Reader(inputfile)
shape_list = reader.get_shape(variable)
dims = reader.get_dimensions(variable)
print(list(zip(dims, shape_list)))
tile_shape = input(
'Enter the shape tile shape as a list of comma separated integers')
tile_shape = tile_shape.split(',')
tile_shape = list(map(int, tile_shape))
print(tile_shape)
    if outputfile is None:
        outputfile = 'coverage.covjson'
Writer(outputfile, inputfile, [variable],
tiled=tiled, tile_shape=tile_shape, endpoint_url=endpoint_url).write()
if __name__ == '__main__':
main()
| """
Pycovjson - Command line interface
Author: rileywilliams
Version: 0.1.0
"""
import argparse
from pycovjson.write import Writer
from pycovjson.read_netcdf import NetCDFReader as Reader
def main():
"""
Command line interface for pycovjson - Converts Scientific Data Formats into CovJSON and saves to disk.
:argument -i: Input file path.
:argument -o: Output file name.
:argument -t: Use Tiling.
:argument -v: Which variable to populate coverage with.
:argument -s: [tile shape]: Tile shape.
:argument -n: Use interactive mode.
:argument -u: MongoDB URL
"""
parser = argparse.ArgumentParser(
description='Convert Scientific Data Formats into CovJSON.')
parser.add_argument('-i', '--input', dest='inputfile',
help='Name of input file', required=True)
parser.add_argument('-o', '--output', dest='outputfile',
help='Name and location of output file', default='coverage.covjson')
parser.add_argument('-t', '--tiled', action='store_true', help='Apply tiling')
parser.add_argument('-s', '--shape', nargs='+',
help='Tile shape, list', type=int)
parser.add_argument('-v', dest='variable',
help='Variable to populate coverage with', required=True)
parser.add_argument('-n', '--interactive', action='store_true', help='Enter interactive mode')
parser.add_argument('-u', '--endpoint_url', dest='endpoint_url', nargs=1,
help='MongoDB endpoint for CovJSON persistence')
args = parser.parse_args()
inputfile = args.inputfile
outputfile = args.outputfile
variable = args.variable
tiled = args.tiled
tile_shape = args.shape
interactive = args.interactive
endpoint_url = args.endpoint_url
if interactive:
        axis = input('Which Axis? {}'.format(Reader.get_axis(variable)))
    if tiled and not tile_shape:
reader = Reader(inputfile)
shape_list = reader.get_shape(variable)
dims = reader.get_dimensions(variable)
print(list(zip(dims, shape_list)))
tile_shape = input(
'Enter the shape tile shape as a list of comma separated integers')
tile_shape = tile_shape.split(',')
tile_shape = list(map(int, tile_shape))
print(tile_shape)
    if outputfile is None:
        outputfile = 'coverage.covjson'
Writer(outputfile, inputfile, [variable],
tiled=tiled, tile_shape=tile_shape, endpoint_url=endpoint_url).write()
if __name__ == '__main__':
main()
| en | 0.439618 | Pycovjson - Command line interface Author: rileywilliams Version: 0.1.0 Command line interface for pycovjson - Converts Scientific Data Formats into CovJSON and saves to disk. :argument -i: Input file path. :argument -o: Output file name. :argument -t: Use Tiling. :argument -v: Which variable to populate coverage with. :argument -s: [tile shape]: Tile shape. :argument -n: Use interactive mode. :argument -u: MongoDB URL | 3.338622 | 3 |
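# Programmatic equivalent of the CLI above; the NetCDF file name and variable
# name are placeholders.
from pycovjson.write import Writer
Writer("coverage.covjson", "sea_surface_temperature.nc", ["analysed_sst"],
       tiled=False, tile_shape=None, endpoint_url=None).write()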
duels/duels.py | ridinginstyle00/redcogs | 8 | 10028 | <reponame>ridinginstyle00/redcogs
import discord
from discord.ext import commands
from .utils import checks
from .utils.dataIO import dataIO
from __main__ import send_cmd_help
from __main__ import settings
from datetime import datetime
from random import choice
from random import sample
from copy import deepcopy
from collections import namedtuple, defaultdict
import os
import logging
import aiohttp
import asyncio
import time
from time import sleep
client = discord.Client()
class Duels:
def __init__(self, bot):
global globvar
self.bot = bot
self.duelist = dataIO.load_json("data/duels/duelist.json")
self.nuels = "duels"
self.counter = "Number:"
self.setter = "Max:"
self.wlt = dataIO.load_json("data/duels/account.json")
self.timer_board = dataIO.load_json("data/duels/timer.json")
@commands.group(name="duels", pass_context=True)
async def _duels(self, ctx):
"""Duel with another player!!"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@commands.command(name="tjoin", pass_context=True)
@checks.admin_or_permissions(manage_server=True)
async def tjoin(self, ctx):
"""Add server to timer list"""
author = ctx.message.author
server = author.server
if server.id not in self.timer_board:
self.timer_board[server.id] = {"time": 0}
dataIO.save_json("data/duels/timer.json", self.timer_board)
await self.bot.say("**{}** has been added to the timer board!".format(server.name))
else:
await self.bot.say("**{}** has already been added to the timer_board!".format(server.name))
@commands.command(name="duel", pass_context=True, no_pm=True)
async def _duel(self, ctx, user: discord.Member=None, otheruser : discord.Member=None):
"""Duel another player"""
author = ctx.message.author
server = author.server
if not user or not otheruser:
await self.bot.reply("Please mention two users that you want to see a duel of!")
elif user.id == otheruser.id:
await self.bot.reply("Silly, you can't see a duel of someone against themselves!")
else:
if server.id in self.timer_board:
if self.timer_board[server.id]["time"] == 0:
self.timer_board[server.id]["time"] += 1
dataIO.save_json("data/duels/timer.json", self.timer_board)
nick_player1 = user.name
nick_player2 = otheruser.name
action = self.duelist[self.nuels]
action_damage1, action_damage2, action_damage3, action_damage4 = self.action_damage()
action_chosen1, action_chosen2, action_chosen3, action_chosen4 = sample(action,4)
hp_player1 = 100
hp_player2 = 100
player1_id = user.id
player2_id = otheruser.id
await self.bot.say("**{}** dueled **{}**!!\n\nPlease wait for the duel to start! Both players will begin with **{}** health!".format(user.mention, otheruser.mention, hp_player1))
await asyncio.sleep(1)
await self.bot.say("**{}** `{}` **{}** and took off **{}** health!".format(nick_player1, action_chosen1, nick_player2, action_damage1))
hp_player2 = hp_player2 - action_damage1
await asyncio.sleep(1)
await self.bot.say("**{}** `{}` **{}** and took off **{}** health!".format(nick_player2, action_chosen2, nick_player1, action_damage2))
hp_player1 = hp_player1 - action_damage2
await asyncio.sleep(1)
await self.bot.say("**{}** `{}` **{}** and took off **{}** health!".format(nick_player1, action_chosen3, nick_player2, action_damage3))
hp_player2 = hp_player2 - action_damage3
await asyncio.sleep(1)
await self.bot.say("**{}** `{}` **{}** and took off **{}** health!".format(nick_player2, action_chosen2, nick_player1, action_damage4))
hp_player1 = hp_player1 - action_damage4
if hp_player1 > hp_player2:
winning_player = nick_player1
losing_player = nick_player2
remaining_hp = hp_player1
await asyncio.sleep(1)
await self.bot.say("After 4 rounds of bloody combat, the winner is **{}** with **{}** health!".format(winning_player, remaining_hp))
if player1_id not in self.wlt:
self.wlt[player1_id] = {"name": winning_player, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(winning_player))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(winning_player))
await self.bot.say("{} gained +1 WIN!!".format(winning_player))
self.wlt[player1_id]["Wins"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 WIN!!".format(winning_player))
self.wlt[player1_id]["Wins"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
if player2_id not in self.wlt:
self.wlt[player2_id] = {"name": losing_player, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(losing_player))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(losing_player))
await self.bot.say("{} gained +1 LOSE!!".format(losing_player))
self.wlt[player2_id]["Losses"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 LOSE!!".format(losing_player))
self.wlt[player2_id]["Losses"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
elif hp_player1 == hp_player2:
remaining_hp = hp_player1
await asyncio.sleep(1)
await self.bot.say("After 4 rounds of bloody combat, the winner is **no one because it's a draw** with both players still having **{}** health!".format(remaining_hp))
if player1_id not in self.wlt:
self.wlt[player1_id] = {"name": nick_player1, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(nick_player1))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(nick_player1))
await self.bot.say("{} gained +1 TIE!!".format(nick_player1))
self.wlt[player1_id]["Ties"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 TIE!!".format(nick_player1))
self.wlt[player1_id]["Ties"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
if player2_id not in self.wlt:
self.wlt[player2_id] = {"name": nick_player2, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(nick_player2))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(nick_player2))
await self.bot.say("{} gained +1 TIE!!".format(nick_player2))
self.wlt[player2_id]["Ties"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 TIE!!".format(nick_player2))
self.wlt[player2_id]["Ties"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
winning_player = nick_player2
losing_player = nick_player1
remaining_hp = hp_player2
await asyncio.sleep(1)
await self.bot.say("After 4 rounds of bloody combat, the winner is **{}** with **{}** health!".format(winning_player, remaining_hp))
if player2_id not in self.wlt:
self.wlt[player2_id] = {"name": winning_player, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(winning_player))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(winning_player))
await self.bot.say("{} gained +1 WIN!!".format(winning_player))
self.wlt[player2_id]["Wins"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 WIN!!".format(winning_player))
self.wlt[player2_id]["Wins"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
if player1_id not in self.wlt:
self.wlt[player1_id] = {"name": losing_player, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(losing_player))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(losing_player))
await self.bot.say("{} gained +1 LOSE!!".format(losing_player))
self.wlt[player1_id]["Losses"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 LOSE!!".format(losing_player))
self.wlt[player1_id]["Losses"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
self.timer_board[server.id]["time"] -= 1
dataIO.save_json("data/duels/timer.json", self.timer_board)
else:
await self.bot.say("**A duel is already running!\nPlease wait for the current one to finish!**")
else:
await self.bot.say("Please do {}tjoin to be added to the timer board!".format(ctx.prefix))
@_duels.command(pass_context=True, no_pm=True)
@checks.admin_or_permissions(manage_server=True)
async def add (self, ctx, *, Duel : str):
"""Adds a duel to the list"""
if self.nuels not in self.duelist:
self.duelist[self.nuels] = ["Super Falcon Punched",
"shot",
"kidnapped",
"called 'The Spanker' on",
"ran over",
"Super Falcon Kicked",
"One Punched",
"used One Punch Man on",
"Kamehameha'd",
"Final Flashed",
"Instant Transmission Kamehameha'd",
"Omega Blastered",
"Rick Roll'd",
"Kaioken X4 Kamehameha'd",
"Spirit Bombed",
"hacked",
"Perfect Kamehameha'd",
"used Destructo Disc on",
"used Destructo Disc X2 on",
"used Destructo Disc Chain on",
"Big Bang Kamehameha'd",
"Big Bang Attacked",
"Galick Gunned",
"used Chuck Norris on",
"used Dragon Fist on",
"Final Kamehameha'd",
"Air striked",
"concrete donkey'd",
"super banana bombed",
"Holy Hand Grenaded"]
self.duelist[self.setter] = 100
await self.bot.say("Setter hasn't been added yet. Setter has been auto set to: **{}**".format(self.duelist[self.setter]))
dataIO.save_json("data/duels/duelist.json", self.duelist)
if Duel in self.duelist[self.nuels]:
await self.bot.say("Uh oh. It seems `{}` has already been added to the list.".format(Duel))
else:
if self.counter not in self.duelist:
self.duelist[self.counter] = 0
if self.setter not in self.duelist:
self.duelist[self.setter] = 100
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("Setter hasn't been added yet. Setter has been auto set to: **{}**".format(self.duelist[self.setter]))
if self.duelist[self.counter] < self.duelist[self.setter]:
self.duelist[self.nuels].append(Duel)
self.duelist[self.counter] += 1
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("`{}` has been added to the duel list!".format(Duel))
else:
await self.bot.say("The maximum amount of duel actions has been added (**{}**). Please contact someone with the `Manage Server` permission to change this.".format(self.duelist[self.setter]))
@_duels.command(name="set", pass_context=True, no_pm=True)
@checks.admin_or_permissions(manage_server=True)
async def _set(self, ctx, setter : int=None):
"""Sets the maximum amount of duels that can be added"""
if not setter:
if self.setter not in self.duelist:
self.duelist[self.setter] = 100
await self.bot.say("Setter is currently set to: **{}**".format(self.duelist[self.setter]))
else:
if self.setter not in self.duelist:
self.duelist[self.setter] = 100
await self.bot.say("Setter hasn't been added yet. Setter has been auto set to: **{}**".format(self.duelist[self.setter]))
self.duelist[self.setter] = setter
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("The Duel List Setter has been set to allow a maximum of **{}** items.".format(setter))
#Save function here that isn't added yet
else:
self.duelist[self.setter] = setter
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("The Duel List Setter has been set to allow a maximum of **{}** items.".format(setter))
#Save function here that isn't added yet
if not setter:
await self.bot.say("Setter is currently set to: **{}**".format(self.duelist[self.setter]))
@_duels.command(pass_context=True, no_pm=True)
async def join(self, ctx, user: discord.Member=None):
"""Join tournament"""
user = ctx.message.author
if user.id not in self.wlt:
self.wlt[user.id] = {"name": user.name, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has joined the tournament!".format(user.mention))
else:
await self.bot.say("{} has already joined the tournament".format(user.mention))
@_duels.command(name="stats", pass_context=True)
async def _stats(self, ctx, user : discord.Member=None):
"""Show rank and XP of users.
Defaults to yours."""
if not user:
user = ctx.message.author
if self.check_joined(user.id):
await self.bot.say("{}'s stats: **Wins: {} | Losses: {} | Ties: {} **".format(user.name, self.get_wins(user.id),
self.get_losses(user.id),
self.get_ties(user.id)))
else:
await self.bot.say("{}, you are not yet in the tournament!".format(user.mention))
else:
if self.check_joined(user.id):
await self.bot.say("{}'s stats: **Wins: {} | Losses: {} | Ties: {} **".format(user.name, self.get_wins(user.id),
self.get_losses(user.id),
self.get_ties(user.id)))
else:
await self.bot.say("This user has not joined the tournament")
@_duels.command(pass_context=True, no_pm=True)
async def show (self, ctx):
"""Shows list of available duels"""
if self.nuels not in self.duelist:
self.duelist[self.setter] = 100
self.duelist[self.counter] = 30
self.duelist[self.nuels] = ["Super Falcon Punched",
"shot",
"kidnapped",
"called 'The Spanker' on",
"ran over",
"Super Falcon Kicked",
"One Punched",
"used One Punch Man on",
"Kamehameha'd",
"Final Flashed",
"Instant Transmission Kamehameha'd",
"Omega Blastered",
"Rick Roll'd",
"Kaioken X4 Kamehameha'd",
"Spirit Bombed",
"hacked",
"Perfect Kamehameha'd",
"used Destructo Disc on",
"used Destructo Disc X2 on",
"used Destructo Disc Chain on",
"Big Bang Kamehameha'd",
"Big Bang Attacked",
"Galick Gunned",
"used Chuck Norris on",
"used Dragon Fist on",
"Final Kamehameha'd",
"Air striked",
"concrete donkey'd",
"super banana bombed",
"Holy Hand Grenaded"]
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say(" \n\n\n\n\nThe 30 duels are preset duels that are added automatically on first run. (Code looks like crap right now though :wink:)".format(ctx.prefix))
strbuffer = self.duel_show().split("\n")
mess = ""
if self.duelist[self.counter] == self.duelist[self.setter]:
await self.bot.say("**{}** out of **{}** spaces used! **MAXED OUT!!**".format(len(self.duelist[self.nuels]), self.duelist[self.setter]))
else:
await self.bot.say("**{}** out of **{}** spaces used!".format(len(self.duelist[self.nuels]), self.duelist[self.setter]))
for line in strbuffer:
if len(mess) + len(line) + 1 < 300:
mess += "\n" + line
else:
await self.bot.say(mess)
mess = ""
if mess != "":
await self.bot.say(mess)
else:
strbuffer = self.duel_show().split("\n")
mess = ""
if self.duelist[self.counter] == self.duelist[self.setter]:
await self.bot.say("**{}** out of **{}** spaces used! **MAXED OUT!!**".format(len(self.duelist[self.nuels]), self.duelist[self.setter]))
else:
await self.bot.say("**{}** out of **{}** spaces used!".format(len(self.duelist[self.nuels]), self.duelist[self.setter]))
for line in strbuffer:
if len(mess) + len(line) + 1 < 300:
mess += "\n" + line
else:
await self.bot.say(mess)
mess = ""
if mess != "":
await self.bot.say(mess)
@_duels.command(pass_context=True, no_pm=True)
async def remove (self, ctx, Duel : str):
"""removes a duel from the list"""
try:
x = self.duelist[self.nuels].remove(Duel)
if x is not ValueError:
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("{} has been successfully removed from the duel list!".format(Duel))
except ValueError:
await self.bot.say("I can't remove what hasn't been added to the list to begin with.")
@_duels.command(pass_context=True, no_pm=True)
async def reset (self, ctx):
"""For when you have waaay too many duels"""
if len(self.duelist[self.nuels]) > 0:
self.duelist[self.counter] = 0
self.duelist[self.nuels] = []
dataIO.save_json("data/duels/duelist.json", self.duelist)
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("Duel list has been reset")
else:
await self.bot.say("I can't delete a list that's already empty!")
@_duels.command(pass_context=True)
async def timerreset(self, ctx):
"""Reset the duel timer, only use if the system hangs or breaks!"""
author = ctx.message.author
server = author.server
if server.id in self.timer_board:
if self.timer_board[server.id]["time"] == 0:
await self.bot.say("There isn't a timer right now (no duel running).")
else:
self.timer_board[server.id]["time"] = 0
await self.bot.say("Timer has been reset!")
else:
await self.bot.say("Please do {}tjoin to be added to the timer board!".format(ctx.prefix))
    # This cog was made by Axaios and Ridinginstyle00. Any code taken from others is credited here, whether we know their names or not.
def duel_show (self):
ret = "```--------```"
for num, duels in enumerate(self.duelist[self.nuels]):
ret += str(num + 1) + ") `" + duels + "`\n"
ret += "```--------```"
return ret
def action_choose (self):
action = choice(sample(self.duelist[self.nuels],1))
return action
def multiple_action_choose (self):
action1 = self.action_choose()
action2 = self.action_choose()
action3 = self.action_choose()
action4 = self.action_choose()
return action1, action2, action3, action4
def action_damage (self):
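        # Note: each action's damage is simply its index in the duel list, so
        # actions added later in the list hit harder (0 .. len(list) - 1).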
action_chosen1, action_chosen2, action_chosen3, action_chosen4 = self.multiple_action_choose()
action_damage1 = self.duelist[self.nuels].index(action_chosen1)
action_damage2 = self.duelist[self.nuels].index(action_chosen2)
action_damage3 = self.duelist[self.nuels].index(action_chosen3)
action_damage4 = self.duelist[self.nuels].index(action_chosen4)
return action_damage1, action_damage2, action_damage3, action_damage4
def check_joined(self, id):
if id in self.wlt:
return True
else:
return False
def get_wins(self, id):
if self.check_joined(id):
return self.wlt[id]["Wins"]
def get_losses(self, id):
if self.check_joined(id):
return self.wlt[id]["Losses"]
def get_ties(self, id):
if self.check_joined(id):
return self.wlt[id]["Ties"]
def display_time(self, seconds, granularity=2): # What would I ever do without stackoverflow?
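        # Example: display_time(7325) -> "2 hours, 2 minutes";
        #          display_time(90061, granularity=3) -> "1 day, 1 hour, 1 minute".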
intervals = ( # Source: http://stackoverflow.com/a/24542445
('weeks', 604800), # 60 * 60 * 24 * 7
('days', 86400), # 60 * 60 * 24
('hours', 3600), # 60 * 60
('minutes', 60),
('seconds', 1),
)
result = []
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append("{} {}".format(value, name))
return ', '.join(result[:granularity])
def check_folders():
if not os.path.exists("data/duels"):
print("Creating data/duels folder...")
os.mkdir("data/duels")
def check_files():
fp = "data/duels/duelist.json"
if not dataIO.is_valid_json(fp):
print("Creating duelist.json...")
dataIO.save_json(fp, {})
acc = "data/duels/account.json"
if not dataIO.is_valid_json(acc):
print("creating account.json...")
dataIO.save_json(acc, {})
fp = "data/duels/timer.json"
if not dataIO.is_valid_json(fp):
print("Creating timer.json...")
dataIO.save_json(fp, {})
def setup(bot):
global logger
check_folders()
check_files()
n = Duels(bot)
logger = logging.getLogger("red.duels")
if logger.level == 0: # Prevents the logger from being loaded again in case of module reload
logger.setLevel(logging.INFO)
handler = logging.FileHandler(filename='data/duels/duels.log', encoding='utf-8', mode='a')
handler.setFormatter(logging.Formatter('%(asctime)s %(message)s', datefmt="[%d/%m/%Y %H:%M]"))
logger.addHandler(handler)
bot.add_cog(n) | import discord
from discord.ext import commands
from .utils import checks
from .utils.dataIO import dataIO
from __main__ import send_cmd_help
from __main__ import settings
from datetime import datetime
from random import choice
from random import sample
from copy import deepcopy
from collections import namedtuple, defaultdict
import os
import logging
import aiohttp
import asyncio
import time
from time import sleep
client = discord.Client()
class Duels:
def __init__(self, bot):
global globvar
self.bot = bot
self.duelist = dataIO.load_json("data/duels/duelist.json")
self.nuels = "duels"
self.counter = "Number:"
self.setter = "Max:"
self.wlt = dataIO.load_json("data/duels/account.json")
self.timer_board = dataIO.load_json("data/duels/timer.json")
@commands.group(name="duels", pass_context=True)
async def _duels(self, ctx):
"""Duel with another player!!"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@commands.command(name="tjoin", pass_context=True)
@checks.admin_or_permissions(manage_server=True)
async def tjoin(self, ctx):
"""Add server to timer list"""
author = ctx.message.author
server = author.server
if server.id not in self.timer_board:
self.timer_board[server.id] = {"time": 0}
dataIO.save_json("data/duels/timer.json", self.timer_board)
await self.bot.say("**{}** has been added to the timer board!".format(server.name))
else:
await self.bot.say("**{}** has already been added to the timer_board!".format(server.name))
@commands.command(name="duel", pass_context=True, no_pm=True)
async def _duel(self, ctx, user: discord.Member=None, otheruser : discord.Member=None):
"""Duel another player"""
author = ctx.message.author
server = author.server
if not user or not otheruser:
await self.bot.reply("Please mention two users that you want to see a duel of!")
elif user.id == otheruser.id:
await self.bot.reply("Silly, you can't see a duel of someone against themselves!")
else:
if server.id in self.timer_board:
if self.timer_board[server.id]["time"] == 0:
self.timer_board[server.id]["time"] += 1
dataIO.save_json("data/duels/timer.json", self.timer_board)
nick_player1 = user.name
nick_player2 = otheruser.name
action = self.duelist[self.nuels]
action_damage1, action_damage2, action_damage3, action_damage4 = self.action_damage()
action_chosen1, action_chosen2, action_chosen3, action_chosen4 = sample(action,4)
hp_player1 = 100
hp_player2 = 100
player1_id = user.id
player2_id = otheruser.id
await self.bot.say("**{}** dueled **{}**!!\n\nPlease wait for the duel to start! Both players will begin with **{}** health!".format(user.mention, otheruser.mention, hp_player1))
await asyncio.sleep(1)
await self.bot.say("**{}** `{}` **{}** and took off **{}** health!".format(nick_player1, action_chosen1, nick_player2, action_damage1))
hp_player2 = hp_player2 - action_damage1
await asyncio.sleep(1)
await self.bot.say("**{}** `{}` **{}** and took off **{}** health!".format(nick_player2, action_chosen2, nick_player1, action_damage2))
hp_player1 = hp_player1 - action_damage2
await asyncio.sleep(1)
await self.bot.say("**{}** `{}` **{}** and took off **{}** health!".format(nick_player1, action_chosen3, nick_player2, action_damage3))
hp_player2 = hp_player2 - action_damage3
await asyncio.sleep(1)
await self.bot.say("**{}** `{}` **{}** and took off **{}** health!".format(nick_player2, action_chosen2, nick_player1, action_damage4))
hp_player1 = hp_player1 - action_damage4
if hp_player1 > hp_player2:
winning_player = nick_player1
losing_player = nick_player2
remaining_hp = hp_player1
await asyncio.sleep(1)
await self.bot.say("After 4 rounds of bloody combat, the winner is **{}** with **{}** health!".format(winning_player, remaining_hp))
if player1_id not in self.wlt:
self.wlt[player1_id] = {"name": winning_player, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(winning_player))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(winning_player))
await self.bot.say("{} gained +1 WIN!!".format(winning_player))
self.wlt[player1_id]["Wins"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 WIN!!".format(winning_player))
self.wlt[player1_id]["Wins"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
if player2_id not in self.wlt:
self.wlt[player2_id] = {"name": losing_player, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(losing_player))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(losing_player))
await self.bot.say("{} gained +1 LOSE!!".format(losing_player))
self.wlt[player2_id]["Losses"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 LOSE!!".format(losing_player))
self.wlt[player2_id]["Losses"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
elif hp_player1 == hp_player2:
remaining_hp = hp_player1
await asyncio.sleep(1)
await self.bot.say("After 4 rounds of bloody combat, the winner is **no one because it's a draw** with both players still having **{}** health!".format(remaining_hp))
if player1_id not in self.wlt:
self.wlt[player1_id] = {"name": nick_player1, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(nick_player1))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(nick_player1))
await self.bot.say("{} gained +1 TIE!!".format(nick_player1))
self.wlt[player1_id]["Ties"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 TIE!!".format(nick_player1))
self.wlt[player1_id]["Ties"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
if player2_id not in self.wlt:
self.wlt[player2_id] = {"name": nick_player2, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(nick_player2))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(nick_player2))
await self.bot.say("{} gained +1 TIE!!".format(nick_player2))
self.wlt[player2_id]["Ties"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 TIE!!".format(nick_player2))
self.wlt[player2_id]["Ties"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
winning_player = nick_player2
losing_player = nick_player1
remaining_hp = hp_player2
await asyncio.sleep(1)
await self.bot.say("After 4 rounds of bloody combat, the winner is **{}** with **{}** health!".format(winning_player, remaining_hp))
if player2_id not in self.wlt:
self.wlt[player2_id] = {"name": winning_player, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(winning_player))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(winning_player))
await self.bot.say("{} gained +1 WIN!!".format(winning_player))
self.wlt[player2_id]["Wins"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 WIN!!".format(winning_player))
self.wlt[player2_id]["Wins"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
if player1_id not in self.wlt:
self.wlt[player1_id] = {"name": losing_player, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has not yet entered the duel tournament!".format(losing_player))
await asyncio.sleep(.5)
await self.bot.say("{} has joined the duel tournament, currently changing settings!".format(losing_player))
await self.bot.say("{} gained +1 LOSE!!".format(losing_player))
self.wlt[player1_id]["Losses"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
else:
await self.bot.say("{} gained +1 LOSE!!".format(losing_player))
self.wlt[player1_id]["Losses"] += 1
dataIO.save_json("data/duels/account.json", self.wlt)
self.timer_board[server.id]["time"] -= 1
dataIO.save_json("data/duels/timer.json", self.timer_board)
else:
await self.bot.say("**A duel is already running!\nPlease wait for the current one to finish!**")
else:
await self.bot.say("Please do {}tjoin to be added to the timer board!".format(ctx.prefix))
@_duels.command(pass_context=True, no_pm=True)
@checks.admin_or_permissions(manage_server=True)
async def add (self, ctx, *, Duel : str):
"""Adds a duel to the list"""
if self.nuels not in self.duelist:
self.duelist[self.nuels] = ["Super Falcon Punched",
"shot",
"kidnapped",
"called 'The Spanker' on",
"ran over",
"Super Falcon Kicked",
"One Punched",
"used One Punch Man on",
"Kamehameha'd",
"Final Flashed",
"Instant Transmission Kamehameha'd",
"Omega Blastered",
"Rick Roll'd",
"Kaioken X4 Kamehameha'd",
"Spirit Bombed",
"hacked",
"Perfect Kamehameha'd",
"used Destructo Disc on",
"used Destructo Disc X2 on",
"used Destructo Disc Chain on",
"Big Bang Kamehameha'd",
"Big Bang Attacked",
"Galick Gunned",
"used Chuck Norris on",
"used Dragon Fist on",
"Final Kamehameha'd",
"Air striked",
"concrete donkey'd",
"super banana bombed",
"Holy Hand Grenaded"]
            self.duelist[self.setter] = 100
            # Keep the counter in step with the 30 preset actions so the cap check below stays accurate.
            self.duelist[self.counter] = 30
            await self.bot.say("Setter hasn't been added yet. Setter has been auto set to: **{}**".format(self.duelist[self.setter]))
            dataIO.save_json("data/duels/duelist.json", self.duelist)
if Duel in self.duelist[self.nuels]:
await self.bot.say("Uh oh. It seems `{}` has already been added to the list.".format(Duel))
else:
if self.counter not in self.duelist:
self.duelist[self.counter] = 0
if self.setter not in self.duelist:
self.duelist[self.setter] = 100
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("Setter hasn't been added yet. Setter has been auto set to: **{}**".format(self.duelist[self.setter]))
if self.duelist[self.counter] < self.duelist[self.setter]:
self.duelist[self.nuels].append(Duel)
self.duelist[self.counter] += 1
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("`{}` has been added to the duel list!".format(Duel))
else:
await self.bot.say("The maximum amount of duel actions has been added (**{}**). Please contact someone with the `Manage Server` permission to change this.".format(self.duelist[self.setter]))
@_duels.command(name="set", pass_context=True, no_pm=True)
@checks.admin_or_permissions(manage_server=True)
async def _set(self, ctx, setter : int=None):
"""Sets the maximum amount of duels that can be added"""
if not setter:
if self.setter not in self.duelist:
self.duelist[self.setter] = 100
await self.bot.say("Setter is currently set to: **{}**".format(self.duelist[self.setter]))
else:
if self.setter not in self.duelist:
self.duelist[self.setter] = 100
await self.bot.say("Setter hasn't been added yet. Setter has been auto set to: **{}**".format(self.duelist[self.setter]))
self.duelist[self.setter] = setter
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("The Duel List Setter has been set to allow a maximum of **{}** items.".format(setter))
#Save function here that isn't added yet
else:
self.duelist[self.setter] = setter
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("The Duel List Setter has been set to allow a maximum of **{}** items.".format(setter))
#Save function here that isn't added yet
if not setter:
await self.bot.say("Setter is currently set to: **{}**".format(self.duelist[self.setter]))
@_duels.command(pass_context=True, no_pm=True)
async def join(self, ctx, user: discord.Member=None):
"""Join tournament"""
user = ctx.message.author
if user.id not in self.wlt:
self.wlt[user.id] = {"name": user.name, "Wins": 0, "Losses": 0, "Ties": 0}
dataIO.save_json("data/duels/account.json", self.wlt)
await self.bot.say("{} has joined the tournament!".format(user.mention))
else:
await self.bot.say("{} has already joined the tournament".format(user.mention))
@_duels.command(name="stats", pass_context=True)
async def _stats(self, ctx, user : discord.Member=None):
"""Show rank and XP of users.
Defaults to yours."""
if not user:
user = ctx.message.author
if self.check_joined(user.id):
await self.bot.say("{}'s stats: **Wins: {} | Losses: {} | Ties: {} **".format(user.name, self.get_wins(user.id),
self.get_losses(user.id),
self.get_ties(user.id)))
else:
await self.bot.say("{}, you are not yet in the tournament!".format(user.mention))
else:
if self.check_joined(user.id):
await self.bot.say("{}'s stats: **Wins: {} | Losses: {} | Ties: {} **".format(user.name, self.get_wins(user.id),
self.get_losses(user.id),
self.get_ties(user.id)))
else:
await self.bot.say("This user has not joined the tournament")
@_duels.command(pass_context=True, no_pm=True)
async def show (self, ctx):
"""Shows list of available duels"""
if self.nuels not in self.duelist:
self.duelist[self.setter] = 100
self.duelist[self.counter] = 30
self.duelist[self.nuels] = ["Super Falcon Punched",
"shot",
"kidnapped",
"called 'The Spanker' on",
"ran over",
"Super Falcon Kicked",
"One Punched",
"used One Punch Man on",
"Kamehameha'd",
"Final Flashed",
"Instant Transmission Kamehameha'd",
"Omega Blastered",
"Rick Roll'd",
"Kaioken X4 Kamehameha'd",
"Spirit Bombed",
"hacked",
"Perfect Kamehameha'd",
"used Destructo Disc on",
"used Destructo Disc X2 on",
"used Destructo Disc Chain on",
"Big Bang Kamehameha'd",
"Big Bang Attacked",
"Galick Gunned",
"used Chuck Norris on",
"used Dragon Fist on",
"Final Kamehameha'd",
"Air striked",
"concrete donkey'd",
"super banana bombed",
"Holy Hand Grenaded"]
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say(" \n\n\n\n\nThe 30 duels are preset duels that are added automatically on first run. (Code looks like crap right now though :wink:)".format(ctx.prefix))
strbuffer = self.duel_show().split("\n")
mess = ""
if self.duelist[self.counter] == self.duelist[self.setter]:
await self.bot.say("**{}** out of **{}** spaces used! **MAXED OUT!!**".format(len(self.duelist[self.nuels]), self.duelist[self.setter]))
else:
await self.bot.say("**{}** out of **{}** spaces used!".format(len(self.duelist[self.nuels]), self.duelist[self.setter]))
for line in strbuffer:
if len(mess) + len(line) + 1 < 300:
mess += "\n" + line
else:
await self.bot.say(mess)
mess = ""
if mess != "":
await self.bot.say(mess)
else:
strbuffer = self.duel_show().split("\n")
mess = ""
if self.duelist[self.counter] == self.duelist[self.setter]:
await self.bot.say("**{}** out of **{}** spaces used! **MAXED OUT!!**".format(len(self.duelist[self.nuels]), self.duelist[self.setter]))
else:
await self.bot.say("**{}** out of **{}** spaces used!".format(len(self.duelist[self.nuels]), self.duelist[self.setter]))
for line in strbuffer:
if len(mess) + len(line) + 1 < 300:
mess += "\n" + line
else:
await self.bot.say(mess)
mess = ""
if mess != "":
await self.bot.say(mess)
@_duels.command(pass_context=True, no_pm=True)
async def remove (self, ctx, Duel : str):
"""removes a duel from the list"""
try:
x = self.duelist[self.nuels].remove(Duel)
if x is not ValueError:
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("{} has been successfully removed from the duel list!".format(Duel))
except ValueError:
await self.bot.say("I can't remove what hasn't been added to the list to begin with.")
@_duels.command(pass_context=True, no_pm=True)
async def reset (self, ctx):
"""For when you have waaay too many duels"""
if len(self.duelist[self.nuels]) > 0:
self.duelist[self.counter] = 0
self.duelist[self.nuels] = []
dataIO.save_json("data/duels/duelist.json", self.duelist)
dataIO.save_json("data/duels/duelist.json", self.duelist)
await self.bot.say("Duel list has been reset")
else:
await self.bot.say("I can't delete a list that's already empty!")
@_duels.command(pass_context=True)
async def timerreset(self, ctx):
"""Reset the duel timer, only use if the system hangs or breaks!"""
author = ctx.message.author
server = author.server
if server.id in self.timer_board:
if self.timer_board[server.id]["time"] == 0:
await self.bot.say("There isn't a timer right now (no duel running).")
else:
self.timer_board[server.id]["time"] = 0
await self.bot.say("Timer has been reset!")
else:
await self.bot.say("Please do {}tjoin to be added to the timer board!".format(ctx.prefix))
    # This cog was made by Axaios and Ridinginstyle00. Any code borrowed from others is credited here where possible, whether or not we know their names.
def duel_show (self):
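        # Build a numbered, backtick-quoted listing of every duel action for display.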
ret = "```--------```"
for num, duels in enumerate(self.duelist[self.nuels]):
ret += str(num + 1) + ") `" + duels + "`\n"
ret += "```--------```"
return ret
def action_choose (self):
        action = choice(self.duelist[self.nuels])
return action
def multiple_action_choose (self):
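        # Each round's action is drawn independently, so the same action can appear more than once.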
action1 = self.action_choose()
action2 = self.action_choose()
action3 = self.action_choose()
action4 = self.action_choose()
return action1, action2, action3, action4
def action_damage (self):
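        # Damage is simply the list index of a separately chosen action, so actions that
        # appear later in the duel list hit harder.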
action_chosen1, action_chosen2, action_chosen3, action_chosen4 = self.multiple_action_choose()
action_damage1 = self.duelist[self.nuels].index(action_chosen1)
action_damage2 = self.duelist[self.nuels].index(action_chosen2)
action_damage3 = self.duelist[self.nuels].index(action_chosen3)
action_damage4 = self.duelist[self.nuels].index(action_chosen4)
return action_damage1, action_damage2, action_damage3, action_damage4
def check_joined(self, id):
        return id in self.wlt
def get_wins(self, id):
if self.check_joined(id):
return self.wlt[id]["Wins"]
def get_losses(self, id):
if self.check_joined(id):
return self.wlt[id]["Losses"]
def get_ties(self, id):
if self.check_joined(id):
return self.wlt[id]["Ties"]
def display_time(self, seconds, granularity=2): # What would I ever do without stackoverflow?
intervals = ( # Source: http://stackoverflow.com/a/24542445
('weeks', 604800), # 60 * 60 * 24 * 7
('days', 86400), # 60 * 60 * 24
('hours', 3600), # 60 * 60
('minutes', 60),
('seconds', 1),
)
result = []
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append("{} {}".format(value, name))
return ', '.join(result[:granularity])
def check_folders():
if not os.path.exists("data/duels"):
print("Creating data/duels folder...")
os.mkdir("data/duels")
def check_files():
fp = "data/duels/duelist.json"
if not dataIO.is_valid_json(fp):
print("Creating duelist.json...")
dataIO.save_json(fp, {})
acc = "data/duels/account.json"
if not dataIO.is_valid_json(acc):
print("creating account.json...")
dataIO.save_json(acc, {})
fp = "data/duels/timer.json"
if not dataIO.is_valid_json(fp):
print("Creating timer.json...")
dataIO.save_json(fp, {})
def setup(bot):
global logger
check_folders()
check_files()
n = Duels(bot)
logger = logging.getLogger("red.duels")
if logger.level == 0: # Prevents the logger from being loaded again in case of module reload
logger.setLevel(logging.INFO)
handler = logging.FileHandler(filename='data/duels/duels.log', encoding='utf-8', mode='a')
handler.setFormatter(logging.Formatter('%(asctime)s %(message)s', datefmt="[%d/%m/%Y %H:%M]"))
logger.addHandler(handler)
bot.add_cog(n) | en | 0.905531 | Duel with another player!! Add server to timer list Duel another player Adds a duel to the list Sets the maximum amount of duels that can be added #Save function here that isn't added yet #Save function here that isn't added yet Join tournament Show rank and XP of users.
Defaults to yours. Shows list of available duels removes a duel from the list For when you have waaay too many duels Reset the duel timer, only use if the system hangs or breaks! #This cog was made by Axaios and Ridinginstyle00. And any code taken from others we also credit them here, whether we know their name or not. # What would I ever do without stackoverflow? # Source: http://stackoverflow.com/a/24542445 # 60 * 60 * 24 * 7 # 60 * 60 * 24 # 60 * 60 # Prevents the logger from being loaded again in case of module reload | 2.482527 | 2 |
paranuara/citizens/models/citizens.py | SPLAYER-HD/Paranuara | 0 | 10029 | <gh_stars>0
"""Citizens model."""
# Django
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.core.validators import RegexValidator
# models
from paranuara.companies.models import Company
# PostgreSQL fields
from django.contrib.postgres.fields import JSONField
# Utilities
from paranuara.utils.models import ParanuaraModel
class Citizen(ParanuaraModel, AbstractUser):
"""Citizen model.
Extend from Django's Abstract User, change the username field
to email and add some extra fields.
"""
index = models.IntegerField(
unique=True,
default=-1
)
favorite_food = models.ManyToManyField(
'foods.Food',
related_name='favorite_food'
)
has_died = models.BooleanField(
'died',
default=False,
help_text=(
'Help easily distinguish citizens died or alive. '
)
)
balance = models.DecimalField(
max_digits=15,
decimal_places=2,
default=None
)
picture = models.ImageField(
'profile picture',
upload_to='paranuara/citizens/pictures/',
blank=True,
null=True
)
age = models.IntegerField(
default=-1
)
eyeColor = models.CharField(
max_length=50,
blank=False
)
gender = models.CharField(
max_length=6,
blank=True
)
email = models.EmailField(
'email address',
unique=True,
error_messages={
'unique': 'A user with that email already exists.'
}
)
phone_regex = RegexValidator(
regex=r'\+?1?\d{9,15}$',
message="Phone number must be entered in the format: +999999999. Up to 15 digits allowed."
)
phone = models.CharField(
validators=[phone_regex],
max_length=20,
blank=True
)
address = models.CharField(
max_length=100,
blank=True
)
company = models.ForeignKey(
Company,
related_name='employees_company',
on_delete=models.SET_NULL,
null=True
)
about = models.CharField(
max_length=1000,
blank=True,
null=True
)
greeting = models.CharField(
max_length=1000,
blank=True,
null=True
)
tags = JSONField(
default=None,
blank=True,
null=True
)
REQUIRED_FIELDS = ['has_died', 'eyeColor', 'index']
def get_relations(self):
return models.Relationship.objects.get(from_person=self)
class Relationship(models.Model):
"""Class to represent many to many relation between Ctizens"""
from_people = models.ForeignKey(Citizen, related_name='from_people', on_delete=models.CASCADE)
to_people = models.ForeignKey(Citizen, related_name='to_people', on_delete=models.CASCADE)
| """Citizens model."""
# Django
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.core.validators import RegexValidator
# models
from paranuara.companies.models import Company
# PostgreSQL fields
from django.contrib.postgres.fields import JSONField
# Utilities
from paranuara.utils.models import ParanuaraModel
class Citizen(ParanuaraModel, AbstractUser):
"""Citizen model.
Extend from Django's Abstract User, change the username field
to email and add some extra fields.
"""
index = models.IntegerField(
unique=True,
default=-1
)
favorite_food = models.ManyToManyField(
'foods.Food',
related_name='favorite_food'
)
has_died = models.BooleanField(
'died',
default=False,
help_text=(
'Help easily distinguish citizens died or alive. '
)
)
balance = models.DecimalField(
max_digits=15,
decimal_places=2,
default=None
)
picture = models.ImageField(
'profile picture',
upload_to='paranuara/citizens/pictures/',
blank=True,
null=True
)
age = models.IntegerField(
default=-1
)
eyeColor = models.CharField(
max_length=50,
blank=False
)
gender = models.CharField(
max_length=6,
blank=True
)
email = models.EmailField(
'email address',
unique=True,
error_messages={
'unique': 'A user with that email already exists.'
}
)
phone_regex = RegexValidator(
regex=r'\+?1?\d{9,15}$',
message="Phone number must be entered in the format: +999999999. Up to 15 digits allowed."
)
phone = models.CharField(
validators=[phone_regex],
max_length=20,
blank=True
)
address = models.CharField(
max_length=100,
blank=True
)
company = models.ForeignKey(
Company,
related_name='employees_company',
on_delete=models.SET_NULL,
null=True
)
about = models.CharField(
max_length=1000,
blank=True,
null=True
)
greeting = models.CharField(
max_length=1000,
blank=True,
null=True
)
tags = JSONField(
default=None,
blank=True,
null=True
)
REQUIRED_FIELDS = ['has_died', 'eyeColor', 'index']
def get_relations(self):
return models.Relationship.objects.get(from_person=self)
class Relationship(models.Model):
"""Class to represent many to many relation between Ctizens"""
from_people = models.ForeignKey(Citizen, related_name='from_people', on_delete=models.CASCADE)
to_people = models.ForeignKey(Citizen, related_name='to_people', on_delete=models.CASCADE) | en | 0.926295 | Citizens model. # Django # models # PostgreSQL fields # Utilities Citizen model. Extend from Django's Abstract User, change the username field to email and add some extra fields. Class to represent many to many relation between Ctizens | 2.641948 | 3 |
tests/utils.py | niwibe/cobrascript | 1 | 10030 | <reponame>niwibe/cobrascript<filename>tests/utils.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from cobra.utils import normalize
def norm(data):
return normalize(data)
| # -*- coding: utf-8 -*-
from cobra.utils import normalize
def norm(data):
return normalize(data) | en | 0.769321 | # -*- coding: utf-8 -*- | 1.872177 | 2 |
rgislackbot/dispatcher/dispatchconfig.py | raginggeek/RGISlackBot | 0 | 10031 | <reponame>raginggeek/RGISlackBot
class DispatchConfig:
def __init__(self, raw_config):
self.registered_commands = {}
for package in raw_config["handlers"]:
for command in package["commands"]:
self.registered_commands[command] = {
"class": package["class"],
"fullpath": ".".join([package["package"], package["module"], package["class"]])
}
def get_handler_by_command(self, command):
if command in self.registered_commands:
return self.registered_commands[command]
else:
return None
| class DispatchConfig:
def __init__(self, raw_config):
self.registered_commands = {}
for package in raw_config["handlers"]:
for command in package["commands"]:
self.registered_commands[command] = {
"class": package["class"],
"fullpath": ".".join([package["package"], package["module"], package["class"]])
}
def get_handler_by_command(self, command):
if command in self.registered_commands:
return self.registered_commands[command]
else:
return None | none | 1 | 2.303832 | 2 |
|
chess/rules.py | DevStrikerTech/Chess-Engine | 18 | 10032 | <gh_stars>10-100
import pygame
from chess.board import Board
from .variable_declaration import black_piece, white_piece, position_piece, board_square_size
class Rules:
def __init__(self, window):
self._init()
self.window = window
def update(self):
self.chess_board.draw_pieces(self.window)
self.draw_valid_moves(self.logical_moves)
pygame.display.update()
def _init(self):
self.current_piece = None
self.chess_board = Board()
self.turn_taken = black_piece
self.logical_moves = {}
def winner(self):
return self.chess_board.winner()
def reset(self):
self._init()
def select(self, board_row, board_column):
if self.current_piece:
result = self._move(board_row, board_column)
if not result:
self.current_piece = None
self.select(board_row, board_column)
piece = self.chess_board.get_pieces(board_row, board_column)
if piece != 0 and piece.piece_color == self.turn_taken:
self.current_piece = piece
self.logical_moves = self.chess_board.get_logical_moves(piece)
return True
return False
def _move(self, board_row, board_column):
piece = self.chess_board.get_pieces(board_row, board_column)
if self.current_piece and piece == 0 and (board_row, board_column) in self.logical_moves:
self.chess_board.move_pieces(self.current_piece, board_row, board_column)
skipped = self.logical_moves[(board_row, board_column)]
if skipped:
self.chess_board.remove(skipped)
self.change_turn()
else:
return False
return True
def draw_valid_moves(self, moves):
for move in moves:
row, col = move
pygame.draw.circle(self.window, position_piece,
(col * board_square_size + board_square_size // 2,
row * board_square_size + board_square_size // 2), 15)
def change_turn(self):
self.logical_moves = {}
if self.turn_taken == black_piece:
self.turn_taken = white_piece
else:
self.turn_taken = black_piece
def get_board(self):
return self.chess_board
def algorithm_move(self, chess_board):
self.chess_board = chess_board
self.change_turn()
| import pygame
from chess.board import Board
from .variable_declaration import black_piece, white_piece, position_piece, board_square_size
class Rules:
def __init__(self, window):
self._init()
self.window = window
def update(self):
self.chess_board.draw_pieces(self.window)
self.draw_valid_moves(self.logical_moves)
pygame.display.update()
def _init(self):
self.current_piece = None
self.chess_board = Board()
self.turn_taken = black_piece
self.logical_moves = {}
def winner(self):
return self.chess_board.winner()
def reset(self):
self._init()
def select(self, board_row, board_column):
if self.current_piece:
result = self._move(board_row, board_column)
if not result:
self.current_piece = None
self.select(board_row, board_column)
piece = self.chess_board.get_pieces(board_row, board_column)
if piece != 0 and piece.piece_color == self.turn_taken:
self.current_piece = piece
self.logical_moves = self.chess_board.get_logical_moves(piece)
return True
return False
def _move(self, board_row, board_column):
piece = self.chess_board.get_pieces(board_row, board_column)
if self.current_piece and piece == 0 and (board_row, board_column) in self.logical_moves:
self.chess_board.move_pieces(self.current_piece, board_row, board_column)
skipped = self.logical_moves[(board_row, board_column)]
if skipped:
self.chess_board.remove(skipped)
self.change_turn()
else:
return False
return True
def draw_valid_moves(self, moves):
for move in moves:
row, col = move
pygame.draw.circle(self.window, position_piece,
(col * board_square_size + board_square_size // 2,
row * board_square_size + board_square_size // 2), 15)
def change_turn(self):
self.logical_moves = {}
if self.turn_taken == black_piece:
self.turn_taken = white_piece
else:
self.turn_taken = black_piece
def get_board(self):
return self.chess_board
def algorithm_move(self, chess_board):
self.chess_board = chess_board
self.change_turn() | none | 1 | 3.447172 | 3 |
|
kivygames/games/noughtsandcrosses/__init__.py | jonathanjameswatson/kivygames | 0 | 10033 | import numpy as np
from kivygames.games import Game
import kivygames.games.noughtsandcrosses.c as c
class CellOccupiedError(Exception):
pass
class NoughtsAndCrosses(Game):
minPlayers = 2
maxPlayers = 2
hasAI = True
gridShape = (3, 3)
def __init__(self):
Game.__init__(self)
self.grid = np.zeros(self.gridShape, dtype="u1")
self.player = 1
def isEmpty(self, position):
return self.grid[position] == 0
async def turn(self):
await self.sendOutput("Player", self.player)
while True:
position = await self.getInput("Position", tuple, self.player)
if self.isEmpty(position):
break
await self.sendOutput("Error", "That space is already full.")
await self.sendOutput("Error", "")
self.grid[position] = self.player
await self.sendOutput("Grid", self.grid)
if c.hasPlayerWon(self.grid, self.player):
await self.sendOutput("End", f"Player {self.player} wins.")
return True
if np.count_nonzero(self.grid) == 9:
await self.sendOutput("End", f"It's a draw!")
return True
self.player = 3 - self.player
return False
def getAIInput(self, name):
if name == "Position":
return c.minimax(self.player, self.player, True, self.grid)[1]
async def game(self):
while True:
ended = await self.turn()
if ended:
break
await self.end()
| import numpy as np
from kivygames.games import Game
import kivygames.games.noughtsandcrosses.c as c
class CellOccupiedError(Exception):
pass
class NoughtsAndCrosses(Game):
minPlayers = 2
maxPlayers = 2
hasAI = True
gridShape = (3, 3)
def __init__(self):
Game.__init__(self)
self.grid = np.zeros(self.gridShape, dtype="u1")
self.player = 1
def isEmpty(self, position):
return self.grid[position] == 0
async def turn(self):
await self.sendOutput("Player", self.player)
while True:
position = await self.getInput("Position", tuple, self.player)
if self.isEmpty(position):
break
await self.sendOutput("Error", "That space is already full.")
await self.sendOutput("Error", "")
self.grid[position] = self.player
await self.sendOutput("Grid", self.grid)
if c.hasPlayerWon(self.grid, self.player):
await self.sendOutput("End", f"Player {self.player} wins.")
return True
if np.count_nonzero(self.grid) == 9:
await self.sendOutput("End", f"It's a draw!")
return True
self.player = 3 - self.player
return False
def getAIInput(self, name):
if name == "Position":
return c.minimax(self.player, self.player, True, self.grid)[1]
async def game(self):
while True:
ended = await self.turn()
if ended:
break
await self.end()
| none | 1 | 2.908558 | 3 |
|
03-Decouvrez-POO/download_agents.py | gruiick/openclassrooms-py | 0 | 10034 | <reponame>gruiick/openclassrooms-py
#! /usr/bin/env python
import argparse
import json
import time
import urllib.error
import urllib.request
def main():
parser = argparse.ArgumentParser(description="Download agents from pplapi.com")
parser.add_argument("-c", "--count", type=int, default=10, help="Number of agents to download.")
parser.add_argument("-d", "--dest", help="Destination file. If absent, will print to stdout")
args = parser.parse_args()
agents = []
while len(agents) < args.count:
if agents:
# Wait one second between every request
time.sleep(1)
request_count = min(args.count - len(agents), 500)
try:
response = urllib.request.urlopen("http://pplapi.com/batch/{}/sample.json".format(request_count))
agents += json.loads(response.read().decode("utf8"))
except urllib.error.HTTPError:
print("Too may requests, sleeping 10s ({} agents)".format(len(agents)))
time.sleep(10)
result = json.dumps(agents, indent=2, sort_keys=True)
if args.dest:
with open(args.dest, 'w') as out_f:
out_f.write(result)
else:
print(result)
if __name__ == "__main__":
main()
| #! /usr/bin/env python
import argparse
import json
import time
import urllib.error
import urllib.request
def main():
parser = argparse.ArgumentParser(description="Download agents from pplapi.com")
parser.add_argument("-c", "--count", type=int, default=10, help="Number of agents to download.")
parser.add_argument("-d", "--dest", help="Destination file. If absent, will print to stdout")
args = parser.parse_args()
agents = []
while len(agents) < args.count:
if agents:
# Wait one second between every request
time.sleep(1)
request_count = min(args.count - len(agents), 500)
try:
response = urllib.request.urlopen("http://pplapi.com/batch/{}/sample.json".format(request_count))
agents += json.loads(response.read().decode("utf8"))
except urllib.error.HTTPError:
print("Too may requests, sleeping 10s ({} agents)".format(len(agents)))
time.sleep(10)
result = json.dumps(agents, indent=2, sort_keys=True)
if args.dest:
with open(args.dest, 'w') as out_f:
out_f.write(result)
else:
print(result)
if __name__ == "__main__":
main() | en | 0.796401 | #! /usr/bin/env python # Wait one second between every request | 2.998523 | 3 |
Commands/images.py | Mariobob/Proton | 0 | 10035 | <reponame>Mariobob/Proton<gh_stars>0
import functools
import re
import asyncio
from io import BytesIO
from discord.ext import commands
import discord
from Utils import canvas
import random
class Images:
"""
Contains commands for manipulation of images.
"""
def __init__(self, bot):
self.bot = bot
self.imageClient = canvas.Client(bot)
@commands.command(name="illegal")
async def illegal(self, ctx, *, args=None):
"""Ask US President Donald Trump to make something illegal."""
if args is None:
await ctx.send("Please provide something to make it illegal.")
return
if len(args) > 10 or len(args) < 1:
await ctx.send("You can make only 1 to 10 lettered things illegal.")
return
elif not bool(re.match('^[a-zA-Z0-9]+$', args)):
await ctx.send("Oops! Only alphanumeric characters are allowed.")
return
payload = {"task": "gif", "word": args.upper()}
async with ctx.message.channel.typing():
message = await ctx.send(f"Convincing US President <NAME> to make `{args}` illegal.")
async with self.bot.session.post("https://is-now-illegal.firebaseio.com/queue/tasks.json", json=payload) as resp:
pass
await asyncio.sleep(5)
url = f"https://storage.googleapis.com/is-now-illegal.appspot.com/gifs/{args.upper()}.gif"
async with self.bot.session.get(url) as resp:
image = await resp.read()
await ctx.send(file=discord.File(BytesIO(image), "illegal.gif"))
await message.delete()
@commands.command(name="beautiful")
async def beautiful(self, ctx, user: discord.Member = None):
"""This... this is beautiful!"""
member = user or ctx.author
async with ctx.typing():
avatar = await self.imageClient.getAvatar(user=member, size=128)
func = functools.partial(self.imageClient.beautify, avatar)
image = await self.bot.loop.run_in_executor(None, func)
await ctx.send(file=discord.File(fp=image, filename="beautiful.png"))
@commands.command(name="delet")
async def delet(self, ctx, user: discord.Member = None):
"""Delet this garbage!"""
member = user or ctx.author
async with ctx.typing():
avatar = await self.imageClient.getAvatar(user=member, size=128)
func = functools.partial(self.imageClient.deletify, avatar, f"{member.name}#{member.discriminator}")
image = await self.bot.loop.run_in_executor(None, func)
await ctx.send(file=discord.File(fp=image, filename="delet.png"))
@commands.command(name="robot")
async def robot(self, ctx, *, args=None):
"""See a unique robot image from any text."""
if args is None:
args = ctx.author.name
randomInt = random.randrange(1, 3)
async with ctx.typing():
image = await self.imageClient.getRobotImage(args, randomInt)
file = discord.File(fp=image, filename=f"{args}.png")
await ctx.send(file=file)
@commands.command(name="thuglife")
async def thuglife(self, ctx, user: discord.Member = None):
"""Thug Life....."""
member = user or ctx.author
async with ctx.typing():
avatar = await self.imageClient.getAvatar(user=member, size=512)
func = functools.partial(self.imageClient.thugLife, avatar)
image = await self.bot.loop.run_in_executor(None, func)
await ctx.send(file=discord.File(fp=image, filename="thuglife.png"))
def setup(bot):
bot.add_cog(Images(bot)) | import functools
import re
import asyncio
from io import BytesIO
from discord.ext import commands
import discord
from Utils import canvas
import random
class Images:
"""
Contains commands for manipulation of images.
"""
def __init__(self, bot):
self.bot = bot
self.imageClient = canvas.Client(bot)
@commands.command(name="illegal")
async def illegal(self, ctx, *, args=None):
"""Ask US President Donald Trump to make something illegal."""
if args is None:
await ctx.send("Please provide something to make it illegal.")
return
if len(args) > 10 or len(args) < 1:
await ctx.send("You can make only 1 to 10 lettered things illegal.")
return
elif not bool(re.match('^[a-zA-Z0-9]+$', args)):
await ctx.send("Oops! Only alphanumeric characters are allowed.")
return
payload = {"task": "gif", "word": args.upper()}
async with ctx.message.channel.typing():
message = await ctx.send(f"Convincing US President <NAME> to make `{args}` illegal.")
async with self.bot.session.post("https://is-now-illegal.firebaseio.com/queue/tasks.json", json=payload) as resp:
pass
await asyncio.sleep(5)
url = f"https://storage.googleapis.com/is-now-illegal.appspot.com/gifs/{args.upper()}.gif"
async with self.bot.session.get(url) as resp:
image = await resp.read()
await ctx.send(file=discord.File(BytesIO(image), "illegal.gif"))
await message.delete()
@commands.command(name="beautiful")
async def beautiful(self, ctx, user: discord.Member = None):
"""This... this is beautiful!"""
member = user or ctx.author
async with ctx.typing():
avatar = await self.imageClient.getAvatar(user=member, size=128)
func = functools.partial(self.imageClient.beautify, avatar)
image = await self.bot.loop.run_in_executor(None, func)
await ctx.send(file=discord.File(fp=image, filename="beautiful.png"))
@commands.command(name="delet")
async def delet(self, ctx, user: discord.Member = None):
"""Delet this garbage!"""
member = user or ctx.author
async with ctx.typing():
avatar = await self.imageClient.getAvatar(user=member, size=128)
func = functools.partial(self.imageClient.deletify, avatar, f"{member.name}#{member.discriminator}")
image = await self.bot.loop.run_in_executor(None, func)
await ctx.send(file=discord.File(fp=image, filename="delet.png"))
@commands.command(name="robot")
async def robot(self, ctx, *, args=None):
"""See a unique robot image from any text."""
if args is None:
args = ctx.author.name
randomInt = random.randrange(1, 3)
async with ctx.typing():
image = await self.imageClient.getRobotImage(args, randomInt)
file = discord.File(fp=image, filename=f"{args}.png")
await ctx.send(file=file)
@commands.command(name="thuglife")
async def thuglife(self, ctx, user: discord.Member = None):
"""Thug Life....."""
member = user or ctx.author
async with ctx.typing():
avatar = await self.imageClient.getAvatar(user=member, size=512)
func = functools.partial(self.imageClient.thugLife, avatar)
image = await self.bot.loop.run_in_executor(None, func)
await ctx.send(file=discord.File(fp=image, filename="thuglife.png"))
def setup(bot):
bot.add_cog(Images(bot)) | en | 0.789627 | Contains commands for manipulation of images. Ask US President Donald Trump to make something illegal. This... this is beautiful! Delet this garbage! #{member.discriminator}") See a unique robot image from any text. Thug Life..... | 2.928238 | 3 |
labJS/conf.py | lpomfrey/django-labjs | 0 | 10036 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from appconf import AppConf
from django.conf import settings # noqa
class LabjsConf(AppConf):
ENABLED = not settings.DEBUG
DEBUG_TOGGLE = 'labjs'
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from appconf import AppConf
from django.conf import settings # noqa
class LabjsConf(AppConf):
ENABLED = not settings.DEBUG
DEBUG_TOGGLE = 'labjs'
| en | 0.744791 | # -*- coding: utf-8 -*- # noqa | 1.102366 | 1 |
tests/test_nanoevents_vector.py | danbarto/coffea | 0 | 10037 | import awkward as ak
from coffea.nanoevents.methods import vector
import pytest
ATOL = 1e-8
def record_arrays_equal(a, b):
return (ak.fields(a) == ak.fields(b)) and all(ak.all(a[f] == b[f]) for f in ak.fields(a))
def test_two_vector():
a = ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]]
},
with_name="TwoVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
b = ak.zip(
{
"x": [[11, 12], [], [13], [14]],
"y": [[15, 16], [], [17], [18]]
},
with_name="TwoVector",
highlevel=False
)
b = ak.Array(b, behavior=vector.behavior)
assert record_arrays_equal(- a, ak.zip(
{
"x": [[-1, -2], [], [-3], [-4]],
"y": [[-5, -6], [], [-7], [-8]]
}
))
assert record_arrays_equal(a + b, ak.zip(
{
"x": [[12, 14], [], [16], [18]],
"y": [[20, 22], [], [24], [26]]
}
))
assert record_arrays_equal(a - b, ak.zip(
{
"x": [[-10, -10], [], [-10], [-10]],
"y": [[-10, -10], [], [-10], [-10]]
}
))
assert record_arrays_equal(a * 2, ak.zip(
{
"x": [[2, 4], [], [6], [8]],
"y": [[10, 12], [], [14], [16]]
}
))
assert record_arrays_equal(a / 2, ak.zip(
{
"x": [[0.5, 1], [], [1.5], [2]],
"y": [[2.5, 3], [], [3.5], [4]]
}
))
assert record_arrays_equal(a.dot(b), ak.Array([[86, 120], [], [158], [200]]))
assert record_arrays_equal(b.dot(a), ak.Array([[86, 120], [], [158], [200]]))
assert ak.all(abs(a.unit.r - 1) < ATOL)
assert ak.all(abs(a.unit.phi - a.phi) < ATOL)
def test_polar_two_vector():
a = ak.zip(
{
"r": [[1, 2], [], [3], [4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
},
with_name="PolarTwoVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert record_arrays_equal(a * 2, ak.zip(
{
"r": [[2, 4], [], [6], [8]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]]
}
))
assert ak.all((a * (-2)).r == [[2, 4], [], [6], [8]])
assert ak.all((a * (-2)).phi - ak.Array([
[-2.8415926535, -2.7415926535],
[],
[-2.6415926535],
[-2.5415926535]
]) < ATOL)
assert record_arrays_equal(a / 2, ak.zip(
{
"r": [[0.5, 1], [], [1.5], [2]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]]
}
))
assert ak.all(abs((-a).x + a.x) < ATOL)
assert ak.all(abs((-a).y + a.y) < ATOL)
assert record_arrays_equal(a * (-1), -a)
assert ak.all(a.unit.phi == a.phi)
def test_three_vector():
a = ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]],
"z": [[9, 10], [], [11], [12]]
},
with_name="ThreeVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
b = ak.zip(
{
"x": [[4, 1], [], [10], [11]],
"y": [[17, 7], [], [11], [6]],
"z": [[9, 11], [], [5], [16]]
},
with_name="ThreeVector",
highlevel=False
)
b = ak.Array(b, behavior=vector.behavior)
assert record_arrays_equal(- a, ak.zip(
{
"x": [[-1, -2], [], [-3], [-4]],
"y": [[-5, -6], [], [-7], [-8]],
"z": [[-9, -10], [], [-11], [-12]]
}
))
assert record_arrays_equal(a + b, ak.zip(
{
"x": [[5, 3], [], [13], [15]],
"y": [[22, 13], [], [18], [14]],
"z": [[18, 21], [], [16], [28]]
}
))
assert record_arrays_equal(a - b, ak.zip(
{
"x": [[-3, 1], [], [-7], [-7]],
"y": [[-12, -1], [], [-4], [2]],
"z": [[0, -1], [], [6], [-4]]
}
))
assert record_arrays_equal(a * 2, ak.zip(
{
"x": [[2, 4], [], [6], [8]],
"y": [[10, 12], [], [14], [16]],
"z": [[18, 20], [], [22], [24]]
}
))
assert record_arrays_equal(a / 2, ak.zip(
{
"x": [[0.5, 1], [], [1.5], [2]],
"y": [[2.5, 3], [], [3.5], [4]],
"z": [[4.5, 5], [], [5.5], [6]]
}
))
assert ak.all(a.dot(b) == ak.Array([[170, 154], [], [162], [284]]))
assert ak.all(b.dot(a) == ak.Array([[170, 154], [], [162], [284]]))
assert record_arrays_equal(a.cross(b), ak.zip(
{
"x": [[-108, -4], [], [-86], [56]],
"y": [[27, -12], [], [95], [68]],
"z": [[-3, 8], [], [-37], [-64]]
}
))
assert record_arrays_equal(b.cross(a), ak.zip(
{
"x": [[108, 4], [], [86], [-56]],
"y": [[-27, 12], [], [-95], [-68]],
"z": [[3, -8], [], [37], [64]]
}
))
assert ak.all(abs(a.unit.rho - 1) < ATOL)
assert ak.all(abs(a.unit.phi - a.phi) < ATOL)
def test_spherical_three_vector():
a = ak.zip(
{
"rho": [[1.0, 2.0], [], [3.0], [4.0]],
"theta": [[1.2, 0.7], [], [1.8], [1.9]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
},
with_name="SphericalThreeVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert ak.all(abs((-a).x + a.x) < ATOL)
assert ak.all(abs((-a).y + a.y) < ATOL)
assert ak.all(abs((-a).z + a.z) < ATOL)
assert record_arrays_equal(a * (-1), -a)
def test_lorentz_vector():
a = ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]],
"z": [[9, 10], [], [11], [12]],
"t": [[50, 51], [], [52], [53]]
},
with_name="LorentzVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
b = ak.zip(
{
"x": [[4, 1], [], [10], [11]],
"y": [[17, 7], [], [11], [6]],
"z": [[9, 11], [], [5], [16]],
"t": [[60, 61], [], [62], [63]]
},
with_name="LorentzVector",
highlevel=False
)
b = ak.Array(b, behavior=vector.behavior)
assert record_arrays_equal(- a, ak.zip(
{
"x": [[-1, -2], [], [-3], [-4]],
"y": [[-5, -6], [], [-7], [-8]],
"z": [[-9, -10], [], [-11], [-12]],
"t": [[-50, -51], [], [-52], [-53]]
}
))
assert record_arrays_equal(a + b, ak.zip(
{
"x": [[5, 3], [], [13], [15]],
"y": [[22, 13], [], [18], [14]],
"z": [[18, 21], [], [16], [28]],
"t": [[110, 112], [], [114], [116]]
}
))
assert record_arrays_equal(a - b, ak.zip(
{
"x": [[-3, 1], [], [-7], [-7]],
"y": [[-12, -1], [], [-4], [2]],
"z": [[0, -1], [], [6], [-4]],
"t": [[-10, -10], [], [-10], [-10]]
}
))
assert record_arrays_equal(a * 2, ak.zip(
{
"x": [[2, 4], [], [6], [8]],
"y": [[10, 12], [], [14], [16]],
"z": [[18, 20], [], [22], [24]],
"t": [[100, 102], [], [104], [106]]
}
))
assert record_arrays_equal(a / 2, ak.zip(
{
"x": [[0.5, 1], [], [1.5], [2]],
"y": [[2.5, 3], [], [3.5], [4]],
"z": [[4.5, 5], [], [5.5], [6]],
"t": [[25, 25.5], [], [26], [26.5]]
}
))
assert record_arrays_equal(a.pvec, ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]],
"z": [[9, 10], [], [11], [12]],
}
))
boosted = a.boost(-a.boostvec)
assert ak.all(abs(boosted.x) < ATOL)
assert ak.all(abs(boosted.y) < ATOL)
assert ak.all(abs(boosted.z) < ATOL)
def test_pt_eta_phi_m_lorentz_vector():
a = ak.zip(
{
"pt": [[1, 2], [], [3], [4]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"mass": [[0.5, 0.9], [], [1.3], [4.5]]
},
with_name="PtEtaPhiMLorentzVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert ak.all((a * (-2)).pt == ak.Array([[2, 4], [], [6], [8]]))
assert ak.all((a * (-2)).theta - ak.Array([
[2.556488570968, 2.65804615357],
[],
[2.74315571762],
[3.07487087733]
]) < ATOL)
assert ak.all((a * (-2)).phi - ak.Array([
[-2.8415926535, -2.7415926535],
[],
[-2.6415926535],
[-2.5415926535]
]) < ATOL)
assert record_arrays_equal(a / 2, ak.zip(
{
"pt": [[0.5, 1], [], [1.5], [2]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"mass": [[0.25, 0.45], [], [0.65], [2.25]]
}
))
assert record_arrays_equal(a * (-1), -a)
boosted = a.boost(-a.boostvec)
assert ak.all(abs(boosted.x) < ATOL)
assert ak.all(abs(boosted.y) < ATOL)
assert ak.all(abs(boosted.z) < ATOL)
def test_pt_eta_phi_e_lorentz_vector():
a = ak.zip(
{
"pt": [[1, 2], [], [3], [4]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"energy": [[50, 51], [], [52], [60]]
},
with_name="PtEtaPhiELorentzVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert ak.all((a * (-2)).pt == ak.Array([[2, 4], [], [6], [8]]))
assert ak.all((a * (-2)).theta - ak.Array([
[2.556488570968, 2.65804615357],
[],
[2.74315571762],
[3.07487087733]
]) < ATOL)
assert ak.all((a * (-2)).phi - ak.Array([
[-2.8415926535, -2.7415926535],
[],
[-2.6415926535],
[-2.5415926535]
]) < ATOL)
assert record_arrays_equal(a / 2, ak.zip(
{
"pt": [[0.5, 1], [], [1.5], [2]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"energy": [[25, 25.5], [], [26], [30]]
}
))
assert record_arrays_equal(a * (-1), -a)
boosted = a.boost(-a.boostvec)
assert ak.all(abs(boosted.x) < ATOL)
assert ak.all(abs(boosted.y) < ATOL)
assert ak.all(abs(boosted.z) < ATOL)
| import awkward as ak
from coffea.nanoevents.methods import vector
import pytest
ATOL = 1e-8
def record_arrays_equal(a, b):
return (ak.fields(a) == ak.fields(b)) and all(ak.all(a[f] == b[f]) for f in ak.fields(a))
def test_two_vector():
a = ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]]
},
with_name="TwoVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
b = ak.zip(
{
"x": [[11, 12], [], [13], [14]],
"y": [[15, 16], [], [17], [18]]
},
with_name="TwoVector",
highlevel=False
)
b = ak.Array(b, behavior=vector.behavior)
assert record_arrays_equal(- a, ak.zip(
{
"x": [[-1, -2], [], [-3], [-4]],
"y": [[-5, -6], [], [-7], [-8]]
}
))
assert record_arrays_equal(a + b, ak.zip(
{
"x": [[12, 14], [], [16], [18]],
"y": [[20, 22], [], [24], [26]]
}
))
assert record_arrays_equal(a - b, ak.zip(
{
"x": [[-10, -10], [], [-10], [-10]],
"y": [[-10, -10], [], [-10], [-10]]
}
))
assert record_arrays_equal(a * 2, ak.zip(
{
"x": [[2, 4], [], [6], [8]],
"y": [[10, 12], [], [14], [16]]
}
))
assert record_arrays_equal(a / 2, ak.zip(
{
"x": [[0.5, 1], [], [1.5], [2]],
"y": [[2.5, 3], [], [3.5], [4]]
}
))
assert record_arrays_equal(a.dot(b), ak.Array([[86, 120], [], [158], [200]]))
assert record_arrays_equal(b.dot(a), ak.Array([[86, 120], [], [158], [200]]))
assert ak.all(abs(a.unit.r - 1) < ATOL)
assert ak.all(abs(a.unit.phi - a.phi) < ATOL)
def test_polar_two_vector():
a = ak.zip(
{
"r": [[1, 2], [], [3], [4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
},
with_name="PolarTwoVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert record_arrays_equal(a * 2, ak.zip(
{
"r": [[2, 4], [], [6], [8]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]]
}
))
assert ak.all((a * (-2)).r == [[2, 4], [], [6], [8]])
assert ak.all((a * (-2)).phi - ak.Array([
[-2.8415926535, -2.7415926535],
[],
[-2.6415926535],
[-2.5415926535]
]) < ATOL)
assert record_arrays_equal(a / 2, ak.zip(
{
"r": [[0.5, 1], [], [1.5], [2]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]]
}
))
assert ak.all(abs((-a).x + a.x) < ATOL)
assert ak.all(abs((-a).y + a.y) < ATOL)
assert record_arrays_equal(a * (-1), -a)
assert ak.all(a.unit.phi == a.phi)
def test_three_vector():
a = ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]],
"z": [[9, 10], [], [11], [12]]
},
with_name="ThreeVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
b = ak.zip(
{
"x": [[4, 1], [], [10], [11]],
"y": [[17, 7], [], [11], [6]],
"z": [[9, 11], [], [5], [16]]
},
with_name="ThreeVector",
highlevel=False
)
b = ak.Array(b, behavior=vector.behavior)
assert record_arrays_equal(- a, ak.zip(
{
"x": [[-1, -2], [], [-3], [-4]],
"y": [[-5, -6], [], [-7], [-8]],
"z": [[-9, -10], [], [-11], [-12]]
}
))
assert record_arrays_equal(a + b, ak.zip(
{
"x": [[5, 3], [], [13], [15]],
"y": [[22, 13], [], [18], [14]],
"z": [[18, 21], [], [16], [28]]
}
))
assert record_arrays_equal(a - b, ak.zip(
{
"x": [[-3, 1], [], [-7], [-7]],
"y": [[-12, -1], [], [-4], [2]],
"z": [[0, -1], [], [6], [-4]]
}
))
assert record_arrays_equal(a * 2, ak.zip(
{
"x": [[2, 4], [], [6], [8]],
"y": [[10, 12], [], [14], [16]],
"z": [[18, 20], [], [22], [24]]
}
))
assert record_arrays_equal(a / 2, ak.zip(
{
"x": [[0.5, 1], [], [1.5], [2]],
"y": [[2.5, 3], [], [3.5], [4]],
"z": [[4.5, 5], [], [5.5], [6]]
}
))
assert ak.all(a.dot(b) == ak.Array([[170, 154], [], [162], [284]]))
assert ak.all(b.dot(a) == ak.Array([[170, 154], [], [162], [284]]))
assert record_arrays_equal(a.cross(b), ak.zip(
{
"x": [[-108, -4], [], [-86], [56]],
"y": [[27, -12], [], [95], [68]],
"z": [[-3, 8], [], [-37], [-64]]
}
))
assert record_arrays_equal(b.cross(a), ak.zip(
{
"x": [[108, 4], [], [86], [-56]],
"y": [[-27, 12], [], [-95], [-68]],
"z": [[3, -8], [], [37], [64]]
}
))
assert ak.all(abs(a.unit.rho - 1) < ATOL)
assert ak.all(abs(a.unit.phi - a.phi) < ATOL)
def test_spherical_three_vector():
a = ak.zip(
{
"rho": [[1.0, 2.0], [], [3.0], [4.0]],
"theta": [[1.2, 0.7], [], [1.8], [1.9]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
},
with_name="SphericalThreeVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert ak.all(abs((-a).x + a.x) < ATOL)
assert ak.all(abs((-a).y + a.y) < ATOL)
assert ak.all(abs((-a).z + a.z) < ATOL)
assert record_arrays_equal(a * (-1), -a)
def test_lorentz_vector():
a = ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]],
"z": [[9, 10], [], [11], [12]],
"t": [[50, 51], [], [52], [53]]
},
with_name="LorentzVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
b = ak.zip(
{
"x": [[4, 1], [], [10], [11]],
"y": [[17, 7], [], [11], [6]],
"z": [[9, 11], [], [5], [16]],
"t": [[60, 61], [], [62], [63]]
},
with_name="LorentzVector",
highlevel=False
)
b = ak.Array(b, behavior=vector.behavior)
assert record_arrays_equal(- a, ak.zip(
{
"x": [[-1, -2], [], [-3], [-4]],
"y": [[-5, -6], [], [-7], [-8]],
"z": [[-9, -10], [], [-11], [-12]],
"t": [[-50, -51], [], [-52], [-53]]
}
))
assert record_arrays_equal(a + b, ak.zip(
{
"x": [[5, 3], [], [13], [15]],
"y": [[22, 13], [], [18], [14]],
"z": [[18, 21], [], [16], [28]],
"t": [[110, 112], [], [114], [116]]
}
))
assert record_arrays_equal(a - b, ak.zip(
{
"x": [[-3, 1], [], [-7], [-7]],
"y": [[-12, -1], [], [-4], [2]],
"z": [[0, -1], [], [6], [-4]],
"t": [[-10, -10], [], [-10], [-10]]
}
))
assert record_arrays_equal(a * 2, ak.zip(
{
"x": [[2, 4], [], [6], [8]],
"y": [[10, 12], [], [14], [16]],
"z": [[18, 20], [], [22], [24]],
"t": [[100, 102], [], [104], [106]]
}
))
assert record_arrays_equal(a / 2, ak.zip(
{
"x": [[0.5, 1], [], [1.5], [2]],
"y": [[2.5, 3], [], [3.5], [4]],
"z": [[4.5, 5], [], [5.5], [6]],
"t": [[25, 25.5], [], [26], [26.5]]
}
))
assert record_arrays_equal(a.pvec, ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]],
"z": [[9, 10], [], [11], [12]],
}
))
boosted = a.boost(-a.boostvec)
assert ak.all(abs(boosted.x) < ATOL)
assert ak.all(abs(boosted.y) < ATOL)
assert ak.all(abs(boosted.z) < ATOL)
def test_pt_eta_phi_m_lorentz_vector():
a = ak.zip(
{
"pt": [[1, 2], [], [3], [4]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"mass": [[0.5, 0.9], [], [1.3], [4.5]]
},
with_name="PtEtaPhiMLorentzVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert ak.all((a * (-2)).pt == ak.Array([[2, 4], [], [6], [8]]))
assert ak.all((a * (-2)).theta - ak.Array([
[2.556488570968, 2.65804615357],
[],
[2.74315571762],
[3.07487087733]
]) < ATOL)
assert ak.all((a * (-2)).phi - ak.Array([
[-2.8415926535, -2.7415926535],
[],
[-2.6415926535],
[-2.5415926535]
]) < ATOL)
assert record_arrays_equal(a / 2, ak.zip(
{
"pt": [[0.5, 1], [], [1.5], [2]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"mass": [[0.25, 0.45], [], [0.65], [2.25]]
}
))
assert record_arrays_equal(a * (-1), -a)
boosted = a.boost(-a.boostvec)
assert ak.all(abs(boosted.x) < ATOL)
assert ak.all(abs(boosted.y) < ATOL)
assert ak.all(abs(boosted.z) < ATOL)
def test_pt_eta_phi_e_lorentz_vector():
a = ak.zip(
{
"pt": [[1, 2], [], [3], [4]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"energy": [[50, 51], [], [52], [60]]
},
with_name="PtEtaPhiELorentzVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert ak.all((a * (-2)).pt == ak.Array([[2, 4], [], [6], [8]]))
assert ak.all((a * (-2)).theta - ak.Array([
[2.556488570968, 2.65804615357],
[],
[2.74315571762],
[3.07487087733]
]) < ATOL)
assert ak.all((a * (-2)).phi - ak.Array([
[-2.8415926535, -2.7415926535],
[],
[-2.6415926535],
[-2.5415926535]
]) < ATOL)
assert record_arrays_equal(a / 2, ak.zip(
{
"pt": [[0.5, 1], [], [1.5], [2]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"energy": [[25, 25.5], [], [26], [30]]
}
))
assert record_arrays_equal(a * (-1), -a)
boosted = a.boost(-a.boostvec)
assert ak.all(abs(boosted.x) < ATOL)
assert ak.all(abs(boosted.y) < ATOL)
assert ak.all(abs(boosted.z) < ATOL)
| none | 1 | 2.345596 | 2 |
|
testing/python/telBuggyScript2.py | sys-bio/rrplugins | 0 | 10038 | import roadrunner
import teplugins as tel
i = 0
#for i in range(100):
try:
noisePlugin = tel.Plugin ("tel_add_noise")
print noisePlugin.listOfProperties()
# Create a roadrunner instance
rr = roadrunner.RoadRunner()
rr.load("sbml_test_0001.xml")
# Generate data
data = rr.simulate(0, 10, 511) # Want 512 points
# Get the dataseries from roadrunner
d = tel.getDataSeries (data)
# Assign the dataseries to the plugin inputdata
noisePlugin.InputData = d
# Set parameter for the 'size' of the noise
noisePlugin.Sigma = 3.e-6
# Add the noise
noisePlugin.execute()
# Get the data to plot
noisePlugin.InputData.plot()
# tel.show()
d.writeDataSeries ("testData2.dat")
d.readDataSeries ("testData2.dat")
print "done"
print i
except Exception as e:
print 'Problem: ' + `e`
| import roadrunner
import teplugins as tel
i = 0
#for i in range(100):
try:
noisePlugin = tel.Plugin ("tel_add_noise")
print noisePlugin.listOfProperties()
# Create a roadrunner instance
rr = roadrunner.RoadRunner()
rr.load("sbml_test_0001.xml")
# Generate data
data = rr.simulate(0, 10, 511) # Want 512 points
# Get the dataseries from roadrunner
d = tel.getDataSeries (data)
# Assign the dataseries to the plugin inputdata
noisePlugin.InputData = d
# Set parameter for the 'size' of the noise
noisePlugin.Sigma = 3.e-6
# Add the noise
noisePlugin.execute()
# Get the data to plot
noisePlugin.InputData.plot()
# tel.show()
d.writeDataSeries ("testData2.dat")
d.readDataSeries ("testData2.dat")
print "done"
print i
except Exception as e:
print 'Problem: ' + `e`
| en | 0.392859 | #for i in range(100): # Create a roadrunner instance # Generate data # Want 512 points # Get the dataseries from roadrunner # Assign the dataseries to the plugin inputdata # Set parameter for the 'size' of the noise # Add the noise # Get the data to plot # tel.show() | 2.441255 | 2 |
encyclopaedia/labels.py | tcyrus/renpy-encyclopaedia | 0 | 10039 | <reponame>tcyrus/renpy-encyclopaedia
from renpy import store
class Labels(store.object):
"""Controls how the labels that display Encyclopaedia data appear.
Attributes:
percentage_label (str): Placed next to the percentage unlocked number
page_label (str): Placed before the entry page displayed
page_separator_label (str): Placed in-between the
current page number and the total page number
sort_number_label (str): Label for Number Sorting
sort_alphabetical_label (str): Label for Alphabetical sorting
sort_reverse_alphabetical_label (str): Label for Reverse Alphabetical
sorting
sort_subject_label (str): Label for Subject sorting
sort_unread_label (str): Label for Unread sorting
unread_entry_label (str): Default for the tag next to unread entries
locked_entry_label (str): Default for a "Locked Entry" button
"""
def __init__(self, encyclopaedia):
self.encyclopaedia = encyclopaedia
self.percentage_label = '%'
self.page_label = 'Page'
self.page_separator_label = '/'
self.sort_number_label = "Number"
self.sort_alphabetical_label = "A to Z"
self.sort_reverse_alphabetical_label = "Z to A"
self.sort_subject_label = "Subject"
self.sort_unread_label = "Unread"
self.unread_entry_label = "New!"
self.locked_entry_label = "???"
@property
def percentage_unlocked(self):
"""Percentage representation of the amount of the encyclopaedia
that's unlocked. ie: '50%'.
Returns:
str
"""
percentage_unlocked = int(self.encyclopaedia.percentage_unlocked)
return "{}{}".format(percentage_unlocked, self.percentage_label)
@property
def entry_current_page(self):
"""The sub-page of an entry that is being viewed.
Returns:
str
"""
try:
total_pages = self.encyclopaedia.active.pages
except AttributeError:
raise AttributeError(
"Cannot display Entry's current page when no entry is open."
)
label = "{0} {1} {2} {3}".format(
self.page_label,
self.encyclopaedia.sub_current_position,
self.page_separator_label,
total_pages
)
return label
@property
def sorting_mode(self):
"""Label for the encyclopaedia's current sorting mode.
Returns:
str
"""
enc = self.encyclopaedia
sorting_strings = {
enc.SORT_NUMBER: self.sort_number_label,
enc.SORT_ALPHABETICAL: self.sort_alphabetical_label,
enc.SORT_REVERSE_ALPHABETICAL: self.sort_reverse_alphabetical_label, # NOQA: E501
enc.SORT_SUBJECT: self.sort_subject_label,
enc.SORT_UNREAD: self.sort_unread_label
}
return sorting_strings[enc.sorting_mode]
| from renpy import store
class Labels(store.object):
"""Controls how the labels that display Encyclopaedia data appear.
Attributes:
percentage_label (str): Placed next to the percentage unlocked number
page_label (str): Placed before the entry page displayed
page_separator_label (str): Placed in-between the
current page number and the total page number
sort_number_label (str): Label for Number Sorting
sort_alphabetical_label (str): Label for Alphabetical sorting
sort_reverse_alphabetical_label (str): Label for Reverse Alphabetical
sorting
sort_subject_label (str): Label for Subject sorting
sort_unread_label (str): Label for Unread sorting
unread_entry_label (str): Default for the tag next to unread entries
locked_entry_label (str): Default for a "Locked Entry" button
"""
def __init__(self, encyclopaedia):
self.encyclopaedia = encyclopaedia
self.percentage_label = '%'
self.page_label = 'Page'
self.page_separator_label = '/'
self.sort_number_label = "Number"
self.sort_alphabetical_label = "A to Z"
self.sort_reverse_alphabetical_label = "Z to A"
self.sort_subject_label = "Subject"
self.sort_unread_label = "Unread"
self.unread_entry_label = "New!"
self.locked_entry_label = "???"
@property
def percentage_unlocked(self):
"""Percentage representation of the amount of the encyclopaedia
        that's unlocked, i.e. '50%'.
Returns:
str
"""
percentage_unlocked = int(self.encyclopaedia.percentage_unlocked)
return "{}{}".format(percentage_unlocked, self.percentage_label)
@property
def entry_current_page(self):
"""The sub-page of an entry that is being viewed.
Returns:
str
"""
try:
total_pages = self.encyclopaedia.active.pages
except AttributeError:
raise AttributeError(
"Cannot display Entry's current page when no entry is open."
)
label = "{0} {1} {2} {3}".format(
self.page_label,
self.encyclopaedia.sub_current_position,
self.page_separator_label,
total_pages
)
return label
@property
def sorting_mode(self):
"""Label for the encyclopaedia's current sorting mode.
Returns:
str
"""
enc = self.encyclopaedia
sorting_strings = {
enc.SORT_NUMBER: self.sort_number_label,
enc.SORT_ALPHABETICAL: self.sort_alphabetical_label,
enc.SORT_REVERSE_ALPHABETICAL: self.sort_reverse_alphabetical_label, # NOQA: E501
enc.SORT_SUBJECT: self.sort_subject_label,
enc.SORT_UNREAD: self.sort_unread_label
}
return sorting_strings[enc.sorting_mode] | en | 0.605425 | Controls how the labels that display Encyclopaedia data appear. Attributes: percentage_label (str): Placed next to the percentage unlocked number page_label (str): Placed before the entry page displayed page_separator_label (str): Placed in-between the current page number and the total page number sort_number_label (str): Label for Number Sorting sort_alphabetical_label (str): Label for Alphabetical sorting sort_reverse_alphabetical_label (str): Label for Reverse Alphabetical sorting sort_subject_label (str): Label for Subject sorting sort_unread_label (str): Label for Unread sorting unread_entry_label (str): Default for the tag next to unread entries locked_entry_label (str): Default for a "Locked Entry" button Percentage representation of the amount of the encyclopaedia that's unlocked. ie: '50%'. Returns: str The sub-page of an entry that is being viewed. Returns: str Label for the encyclopaedia's current sorting mode. Returns: str # NOQA: E501 | 3.208695 | 3 |
ncservice/ncDeviceOps/threaded/get_configs.py | cunningr/yanccm | 0 | 10040 | <filename>ncservice/ncDeviceOps/threaded/get_configs.py
import logging
from ncservice.ncDeviceOps.nc_device_ops import NcDeviceOps
from ncservice.ncDeviceOps.task_report import TaskReport
from ncservice.ncDeviceOps.threaded.base_thread_class import BaseThreadClass
logger = logging.getLogger('main.{}'.format(__name__))
extra = {'signature': '---SIGNATURE-NOT-SET---'}
class GetConfigs(BaseThreadClass):
def __init__(self, service):
super().__init__()
self.service = service
self.results = TaskReport(service)
def get_configs(self):
logger.debug('Requesting thread queue for _th_read_configs', extra=extra)
enclosure_queue = self.create_thread_queue(
self._th_read_configs
)
for device in self.service:
enclosure_queue.put(device)
enclosure_queue.join()
return self.results
def _th_read_configs(self, tid, queue):
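        # Queue worker run by each thread: pull a device entry off the shared queue,
        # open a session to it via NcDeviceOps and store the retrieved running config
        # as both the 'original' and 'current' snapshot in the task report.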
while True:
target_device = queue.get()
device_name = target_device['device']
host = target_device['host']
port = target_device.get('ncport', 830)
session = NcDeviceOps(host, port=port, tid=tid)
current_config = session.nc_get_configs()
if current_config is not None:
self.results.set_device_config_data('original_running_configs', device_name, current_config)
self.results.set_device_config_data('current_running_configs', device_name, current_config)
self.results.set_service_result(device_name, 'SUCCESS')
else:
logger.error('TID-{}: Unable to retrieve config for device: {}'
.format(tid, device_name), extra=extra)
queue.task_done()
continue
session.close_session()
queue.task_done()
| <filename>ncservice/ncDeviceOps/threaded/get_configs.py
import logging
from ncservice.ncDeviceOps.nc_device_ops import NcDeviceOps
from ncservice.ncDeviceOps.task_report import TaskReport
from ncservice.ncDeviceOps.threaded.base_thread_class import BaseThreadClass
logger = logging.getLogger('main.{}'.format(__name__))
extra = {'signature': '---SIGNATURE-NOT-SET---'}
class GetConfigs(BaseThreadClass):
def __init__(self, service):
super().__init__()
self.service = service
self.results = TaskReport(service)
def get_configs(self):
logger.debug('Requesting thread queue for _th_read_configs', extra=extra)
enclosure_queue = self.create_thread_queue(
self._th_read_configs
)
for device in self.service:
enclosure_queue.put(device)
enclosure_queue.join()
return self.results
def _th_read_configs(self, tid, queue):
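        # Queue worker run by each thread: pull a device entry off the shared queue,
        # open a session to it via NcDeviceOps and store the retrieved running config
        # as both the 'original' and 'current' snapshot in the task report.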
while True:
target_device = queue.get()
device_name = target_device['device']
host = target_device['host']
port = target_device.get('ncport', 830)
session = NcDeviceOps(host, port=port, tid=tid)
current_config = session.nc_get_configs()
if current_config is not None:
self.results.set_device_config_data('original_running_configs', device_name, current_config)
self.results.set_device_config_data('current_running_configs', device_name, current_config)
self.results.set_service_result(device_name, 'SUCCESS')
else:
logger.error('TID-{}: Unable to retrieve config for device: {}'
.format(tid, device_name), extra=extra)
queue.task_done()
continue
session.close_session()
queue.task_done()
| none | 1 | 2.062717 | 2 |
|
CodeHS/Looping/DoubleForLoop.py | Kev-in123/ICS2O7 | 2 | 10041 | <gh_stars>1-10
"""
This program visualizes nested for loops by printing number 0 through 3
and then 0 through 3 for the nested loop.
"""
for i in range(4):
print("Outer for loop: " + str(i))
for j in range(4):
print(" Inner for loop: " + str(j)) | """
This program visualizes nested for loops by printing number 0 through 3
and then 0 through 3 for the nested loop.
"""
for i in range(4):
print("Outer for loop: " + str(i))
for j in range(4):
print(" Inner for loop: " + str(j)) | en | 0.932416 | This program visualizes nested for loops by printing number 0 through 3
and then 0 through 3 for the nested loop. | 4.525121 | 5 |
saleor/graphql/channel/tests/test_base_channel_listing.py | fairhopeweb/saleor | 15,337 | 10042 | from collections import defaultdict
import graphene
import pytest
from django.core.exceptions import ValidationError
from ....shipping.error_codes import ShippingErrorCode
from ..mutations import BaseChannelListingMutation
def test_validate_duplicated_channel_ids(channel_PLN, channel_USD):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.validate_duplicated_channel_ids(
[channel_id],
[second_channel_id],
errors,
ShippingErrorCode.DUPLICATED_INPUT_ITEM.value,
)
# then
assert result is None
assert errors["input"] == []
def test_validate_duplicated_channel_ids_with_duplicates(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.validate_duplicated_channel_ids(
[channel_id], [second_channel_id], errors, error_code
)
# then
assert result is None
assert errors["input"][0].code == error_code
def test_validate_duplicated_channel_values(channel_PLN, channel_USD):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
field = "add_channels"
# when
result = BaseChannelListingMutation.validate_duplicated_channel_values(
[channel_id, second_channel_id], field, errors, error_code
)
# then
assert result is None
assert errors[field] == []
def test_validate_duplicated_channel_values_with_duplicates(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
field = "add_channels"
# when
result = BaseChannelListingMutation.validate_duplicated_channel_values(
[channel_id, second_channel_id], field, errors, error_code
)
# then
assert result is None
assert errors[field][0].code == error_code
def test_clean_channels_add_channels(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.clean_channels(
None, {"add_channels": [{"channel_id": channel_id}]}, errors, error_code
)
# then
assert result == {
"add_channels": [{"channel_id": channel_id, "channel": channel_PLN}],
"remove_channels": [],
}
assert errors["input"] == []
def test_clean_channels_remove_channels(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.clean_channels(
None, {"remove_channels": [channel_id]}, errors, error_code
)
# then
assert result == {"add_channels": [], "remove_channels": [str(channel_PLN.id)]}
assert errors["input"] == []
def test_test_clean_channels_with_errors(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.clean_channels(
None, {"remove_channels": [channel_id, channel_id]}, errors, error_code
)
# then
assert result == {}
assert errors["remove_channels"][0].code == error_code
def test_test_clean_channels_invalid_object_type(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Product", channel_PLN.id)
error_code = ShippingErrorCode.GRAPHQL_ERROR.value
errors = defaultdict(list)
# when
with pytest.raises(ValidationError) as error:
BaseChannelListingMutation.clean_channels(
None, {"remove_channels": [channel_id]}, errors, error_code
)
# then
assert (
error.value.error_dict["remove_channels"][0].message
== f"Must receive Channel id: {channel_id}."
)
| from collections import defaultdict
import graphene
import pytest
from django.core.exceptions import ValidationError
from ....shipping.error_codes import ShippingErrorCode
from ..mutations import BaseChannelListingMutation
def test_validate_duplicated_channel_ids(channel_PLN, channel_USD):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.validate_duplicated_channel_ids(
[channel_id],
[second_channel_id],
errors,
ShippingErrorCode.DUPLICATED_INPUT_ITEM.value,
)
# then
assert result is None
assert errors["input"] == []
def test_validate_duplicated_channel_ids_with_duplicates(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.validate_duplicated_channel_ids(
[channel_id], [second_channel_id], errors, error_code
)
# then
assert result is None
assert errors["input"][0].code == error_code
def test_validate_duplicated_channel_values(channel_PLN, channel_USD):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_USD.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
field = "add_channels"
# when
result = BaseChannelListingMutation.validate_duplicated_channel_values(
[channel_id, second_channel_id], field, errors, error_code
)
# then
assert result is None
assert errors[field] == []
def test_validate_duplicated_channel_values_with_duplicates(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
second_channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
field = "add_channels"
# when
result = BaseChannelListingMutation.validate_duplicated_channel_values(
[channel_id, second_channel_id], field, errors, error_code
)
# then
assert result is None
assert errors[field][0].code == error_code
def test_clean_channels_add_channels(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.clean_channels(
None, {"add_channels": [{"channel_id": channel_id}]}, errors, error_code
)
# then
assert result == {
"add_channels": [{"channel_id": channel_id, "channel": channel_PLN}],
"remove_channels": [],
}
assert errors["input"] == []
def test_clean_channels_remove_channels(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.clean_channels(
None, {"remove_channels": [channel_id]}, errors, error_code
)
# then
assert result == {"add_channels": [], "remove_channels": [str(channel_PLN.id)]}
assert errors["input"] == []
def test_test_clean_channels_with_errors(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
errors = defaultdict(list)
# when
result = BaseChannelListingMutation.clean_channels(
None, {"remove_channels": [channel_id, channel_id]}, errors, error_code
)
# then
assert result == {}
assert errors["remove_channels"][0].code == error_code
def test_test_clean_channels_invalid_object_type(channel_PLN):
# given
channel_id = graphene.Node.to_global_id("Product", channel_PLN.id)
error_code = ShippingErrorCode.GRAPHQL_ERROR.value
errors = defaultdict(list)
# when
with pytest.raises(ValidationError) as error:
BaseChannelListingMutation.clean_channels(
None, {"remove_channels": [channel_id]}, errors, error_code
)
# then
assert (
error.value.error_dict["remove_channels"][0].message
== f"Must receive Channel id: {channel_id}."
)
| en | 0.24573 | # given # when # then # given # when # then # given # when # then # given # when # then # given # when # then # given # when # then # given # when # then # given # when # then | 2.40001 | 2 |
francoralite/apps/francoralite_front/tools.py | Francoralite/francoralite | 2 | 10043 | # -*- coding: utf-8 -*-
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: <NAME> / Coopérative ARTEFACTS <<EMAIL>>
import requests
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect, Http404
from django.utils.translation import gettext as _
from requests.exceptions import RequestException
from rest_framework import status
from francoralite.apps.francoralite_front.errors import APPLICATION_ERRORS
from .views.related import (
write_fond_related,
write_mission_related,
write_collection_related,
write_item_related)
HTTP_ERRORS = {
status.HTTP_400_BAD_REQUEST: APPLICATION_ERRORS['HTTP_API_400'],
status.HTTP_401_UNAUTHORIZED: APPLICATION_ERRORS['HTTP_API_401'],
status.HTTP_403_FORBIDDEN: APPLICATION_ERRORS['HTTP_API_403'],
status.HTTP_404_NOT_FOUND: APPLICATION_ERRORS['HTTP_API_404'],
status.HTTP_409_CONFLICT: APPLICATION_ERRORS['HTTP_API_409'],
}
PROBLEM_NAMES = [
"legal_rights",
"recording_context",
"location_gis",
]
class UserMessageError(RequestException): pass
def get_token_header(request):
"""
    Build the HTTP Authorization header from the OIDC access token stored in the session.
"""
auth_token = request.session.get('oidc_access_token')
if auth_token:
return {'Authorization': 'Bearer ' + auth_token}
else:
return {}
def check_status_code(status_code, allowed_codes=(status.HTTP_200_OK,)):
"""
    Translate unexpected API status codes into the matching Django or user-facing exceptions.
"""
if status_code == status.HTTP_403_FORBIDDEN:
raise PermissionDenied(_('Accès interdit.'))
if status_code == status.HTTP_404_NOT_FOUND:
raise Http404(_('Cette fiche n’existe pas.'))
if status_code == status.HTTP_409_CONFLICT:
raise UserMessageError(_('Une fiche avec ce code existe déjà.'))
if status.HTTP_400_BAD_REQUEST <= status_code < status.HTTP_500_INTERNAL_SERVER_ERROR:
raise RequestException()
if status_code not in allowed_codes:
raise Exception(HTTP_ERRORS[status_code])
def handle_message_from_exception(request, exception):
"""
    Attach an error message to the request, depending on the type of exception raised.
"""
if isinstance(exception, UserMessageError):
messages.add_message(request, messages.ERROR, exception)
elif exception is not None:
messages.add_message(request, messages.ERROR,
_('Une erreur indéterminée est survenue.'))
def request_api(endpoint):
"""
    Send a GET request to the API endpoint and return the JSON response.
"""
response = requests.get(settings.FRONT_HOST_URL + endpoint)
check_status_code(response.status_code)
return response.json()
def post(entity, form_entity, request, *args, **kwargs):
"""
    Validate the form, create the entity through the API and redirect to the relevant page.
"""
form = form_entity(request.POST, request.FILES)
entity_api = entity
entity_url = entity
# Processing the problem names entities
if entity in PROBLEM_NAMES:
entity_api = entity.replace('_', '')
# Processing URL for Mission entity
if entity == 'fond':
entity_url = 'institution/' + kwargs['id_institution'] \
+ '/' + entity
# Processing URL for Mission entity
if entity == 'mission':
entity_url = 'institution/' + kwargs['id_institution'] \
+ '/fond/' + kwargs['id_fond']\
+ '/' + entity
# Processing URL for Collection entity
if entity == 'collection':
entity_url = 'institution/' + kwargs['id_institution'] \
+ '/fond/' + kwargs['id_fond']\
+ '/mission/' + kwargs['id_mission'] \
+ '/' + entity
# Processing URL for Item entity
if entity == 'item':
entity_url = 'institution/' + kwargs['id_institution'] \
+ '/fond/' + kwargs['id_fond']\
+ '/mission/' + kwargs['id_mission'] \
+ '/collection/' + kwargs['id_collection'] \
+ '/' + entity
# Problem with old Telemeta fields/entities
if form.is_valid():
if entity == 'item':
# Concatenate domains
form.cleaned_data['domain'] = ''.join(form.cleaned_data['domain'])
# Remove the 'file' entry : if not, there some bugs
del form.cleaned_data['file']
try:
post_api(settings.FRONT_HOST_URL + '/api/' + entity_api,
data=form.cleaned_data,
request=request,
entity=entity)
if entity == 'fond':
return HttpResponseRedirect(
'/institution/' +
str(form.cleaned_data['institution']))
# Previous page ( not an edit page ... )
if len(request.session["referers"]) > 1:
try:
for referer in request.session["referers"]:
if 'add' not in referer.split('/'):
return HttpResponseRedirect(referer)
except Exception:
return HttpResponseRedirect('/' + entity)
return HttpResponseRedirect('/' + entity)
except RequestException as e:
handle_message_from_exception(request, e)
return HttpResponseRedirect('/' + entity_url + '/add')
return HttpResponseRedirect('/' + entity_url + '/add')
def post_api(endpoint, data, request, entity):
"""
    POST the data to the API, then write the related objects of the created entity.
"""
headers = get_token_header(request=request)
response = requests.post(
endpoint,
data=data,
files=request.FILES,
headers=headers,
)
check_status_code(response.status_code,
allowed_codes=(status.HTTP_200_OK, status.HTTP_201_CREATED))
entity_json = response.json()
if entity == "fond":
write_fond_related(entity_json, request, headers)
if entity == "mission":
write_mission_related(entity_json, request, headers)
if entity == "collection":
write_collection_related(entity_json, request, headers)
if entity == "item":
write_item_related(entity_json, request, headers)
return entity_json
def patch(entity, form_entity, request, *args, **kwargs):
"""
    Validate the form, update the entity through the API and redirect to the relevant page.
"""
form = form_entity(request.POST)
if entity == 'item':
form.fields['file'].required = False
id = kwargs.get('id')
entity_api = entity
if entity in PROBLEM_NAMES:
entity_api = entity.replace('_', '')
if form.is_valid():
if entity == "collection":
form.cleaned_data['recorded_from_year'] = \
form.data['recorded_from_year']
form.cleaned_data['recorded_to_year'] = \
form.data['recorded_to_year']
if form.cleaned_data['year_published'] is None:
form.cleaned_data['year_published'] = ''
if entity == "item":
# Concatenate domains
form.cleaned_data['domain'] = ''.join(form.cleaned_data['domain'])
try:
response = patch_api(
settings.FRONT_HOST_URL + '/api/' + entity_api + '/' + str(id),
data=form.cleaned_data,
request=request,
entity=entity
)
if(response.status_code != status.HTTP_200_OK):
return HttpResponseRedirect('/' + entity + '/edit/' +
str(id))
# Previous page ( not an edit page ... )
if len(request.session["referers"]) > 1:
for referer in request.session["referers"]:
if 'edit' not in referer.split('/'):
return HttpResponseRedirect(referer)
return HttpResponseRedirect('/' + entity)
except RequestException as e:
handle_message_from_exception(request, e)
return HttpResponseRedirect('/' + entity + '/edit/' + str(id))
return HttpResponseRedirect('/' + entity + '/edit/' + str(id))
def patch_api(endpoint, data, request, entity):
"""
    PATCH the data to the API, then write the related objects of the updated entity.
"""
response = requests.patch(
endpoint,
data=data,
headers=get_token_header(request=request),
)
check_status_code(response.status_code)
entity_json = response.json()
if entity == "fond":
write_fond_related(
entity_json,
request,
headers=get_token_header(request=request),
)
if entity == "mission":
write_mission_related(
entity_json,
request,
headers=get_token_header(request=request),
)
if entity == "collection":
write_collection_related(
entity_json,
request,
headers=get_token_header(request=request),
)
if entity == "item":
write_item_related(
entity_json,
request,
headers=get_token_header(request=request),
)
return response
def delete(entity, request, *args, **kwargs):
"""
    Delete the entity through the API and redirect to the referring page.
"""
id = kwargs.get('id')
entity_api = entity
if entity in PROBLEM_NAMES:
entity_api = entity.replace('_', '')
try:
delete_api(
settings.FRONT_HOST_URL + '/api/' + entity_api + '/' + str(id),
request=request,
)
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
except RequestException as e:
handle_message_from_exception(request, e)
return HttpResponseRedirect('/' + entity)
def delete_api(endpoint, request):
"""
    Send a DELETE request to the API endpoint and check the response status.
"""
response = requests.delete(
endpoint,
headers=get_token_header(request=request),
)
check_status_code(response.status_code)
return response
| # -*- coding: utf-8 -*-
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: <NAME> / Coopérative ARTEFACTS <<EMAIL>>
import requests
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect, Http404
from django.utils.translation import gettext as _
from requests.exceptions import RequestException
from rest_framework import status
from francoralite.apps.francoralite_front.errors import APPLICATION_ERRORS
from .views.related import (
write_fond_related,
write_mission_related,
write_collection_related,
write_item_related)
HTTP_ERRORS = {
status.HTTP_400_BAD_REQUEST: APPLICATION_ERRORS['HTTP_API_400'],
status.HTTP_401_UNAUTHORIZED: APPLICATION_ERRORS['HTTP_API_401'],
status.HTTP_403_FORBIDDEN: APPLICATION_ERRORS['HTTP_API_403'],
status.HTTP_404_NOT_FOUND: APPLICATION_ERRORS['HTTP_API_404'],
status.HTTP_409_CONFLICT: APPLICATION_ERRORS['HTTP_API_409'],
}
PROBLEM_NAMES = [
"legal_rights",
"recording_context",
"location_gis",
]
class UserMessageError(RequestException): pass
def get_token_header(request):
"""
    Build the HTTP Authorization header from the OIDC access token stored in the session.
"""
auth_token = request.session.get('oidc_access_token')
if auth_token:
return {'Authorization': 'Bearer ' + auth_token}
else:
return {}
def check_status_code(status_code, allowed_codes=(status.HTTP_200_OK,)):
"""
    Translate unexpected API status codes into the matching Django or user-facing exceptions.
"""
if status_code == status.HTTP_403_FORBIDDEN:
raise PermissionDenied(_('Accès interdit.'))
if status_code == status.HTTP_404_NOT_FOUND:
raise Http404(_('Cette fiche n’existe pas.'))
if status_code == status.HTTP_409_CONFLICT:
raise UserMessageError(_('Une fiche avec ce code existe déjà.'))
if status.HTTP_400_BAD_REQUEST <= status_code < status.HTTP_500_INTERNAL_SERVER_ERROR:
raise RequestException()
if status_code not in allowed_codes:
raise Exception(HTTP_ERRORS[status_code])
def handle_message_from_exception(request, exception):
"""
    Attach an error message to the request, depending on the type of exception raised.
"""
if isinstance(exception, UserMessageError):
messages.add_message(request, messages.ERROR, exception)
elif exception is not None:
messages.add_message(request, messages.ERROR,
_('Une erreur indéterminée est survenue.'))
def request_api(endpoint):
"""
    Send a GET request to the API endpoint and return the JSON response.
"""
response = requests.get(settings.FRONT_HOST_URL + endpoint)
check_status_code(response.status_code)
return response.json()
def post(entity, form_entity, request, *args, **kwargs):
"""
    Validate the form, create the entity through the API and redirect to the relevant page.
"""
form = form_entity(request.POST, request.FILES)
entity_api = entity
entity_url = entity
# Processing the problem names entities
if entity in PROBLEM_NAMES:
entity_api = entity.replace('_', '')
# Processing URL for Mission entity
if entity == 'fond':
entity_url = 'institution/' + kwargs['id_institution'] \
+ '/' + entity
# Processing URL for Mission entity
if entity == 'mission':
entity_url = 'institution/' + kwargs['id_institution'] \
+ '/fond/' + kwargs['id_fond']\
+ '/' + entity
# Processing URL for Collection entity
if entity == 'collection':
entity_url = 'institution/' + kwargs['id_institution'] \
+ '/fond/' + kwargs['id_fond']\
+ '/mission/' + kwargs['id_mission'] \
+ '/' + entity
# Processing URL for Item entity
if entity == 'item':
entity_url = 'institution/' + kwargs['id_institution'] \
+ '/fond/' + kwargs['id_fond']\
+ '/mission/' + kwargs['id_mission'] \
+ '/collection/' + kwargs['id_collection'] \
+ '/' + entity
# Problem with old Telemeta fields/entities
if form.is_valid():
if entity == 'item':
# Concatenate domains
form.cleaned_data['domain'] = ''.join(form.cleaned_data['domain'])
# Remove the 'file' entry : if not, there some bugs
del form.cleaned_data['file']
try:
post_api(settings.FRONT_HOST_URL + '/api/' + entity_api,
data=form.cleaned_data,
request=request,
entity=entity)
if entity == 'fond':
return HttpResponseRedirect(
'/institution/' +
str(form.cleaned_data['institution']))
# Previous page ( not an edit page ... )
if len(request.session["referers"]) > 1:
try:
for referer in request.session["referers"]:
if 'add' not in referer.split('/'):
return HttpResponseRedirect(referer)
except Exception:
return HttpResponseRedirect('/' + entity)
return HttpResponseRedirect('/' + entity)
except RequestException as e:
handle_message_from_exception(request, e)
return HttpResponseRedirect('/' + entity_url + '/add')
return HttpResponseRedirect('/' + entity_url + '/add')
def post_api(endpoint, data, request, entity):
"""
    POST the data to the API, then write the related objects of the created entity.
"""
headers = get_token_header(request=request)
response = requests.post(
endpoint,
data=data,
files=request.FILES,
headers=headers,
)
check_status_code(response.status_code,
allowed_codes=(status.HTTP_200_OK, status.HTTP_201_CREATED))
entity_json = response.json()
if entity == "fond":
write_fond_related(entity_json, request, headers)
if entity == "mission":
write_mission_related(entity_json, request, headers)
if entity == "collection":
write_collection_related(entity_json, request, headers)
if entity == "item":
write_item_related(entity_json, request, headers)
return entity_json
def patch(entity, form_entity, request, *args, **kwargs):
"""
    Validate the form, update the entity through the API and redirect to the relevant page.
"""
form = form_entity(request.POST)
if entity == 'item':
form.fields['file'].required = False
id = kwargs.get('id')
entity_api = entity
if entity in PROBLEM_NAMES:
entity_api = entity.replace('_', '')
if form.is_valid():
if entity == "collection":
form.cleaned_data['recorded_from_year'] = \
form.data['recorded_from_year']
form.cleaned_data['recorded_to_year'] = \
form.data['recorded_to_year']
if form.cleaned_data['year_published'] is None:
form.cleaned_data['year_published'] = ''
if entity == "item":
# Concatenate domains
form.cleaned_data['domain'] = ''.join(form.cleaned_data['domain'])
try:
response = patch_api(
settings.FRONT_HOST_URL + '/api/' + entity_api + '/' + str(id),
data=form.cleaned_data,
request=request,
entity=entity
)
if(response.status_code != status.HTTP_200_OK):
return HttpResponseRedirect('/' + entity + '/edit/' +
str(id))
# Previous page ( not an edit page ... )
if len(request.session["referers"]) > 1:
for referer in request.session["referers"]:
if 'edit' not in referer.split('/'):
return HttpResponseRedirect(referer)
return HttpResponseRedirect('/' + entity)
except RequestException as e:
handle_message_from_exception(request, e)
return HttpResponseRedirect('/' + entity + '/edit/' + str(id))
return HttpResponseRedirect('/' + entity + '/edit/' + str(id))
def patch_api(endpoint, data, request, entity):
"""
    PATCH the data to the API, then write the related objects of the updated entity.
"""
response = requests.patch(
endpoint,
data=data,
headers=get_token_header(request=request),
)
check_status_code(response.status_code)
entity_json = response.json()
if entity == "fond":
write_fond_related(
entity_json,
request,
headers=get_token_header(request=request),
)
if entity == "mission":
write_mission_related(
entity_json,
request,
headers=get_token_header(request=request),
)
if entity == "collection":
write_collection_related(
entity_json,
request,
headers=get_token_header(request=request),
)
if entity == "item":
write_item_related(
entity_json,
request,
headers=get_token_header(request=request),
)
return response
def delete(entity, request, *args, **kwargs):
"""
    Delete the entity through the API and redirect to the referring page.
"""
id = kwargs.get('id')
entity_api = entity
if entity in PROBLEM_NAMES:
entity_api = entity.replace('_', '')
try:
delete_api(
settings.FRONT_HOST_URL + '/api/' + entity_api + '/' + str(id),
request=request,
)
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
except RequestException as e:
handle_message_from_exception(request, e)
return HttpResponseRedirect('/' + entity)
def delete_api(endpoint, request):
"""
    Send a DELETE request to the API endpoint and check the response status.
"""
response = requests.delete(
endpoint,
headers=get_token_header(request=request),
)
check_status_code(response.status_code)
return response
| en | 0.35647 | # -*- coding: utf-8 -*- # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # Authors: <NAME> / Coopérative ARTEFACTS <<EMAIL>> TODO: À renseigner TODO: À renseigner TODO: À renseigner TODO: À renseigner TODO: À renseigner # Processing the problem names entities # Processing URL for Mission entity # Processing URL for Mission entity # Processing URL for Collection entity # Processing URL for Item entity # Problem with old Telemeta fields/entities # Concatenate domains # Remove the 'file' entry : if not, there some bugs # Previous page ( not an edit page ... ) TODO: À renseigner TODO: À renseigner # Concatenate domains # Previous page ( not an edit page ... ) TODO: À renseigner TODO: À renseigner TODO: À renseigner | 1.885997 | 2 |
python2.7/site-packages/twisted/internet/iocpreactor/client.py | 84KaliPleXon3/sslstrip-hsts-openwrt | 4 | 10044 | <reponame>84KaliPleXon3/sslstrip-hsts-openwrt<filename>python2.7/site-packages/twisted/internet/iocpreactor/client.py
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
import socket
from twisted.persisted import styles
from twisted.internet.base import BaseConnector
from twisted.internet import defer, interfaces, error
from twisted.python import failure
from abstract import ConnectedSocket
from ops import ConnectExOp
from util import StateEventMachineType
from zope.interface import implements
class ClientSocket(ConnectedSocket):
def __init__(self, sock, protocol, sf):
ConnectedSocket.__init__(self, sock, protocol, sf)
self.repstr = '<%s to %s at %x>' % (self.__class__, self.sf.addr, id(self))
self.logstr = protocol.__class__.__name__+",client"
self.startReading()
class _SubConnector:
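    # Helper that carries out a single connection attempt for SocketConnector:
    # resolve the address, bind the socket, then issue an overlapped ConnectEx
    # operation; a state of "dead" silences any callbacks that arrive afterwards.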
state = "connecting"
socket = None
def __init__(self, sf):
self.sf = sf
def startConnecting(self):
d = defer.maybeDeferred(self.sf.resolveAddress)
d.addCallback(self._cbResolveDone)
d.addErrback(self._ebResolveErr)
def _cbResolveDone(self, addr):
if self.state == "dead":
return
try:
skt = socket.socket(*self.sf.sockinfo)
except socket.error, se:
raise error.ConnectBindError(se[0], se[1])
try:
if self.sf.bindAddress is None:
self.sf.bindAddress = ("", 0) # necessary for ConnectEx
skt.bind(self.sf.bindAddress)
except socket.error, se:
raise error.ConnectBindError(se[0], se[1])
self.socket = skt
op = ConnectExOp(self)
op.initiateOp(self.socket, addr)
def _ebResolveErr(self, fail):
if self.state == "dead":
return
self.sf.connectionFailed(fail)
def connectDone(self):
if self.state == "dead":
return
self.sf.connectionSuccess()
def connectErr(self, err):
if self.state == "dead":
return
self.sf.connectionFailed(err)
class SocketConnector(styles.Ephemeral, object):
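    # The StateEventMachineType metaclass routes each event listed in `events` to
    # the handle_<state>_<event> method for the current state ("disconnected",
    # "connecting" or "connected").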
__metaclass__ = StateEventMachineType
implements(interfaces.IConnector)
transport_class = ClientSocket
events = ["stopConnecting", "disconnect", "connect"]
sockinfo = None
factoryStarted = False
timeoutID = None
def __init__(self, addr, factory, timeout, bindAddress):
from twisted.internet import reactor
self.state = "disconnected"
self.addr = addr
self.factory = factory
self.timeout = timeout
self.bindAddress = bindAddress
self.reactor = reactor
self.prepareAddress()
def handle_connecting_stopConnecting(self):
self.connectionFailed(failure.Failure(error.UserError()))
def handle_disconnected_stopConnecting(self):
raise error.NotConnectingError
handle_connected_stopConnecting = handle_disconnected_stopConnecting
handle_connecting_disconnect = handle_connecting_stopConnecting
def handle_connected_disconnect(self):
self.transport.loseConnection()
def handle_disconnected_disconnect(self):
pass
def handle_connecting_connect(self):
raise RuntimeError, "can't connect in this state"
handle_connected_connect = handle_connecting_connect
def handle_disconnected_connect(self):
self.state = "connecting"
if not self.factoryStarted:
self.factory.doStart()
self.factoryStarted = True
if self.timeout is not None:
self.timeoutID = self.reactor.callLater(self.timeout, self.connectionFailed, failure.Failure(error.TimeoutError()))
self.sub = _SubConnector(self)
self.sub.startConnecting()
self.factory.startedConnecting(self)
def prepareAddress(self):
raise NotImplementedError
def resolveAddress(self):
raise NotImplementedError
def connectionLost(self, reason):
self.state = "disconnected"
self.factory.clientConnectionLost(self, reason)
if self.state == "disconnected":
# factory hasn't called our connect() method
self.factory.doStop()
self.factoryStarted = 0
def connectionFailed(self, reason):
if self.sub.socket:
self.sub.socket.close()
self.sub.state = "dead"
del self.sub
self.state = "disconnected"
self.cancelTimeout()
self.factory.clientConnectionFailed(self, reason)
if self.state == "disconnected":
# factory hasn't called our connect() method
self.factory.doStop()
self.factoryStarted = 0
def cancelTimeout(self):
if self.timeoutID:
try:
self.timeoutID.cancel()
except ValueError:
pass
del self.timeoutID
def connectionSuccess(self):
socket = self.sub.socket
self.sub.state = "dead"
del self.sub
self.state = "connected"
self.cancelTimeout()
p = self.factory.buildProtocol(self.buildAddress(socket.getpeername()))
self.transport = self.transport_class(socket, p, self)
p.makeConnection(self.transport)
| # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
import socket
from twisted.persisted import styles
from twisted.internet.base import BaseConnector
from twisted.internet import defer, interfaces, error
from twisted.python import failure
from abstract import ConnectedSocket
from ops import ConnectExOp
from util import StateEventMachineType
from zope.interface import implements
class ClientSocket(ConnectedSocket):
def __init__(self, sock, protocol, sf):
ConnectedSocket.__init__(self, sock, protocol, sf)
self.repstr = '<%s to %s at %x>' % (self.__class__, self.sf.addr, id(self))
self.logstr = protocol.__class__.__name__+",client"
self.startReading()
class _SubConnector:
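    # Helper that carries out a single connection attempt for SocketConnector:
    # resolve the address, bind the socket, then issue an overlapped ConnectEx
    # operation; a state of "dead" silences any callbacks that arrive afterwards.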
state = "connecting"
socket = None
def __init__(self, sf):
self.sf = sf
def startConnecting(self):
d = defer.maybeDeferred(self.sf.resolveAddress)
d.addCallback(self._cbResolveDone)
d.addErrback(self._ebResolveErr)
def _cbResolveDone(self, addr):
if self.state == "dead":
return
try:
skt = socket.socket(*self.sf.sockinfo)
except socket.error, se:
raise error.ConnectBindError(se[0], se[1])
try:
if self.sf.bindAddress is None:
self.sf.bindAddress = ("", 0) # necessary for ConnectEx
skt.bind(self.sf.bindAddress)
except socket.error, se:
raise error.ConnectBindError(se[0], se[1])
self.socket = skt
op = ConnectExOp(self)
op.initiateOp(self.socket, addr)
def _ebResolveErr(self, fail):
if self.state == "dead":
return
self.sf.connectionFailed(fail)
def connectDone(self):
if self.state == "dead":
return
self.sf.connectionSuccess()
def connectErr(self, err):
if self.state == "dead":
return
self.sf.connectionFailed(err)
class SocketConnector(styles.Ephemeral, object):
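    # The StateEventMachineType metaclass routes each event listed in `events` to
    # the handle_<state>_<event> method for the current state ("disconnected",
    # "connecting" or "connected").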
__metaclass__ = StateEventMachineType
implements(interfaces.IConnector)
transport_class = ClientSocket
events = ["stopConnecting", "disconnect", "connect"]
sockinfo = None
factoryStarted = False
timeoutID = None
def __init__(self, addr, factory, timeout, bindAddress):
from twisted.internet import reactor
self.state = "disconnected"
self.addr = addr
self.factory = factory
self.timeout = timeout
self.bindAddress = bindAddress
self.reactor = reactor
self.prepareAddress()
def handle_connecting_stopConnecting(self):
self.connectionFailed(failure.Failure(error.UserError()))
def handle_disconnected_stopConnecting(self):
raise error.NotConnectingError
handle_connected_stopConnecting = handle_disconnected_stopConnecting
handle_connecting_disconnect = handle_connecting_stopConnecting
def handle_connected_disconnect(self):
self.transport.loseConnection()
def handle_disconnected_disconnect(self):
pass
def handle_connecting_connect(self):
raise RuntimeError, "can't connect in this state"
handle_connected_connect = handle_connecting_connect
def handle_disconnected_connect(self):
self.state = "connecting"
if not self.factoryStarted:
self.factory.doStart()
self.factoryStarted = True
if self.timeout is not None:
self.timeoutID = self.reactor.callLater(self.timeout, self.connectionFailed, failure.Failure(error.TimeoutError()))
self.sub = _SubConnector(self)
self.sub.startConnecting()
self.factory.startedConnecting(self)
def prepareAddress(self):
raise NotImplementedError
def resolveAddress(self):
raise NotImplementedError
def connectionLost(self, reason):
self.state = "disconnected"
self.factory.clientConnectionLost(self, reason)
if self.state == "disconnected":
# factory hasn't called our connect() method
self.factory.doStop()
self.factoryStarted = 0
def connectionFailed(self, reason):
if self.sub.socket:
self.sub.socket.close()
self.sub.state = "dead"
del self.sub
self.state = "disconnected"
self.cancelTimeout()
self.factory.clientConnectionFailed(self, reason)
if self.state == "disconnected":
# factory hasn't called our connect() method
self.factory.doStop()
self.factoryStarted = 0
def cancelTimeout(self):
if self.timeoutID:
try:
self.timeoutID.cancel()
except ValueError:
pass
del self.timeoutID
def connectionSuccess(self):
socket = self.sub.socket
self.sub.state = "dead"
del self.sub
self.state = "connected"
self.cancelTimeout()
p = self.factory.buildProtocol(self.buildAddress(socket.getpeername()))
self.transport = self.transport_class(socket, p, self)
p.makeConnection(self.transport) | en | 0.915906 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories. # See LICENSE for details. # necessary for ConnectEx # factory hasn't called our connect() method # factory hasn't called our connect() method | 2.062129 | 2 |
pypika/tests/dialects/test_mssql.py | uhrm/pypika | 0 | 10045 | <gh_stars>0
import unittest
from pypika import Table
from pypika.analytics import Count
from pypika.dialects import MSSQLQuery
from pypika.utils import QueryException
class SelectTests(unittest.TestCase):
def test_normal_select(self):
q = MSSQLQuery.from_("abc").select("def")
self.assertEqual('SELECT "def" FROM "abc"', str(q))
def test_distinct_select(self):
q = MSSQLQuery.from_("abc").select("def").distinct()
self.assertEqual('SELECT DISTINCT "def" FROM "abc"', str(q))
def test_top_distinct_select(self):
q = MSSQLQuery.from_("abc").select("def").top(10).distinct()
self.assertEqual('SELECT DISTINCT TOP (10) "def" FROM "abc"', str(q))
def test_top_select(self):
q = MSSQLQuery.from_("abc").select("def").top(10)
self.assertEqual('SELECT TOP (10) "def" FROM "abc"', str(q))
def test_top_select_non_int(self):
with self.assertRaisesRegex(QueryException, "TOP value must be an integer"):
MSSQLQuery.from_("abc").select("def").top("a")
def test_limit(self):
q = MSSQLQuery.from_("abc").select("def").orderby("def").limit(10)
self.assertEqual('SELECT "def" FROM "abc" ORDER BY "def" OFFSET 0 ROWS FETCH NEXT 10 ROWS ONLY', str(q))
def test_fetch_next(self):
q = MSSQLQuery.from_("abc").select("def").orderby("def").fetch_next(10)
self.assertEqual('SELECT "def" FROM "abc" ORDER BY "def" OFFSET 0 ROWS FETCH NEXT 10 ROWS ONLY', str(q))
def test_offset(self):
q = MSSQLQuery.from_("abc").select("def").orderby("def").offset(10)
self.assertEqual('SELECT "def" FROM "abc" ORDER BY "def" OFFSET 10 ROWS', str(q))
def test_fetch_next_with_offset(self):
q = MSSQLQuery.from_("abc").select("def").orderby("def").fetch_next(10).offset(10)
self.assertEqual('SELECT "def" FROM "abc" ORDER BY "def" OFFSET 10 ROWS FETCH NEXT 10 ROWS ONLY', str(q))
def test_groupby_alias_False_does_not_group_by_alias_with_standard_query(self):
t = Table('table1')
col = t.abc.as_('a')
q = MSSQLQuery.from_(t).select(col, Count('*')).groupby(col)
self.assertEqual('SELECT "abc" "a",COUNT(\'*\') FROM "table1" GROUP BY "abc"', str(q))
def test_groupby_alias_False_does_not_group_by_alias_when_subqueries_are_present(self):
t = Table('table1')
subquery = MSSQLQuery.from_(t).select(t.abc)
col = subquery.abc.as_('a')
q = MSSQLQuery.from_(subquery).select(col, Count('*')).groupby(col)
self.assertEqual(
'SELECT "sq0"."abc" "a",COUNT(\'*\') FROM (SELECT "abc" FROM "table1") "sq0" GROUP BY "sq0"."abc"', str(q)
)
| import unittest
from pypika import Table
from pypika.analytics import Count
from pypika.dialects import MSSQLQuery
from pypika.utils import QueryException
class SelectTests(unittest.TestCase):
def test_normal_select(self):
q = MSSQLQuery.from_("abc").select("def")
self.assertEqual('SELECT "def" FROM "abc"', str(q))
def test_distinct_select(self):
q = MSSQLQuery.from_("abc").select("def").distinct()
self.assertEqual('SELECT DISTINCT "def" FROM "abc"', str(q))
def test_top_distinct_select(self):
q = MSSQLQuery.from_("abc").select("def").top(10).distinct()
self.assertEqual('SELECT DISTINCT TOP (10) "def" FROM "abc"', str(q))
def test_top_select(self):
q = MSSQLQuery.from_("abc").select("def").top(10)
self.assertEqual('SELECT TOP (10) "def" FROM "abc"', str(q))
def test_top_select_non_int(self):
with self.assertRaisesRegex(QueryException, "TOP value must be an integer"):
MSSQLQuery.from_("abc").select("def").top("a")
def test_limit(self):
q = MSSQLQuery.from_("abc").select("def").orderby("def").limit(10)
self.assertEqual('SELECT "def" FROM "abc" ORDER BY "def" OFFSET 0 ROWS FETCH NEXT 10 ROWS ONLY', str(q))
def test_fetch_next(self):
q = MSSQLQuery.from_("abc").select("def").orderby("def").fetch_next(10)
self.assertEqual('SELECT "def" FROM "abc" ORDER BY "def" OFFSET 0 ROWS FETCH NEXT 10 ROWS ONLY', str(q))
def test_offset(self):
q = MSSQLQuery.from_("abc").select("def").orderby("def").offset(10)
self.assertEqual('SELECT "def" FROM "abc" ORDER BY "def" OFFSET 10 ROWS', str(q))
def test_fetch_next_with_offset(self):
q = MSSQLQuery.from_("abc").select("def").orderby("def").fetch_next(10).offset(10)
self.assertEqual('SELECT "def" FROM "abc" ORDER BY "def" OFFSET 10 ROWS FETCH NEXT 10 ROWS ONLY', str(q))
def test_groupby_alias_False_does_not_group_by_alias_with_standard_query(self):
t = Table('table1')
col = t.abc.as_('a')
q = MSSQLQuery.from_(t).select(col, Count('*')).groupby(col)
self.assertEqual('SELECT "abc" "a",COUNT(\'*\') FROM "table1" GROUP BY "abc"', str(q))
def test_groupby_alias_False_does_not_group_by_alias_when_subqueries_are_present(self):
t = Table('table1')
subquery = MSSQLQuery.from_(t).select(t.abc)
col = subquery.abc.as_('a')
q = MSSQLQuery.from_(subquery).select(col, Count('*')).groupby(col)
self.assertEqual(
'SELECT "sq0"."abc" "a",COUNT(\'*\') FROM (SELECT "abc" FROM "table1") "sq0" GROUP BY "sq0"."abc"', str(q)
) | none | 1 | 2.619437 | 3 |
|
Postprocessing/Hardt/Hardt.py | maliha93/Fairness-Analysis-Code | 0 | 10046 | <reponame>maliha93/Fairness-Analysis-Code
import cvxpy as cvx
import numpy as np
from collections import namedtuple
from metric import metric, cd
import pandas as pd
import sys
from helper import make_dataset
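# Equalized-odds post-processing: mixing (flip) rates are fitted on a validation
# split and then applied to held-out predictions for each group. This appears to
# follow the approach of Hardt et al. (2016), "Equality of Opportunity in
# Supervised Learning".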
class Model(namedtuple('Model', 'pred label')):
def logits(self):
raw_logits = np.clip(np.log(self.pred / (1 - self.pred)), -100, 100)
return raw_logits
def num_samples(self):
return len(self.pred)
def base_rate(self):
"""
Percentage of samples belonging to the positive class
"""
return np.mean(self.label)
def accuracy(self):
return self.accuracies().mean()
def precision(self):
return (self.label[self.pred.round() == 1]).mean()
def recall(self):
        # Fraction of actual positives that are predicted positive: TP / (TP + FN).
        return (self.pred[self.label == 1].round()).mean()
def tpr(self):
"""
True positive rate
"""
return np.mean(np.logical_and(self.pred.round() == 1, self.label == 1))
def fpr(self):
"""
False positive rate
"""
return np.mean(np.logical_and(self.pred.round() == 1, self.label == 0))
def tnr(self):
"""
True negative rate
"""
return np.mean(np.logical_and(self.pred.round() == 0, self.label == 0))
def fnr(self):
"""
False negative rate
"""
return np.mean(np.logical_and(self.pred.round() == 0, self.label == 1))
def fn_cost(self):
"""
Generalized false negative cost
"""
return 1 - self.pred[self.label == 1].mean()
def fp_cost(self):
"""
Generalized false positive cost
"""
return self.pred[self.label == 0].mean()
def accuracies(self):
return self.pred.round() == self.label
def eq_odds(self, othr, mix_rates=None):
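        # Build "fair" copies of both groups' predictions: a random fraction of
        # predicted negatives is flipped to positive (the n2p rate) and a random
        # fraction of predicted positives is flipped to negative (1 - the p2p rate),
        # using the mix rates found by eq_odds_optimal_mix_rates (or those passed in).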
has_mix_rates = not (mix_rates is None)
if not has_mix_rates:
mix_rates = self.eq_odds_optimal_mix_rates(othr)
sp2p, sn2p, op2p, on2p = tuple(mix_rates)
self_fair_pred = self.pred.copy()
self_pp_indices, = np.nonzero(self.pred.round())
self_pn_indices, = np.nonzero(1 - self.pred.round())
np.random.shuffle(self_pp_indices)
np.random.shuffle(self_pn_indices)
n2p_indices = self_pn_indices[:int(len(self_pn_indices) * sn2p)]
self_fair_pred[n2p_indices] = 1 - self_fair_pred[n2p_indices]
p2n_indices = self_pp_indices[:int(len(self_pp_indices) * (1 - sp2p))]
self_fair_pred[p2n_indices] = 1 - self_fair_pred[p2n_indices]
othr_fair_pred = othr.pred.copy()
othr_pp_indices, = np.nonzero(othr.pred.round())
othr_pn_indices, = np.nonzero(1 - othr.pred.round())
np.random.shuffle(othr_pp_indices)
np.random.shuffle(othr_pn_indices)
n2p_indices = othr_pn_indices[:int(len(othr_pn_indices) * on2p)]
othr_fair_pred[n2p_indices] = 1 - othr_fair_pred[n2p_indices]
p2n_indices = othr_pp_indices[:int(len(othr_pp_indices) * (1 - op2p))]
othr_fair_pred[p2n_indices] = 1 - othr_fair_pred[p2n_indices]
fair_self = Model(self_fair_pred, self.label)
fair_othr = Model(othr_fair_pred, othr.label)
if not has_mix_rates:
return fair_self, fair_othr, mix_rates
else:
return fair_self, fair_othr
def eq_odds_optimal_mix_rates(self, othr):
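        # Solve a small linear program with cvxpy for the optimal mixing rates:
        # the flip probabilities for each group that minimise the combined error
        # while equalising the generalized error rates between the two groups.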
sbr = float(self.base_rate())
obr = float(othr.base_rate())
sp2p = cvx.Variable(1)
sp2n = cvx.Variable(1)
sn2p = cvx.Variable(1)
sn2n = cvx.Variable(1)
op2p = cvx.Variable(1)
op2n = cvx.Variable(1)
on2p = cvx.Variable(1)
on2n = cvx.Variable(1)
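        # The eight variables are conditional keep/flip probabilities: *p2p / *p2n
        # act on predicted positives, *n2p / *n2n on predicted negatives, with the
        # 's' prefix for this group (self) and 'o' for the other group (othr).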
sfpr = self.fpr() * sp2p + self.tnr() * sn2p
sfnr = self.fnr() * sn2n + self.tpr() * sp2n
ofpr = othr.fpr() * op2p + othr.tnr() * on2p
ofnr = othr.fnr() * on2n + othr.tpr() * op2n
error = sfpr + sfnr + ofpr + ofnr
sflip = 1 - self.pred
sconst = self.pred
oflip = 1 - othr.pred
oconst = othr.pred
sm_tn = np.logical_and(self.pred.round() == 0, self.label == 0)
sm_fn = np.logical_and(self.pred.round() == 0, self.label == 1)
sm_tp = np.logical_and(self.pred.round() == 1, self.label == 1)
sm_fp = np.logical_and(self.pred.round() == 1, self.label == 0)
om_tn = np.logical_and(othr.pred.round() == 0, othr.label == 0)
om_fn = np.logical_and(othr.pred.round() == 0, othr.label == 1)
om_tp = np.logical_and(othr.pred.round() == 1, othr.label == 1)
om_fp = np.logical_and(othr.pred.round() == 1, othr.label == 0)
spn_given_p = (sn2p * (sflip * sm_fn).mean() + sn2n * (sconst * sm_fn).mean()) / sbr + \
(sp2p * (sconst * sm_tp).mean() + sp2n * (sflip * sm_tp).mean()) / sbr
spp_given_n = (sp2n * (sflip * sm_fp).mean() + sp2p * (sconst * sm_fp).mean()) / (1 - sbr) + \
(sn2p * (sflip * sm_tn).mean() + sn2n * (sconst * sm_tn).mean()) / (1 - sbr)
opn_given_p = (on2p * (oflip * om_fn).mean() + on2n * (oconst * om_fn).mean()) / obr + \
(op2p * (oconst * om_tp).mean() + op2n * (oflip * om_tp).mean()) / obr
opp_given_n = (op2n * (oflip * om_fp).mean() + op2p * (oconst * om_fp).mean()) / (1 - obr) + \
(on2p * (oflip * om_tn).mean() + on2n * (oconst * om_tn).mean()) / (1 - obr)
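        # Constraints: each keep/flip pair sums to one, every probability lies in
        # [0, 1], and the mixed generalized rates P(pred=1 | y=0) and P(pred=0 | y=1)
        # must be equal across the two groups.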
constraints = [
sp2p == 1 - sp2n,
sn2p == 1 - sn2n,
op2p == 1 - op2n,
on2p == 1 - on2n,
sp2p <= 1,
sp2p >= 0,
sn2p <= 1,
sn2p >= 0,
op2p <= 1,
op2p >= 0,
on2p <= 1,
on2p >= 0,
spp_given_n == opp_given_n,
spn_given_p == opn_given_p,
]
prob = cvx.Problem(cvx.Minimize(error), constraints)
prob.solve()
res = np.array([sp2p.value, sn2p.value, op2p.value, on2p.value])
return res
def __repr__(self):
return '\n'.join([
'Accuracy:\t%.3f' % self.accuracy(),
'F.P. cost:\t%.3f' % self.fp_cost(),
'F.N. cost:\t%.3f' % self.fn_cost(),
'T.P. rate:\t%.3f' % self.tpr(),
'T.N. rate:\t%.3f' % self.tnr(),
'Precision:\t%.3f' % self.precision(),
'Recall:\t\t%.3f' % self.recall(),
'Base rate:\t%.3f' % self.base_rate(),
'Avg. score:\t%.3f' % self.pred.mean(),
])
def Adult(f="data/adult_post.csv"):
data_filename = f
test_and_val_data = pd.read_csv(data_filename)
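    # Split the data into two sets (first 70% / last 30%) - one for computing the
    # fairness constants and one for evaluating the post-processed predictions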
order = np.arange(len(test_and_val_data))
leng = order.shape[0]
val_indices = order[0:int(leng*0.7)]
test_indices = order[int(leng*0.7):]
val_data = test_and_val_data.iloc[val_indices]
test_data = test_and_val_data.iloc[test_indices]
# Create model objects - one for each group, validation and test
group_0_val_data = val_data[val_data['group'] == 0]
group_1_val_data = val_data[val_data['group'] == 1]
group_0_test_data = test_data[test_data['group'] == 0]
group_1_test_data = test_data[test_data['group'] == 1]
group_0_val_model = Model(group_0_val_data['prediction'].to_numpy(), group_0_val_data['label'].to_numpy())
group_1_val_model = Model(group_1_val_data['prediction'].to_numpy(), group_1_val_data['label'].to_numpy())
group_0_test_model = Model(group_0_test_data['prediction'].to_numpy(), group_0_test_data['label'].to_numpy())
group_1_test_model = Model(group_1_test_data['prediction'].to_numpy(), group_1_test_data['label'].to_numpy())
# Find mixing rates for equalized odds models
_, _, mix_rates = Model.eq_odds(group_0_val_model, group_1_val_model)
# Apply the mixing rates to the test models
eq_odds_group_0_test_model, eq_odds_group_1_test_model = Model.eq_odds(group_0_test_model,
group_1_test_model,
mix_rates)
cd_eq_odds_group_0_test_model, cd_eq_odds_group_1_test_model = Model.eq_odds(group_1_test_model,
group_0_test_model,
mix_rates)
metric(eq_odds_group_0_test_model.label, eq_odds_group_0_test_model.pred,
eq_odds_group_1_test_model.label, eq_odds_group_1_test_model.pred)
y_cd = cd(eq_odds_group_0_test_model.pred.round(), eq_odds_group_1_test_model.pred.round(),\
cd_eq_odds_group_0_test_model.pred.round(),cd_eq_odds_group_1_test_model.pred.round())
group_0_test_data['pred'] = eq_odds_group_0_test_model.pred.round()
group_1_test_data['pred'] = eq_odds_group_1_test_model.pred.round()
df = group_0_test_data.append(group_1_test_data)
df = df.drop(['group', 'prediction', 'label'], axis=1).sample(frac=1)
df.to_csv("results_Hardt/adult_test_repaired.csv", index=False)
np.savetxt("results_Hardt/adult_test_repaired_cd.csv", y_cd, delimiter=",")
def Compas(f="data/compas_post.csv", f1='', f2=''):
data_filename = f
test_and_val_data = pd.read_csv(data_filename)
    # Split the data into two sets (first 70% / last 30%) - one for computing the fairness constants
order = np.arange(len(test_and_val_data))
leng = order.shape[0]
val_indices = order[0:int(leng*0.7)]
test_indices = order[int(leng*0.7):]
val_data = test_and_val_data.iloc[val_indices]
test_data = test_and_val_data.iloc[test_indices]
# Create model objects - one for each group, validation and test
group_0_val_data = val_data[val_data['group'] == 0]
group_1_val_data = val_data[val_data['group'] == 1]
group_0_test_data = test_data[test_data['group'] == 0]
group_1_test_data = test_data[test_data['group'] == 1]
group_0_val_model = Model(group_0_val_data['prediction'].to_numpy(), group_0_val_data['label'].to_numpy())
group_1_val_model = Model(group_1_val_data['prediction'].to_numpy(), group_1_val_data['label'].to_numpy())
group_0_test_model = Model(group_0_test_data['prediction'].to_numpy(), group_0_test_data['label'].to_numpy())
group_1_test_model = Model(group_1_test_data['prediction'].to_numpy(), group_1_test_data['label'].to_numpy())
# Find mixing rates for equalized odds models
_, _, mix_rates = Model.eq_odds(group_0_val_model, group_1_val_model)
# Apply the mixing rates to the test models
eq_odds_group_0_test_model, eq_odds_group_1_test_model = Model.eq_odds(group_0_test_model,
group_1_test_model,
mix_rates)
cd_eq_odds_group_0_test_model, cd_eq_odds_group_1_test_model = Model.eq_odds(group_1_test_model,
group_0_test_model,
mix_rates)
metric(eq_odds_group_0_test_model.label, eq_odds_group_0_test_model.pred,
eq_odds_group_1_test_model.label, eq_odds_group_1_test_model.pred)
y_cd = cd(eq_odds_group_0_test_model.pred.round(), eq_odds_group_1_test_model.pred.round(),\
cd_eq_odds_group_0_test_model.pred.round(),cd_eq_odds_group_1_test_model.pred.round())
group_0_test_data['pred'] = eq_odds_group_0_test_model.pred.round()
group_1_test_data['pred'] = eq_odds_group_1_test_model.pred.round()
df = group_0_test_data.append(group_1_test_data)
df = df.drop(['group', 'prediction', 'label'], axis=1).sample(frac=1)
df.to_csv(f1+"results_Hardt/compas_test_repaired"+f2+".csv", index=False)
np.savetxt(f1+"results_Hardt/compas_test_repaired"+f2+"_cd.csv", y_cd, delimiter=",")
def German(f="data/german_post.csv"):
data_filename = f
test_and_val_data = pd.read_csv(data_filename)
    # Split the data into two sets (first 70% / last 30%) - one for computing the fairness constants
order = np.arange(len(test_and_val_data))
leng = order.shape[0]
val_indices = order[0:int(leng*0.7)]
test_indices = order[int(leng*0.7):]
val_data = test_and_val_data.iloc[val_indices]
test_data = test_and_val_data.iloc[test_indices]
# Create model objects - one for each group, validation and test
group_0_val_data = val_data[val_data['group'] == 0]
group_1_val_data = val_data[val_data['group'] == 1]
group_0_test_data = test_data[test_data['group'] == 0]
group_1_test_data = test_data[test_data['group'] == 1]
group_0_val_model = Model(group_0_val_data['prediction'].to_numpy(), group_0_val_data['label'].to_numpy())
group_1_val_model = Model(group_1_val_data['prediction'].to_numpy(), group_1_val_data['label'].to_numpy())
group_0_test_model = Model(group_0_test_data['prediction'].to_numpy(), group_0_test_data['label'].to_numpy())
group_1_test_model = Model(group_1_test_data['prediction'].to_numpy(), group_1_test_data['label'].to_numpy())
# Find mixing rates for equalized odds models
_, _, mix_rates = Model.eq_odds(group_0_val_model, group_1_val_model)
# Apply the mixing rates to the test models
eq_odds_group_0_test_model, eq_odds_group_1_test_model = Model.eq_odds(group_0_test_model,
group_1_test_model,
mix_rates)
cd_eq_odds_group_0_test_model, cd_eq_odds_group_1_test_model = Model.eq_odds(group_1_test_model,
group_0_test_model,
mix_rates)
metric(eq_odds_group_0_test_model.label, eq_odds_group_0_test_model.pred,
eq_odds_group_1_test_model.label, eq_odds_group_1_test_model.pred)
y_cd = cd(eq_odds_group_0_test_model.pred.round(), eq_odds_group_1_test_model.pred.round(),\
cd_eq_odds_group_0_test_model.pred.round(),cd_eq_odds_group_1_test_model.pred.round())
group_0_test_data['pred'] = eq_odds_group_0_test_model.pred.round()
group_1_test_data['pred'] = eq_odds_group_1_test_model.pred.round()
df = group_0_test_data.append(group_1_test_data)
df = df.drop(['group', 'prediction', 'label'], axis=1).sample(frac=1)
df.to_csv("results_Hardt/german_test_repaired.csv", index=False)
np.savetxt("results_Hardt/german_test_repaired_cd.csv", y_cd, delimiter=",")
def Hardt(dataset):
make_dataset(dataset)
if dataset == 'adult':
Adult()
elif dataset == 'compas':
Compas()
elif dataset == 'german':
German()
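# --- Hypothetical usage sketch (not part of the original script) -----------
# A minimal illustration of how the post-processing entry point above could
# be invoked; it assumes the pre-processed CSVs (e.g. data/compas_post.csv)
# and the results_Hardt/ output directory already exist.
if __name__ == "__main__":
    Hardt("compas")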
| import cvxpy as cvx
import numpy as np
from collections import namedtuple
from metric import metric, cd
import pandas as pd
import sys
from helper import make_dataset
class Model(namedtuple('Model', 'pred label')):
def logits(self):
raw_logits = np.clip(np.log(self.pred / (1 - self.pred)), -100, 100)
return raw_logits
def num_samples(self):
return len(self.pred)
def base_rate(self):
"""
Percentage of samples belonging to the positive class
"""
return np.mean(self.label)
def accuracy(self):
return self.accuracies().mean()
def precision(self):
return (self.label[self.pred.round() == 1]).mean()
    def recall(self):
        return (self.pred[self.label == 1].round()).mean()
def tpr(self):
"""
True positive rate
"""
return np.mean(np.logical_and(self.pred.round() == 1, self.label == 1))
def fpr(self):
"""
False positive rate
"""
return np.mean(np.logical_and(self.pred.round() == 1, self.label == 0))
def tnr(self):
"""
True negative rate
"""
return np.mean(np.logical_and(self.pred.round() == 0, self.label == 0))
def fnr(self):
"""
False negative rate
"""
return np.mean(np.logical_and(self.pred.round() == 0, self.label == 1))
def fn_cost(self):
"""
Generalized false negative cost
"""
return 1 - self.pred[self.label == 1].mean()
def fp_cost(self):
"""
Generalized false positive cost
"""
return self.pred[self.label == 0].mean()
def accuracies(self):
return self.pred.round() == self.label
def eq_odds(self, othr, mix_rates=None):
has_mix_rates = not (mix_rates is None)
if not has_mix_rates:
mix_rates = self.eq_odds_optimal_mix_rates(othr)
sp2p, sn2p, op2p, on2p = tuple(mix_rates)
self_fair_pred = self.pred.copy()
self_pp_indices, = np.nonzero(self.pred.round())
self_pn_indices, = np.nonzero(1 - self.pred.round())
np.random.shuffle(self_pp_indices)
np.random.shuffle(self_pn_indices)
n2p_indices = self_pn_indices[:int(len(self_pn_indices) * sn2p)]
self_fair_pred[n2p_indices] = 1 - self_fair_pred[n2p_indices]
p2n_indices = self_pp_indices[:int(len(self_pp_indices) * (1 - sp2p))]
self_fair_pred[p2n_indices] = 1 - self_fair_pred[p2n_indices]
othr_fair_pred = othr.pred.copy()
othr_pp_indices, = np.nonzero(othr.pred.round())
othr_pn_indices, = np.nonzero(1 - othr.pred.round())
np.random.shuffle(othr_pp_indices)
np.random.shuffle(othr_pn_indices)
n2p_indices = othr_pn_indices[:int(len(othr_pn_indices) * on2p)]
othr_fair_pred[n2p_indices] = 1 - othr_fair_pred[n2p_indices]
p2n_indices = othr_pp_indices[:int(len(othr_pp_indices) * (1 - op2p))]
othr_fair_pred[p2n_indices] = 1 - othr_fair_pred[p2n_indices]
fair_self = Model(self_fair_pred, self.label)
fair_othr = Model(othr_fair_pred, othr.label)
if not has_mix_rates:
return fair_self, fair_othr, mix_rates
else:
return fair_self, fair_othr
def eq_odds_optimal_mix_rates(self, othr):
sbr = float(self.base_rate())
obr = float(othr.base_rate())
sp2p = cvx.Variable(1)
sp2n = cvx.Variable(1)
sn2p = cvx.Variable(1)
sn2n = cvx.Variable(1)
op2p = cvx.Variable(1)
op2n = cvx.Variable(1)
on2p = cvx.Variable(1)
on2n = cvx.Variable(1)
sfpr = self.fpr() * sp2p + self.tnr() * sn2p
sfnr = self.fnr() * sn2n + self.tpr() * sp2n
ofpr = othr.fpr() * op2p + othr.tnr() * on2p
ofnr = othr.fnr() * on2n + othr.tpr() * op2n
error = sfpr + sfnr + ofpr + ofnr
sflip = 1 - self.pred
sconst = self.pred
oflip = 1 - othr.pred
oconst = othr.pred
sm_tn = np.logical_and(self.pred.round() == 0, self.label == 0)
sm_fn = np.logical_and(self.pred.round() == 0, self.label == 1)
sm_tp = np.logical_and(self.pred.round() == 1, self.label == 1)
sm_fp = np.logical_and(self.pred.round() == 1, self.label == 0)
om_tn = np.logical_and(othr.pred.round() == 0, othr.label == 0)
om_fn = np.logical_and(othr.pred.round() == 0, othr.label == 1)
om_tp = np.logical_and(othr.pred.round() == 1, othr.label == 1)
om_fp = np.logical_and(othr.pred.round() == 1, othr.label == 0)
spn_given_p = (sn2p * (sflip * sm_fn).mean() + sn2n * (sconst * sm_fn).mean()) / sbr + \
(sp2p * (sconst * sm_tp).mean() + sp2n * (sflip * sm_tp).mean()) / sbr
spp_given_n = (sp2n * (sflip * sm_fp).mean() + sp2p * (sconst * sm_fp).mean()) / (1 - sbr) + \
(sn2p * (sflip * sm_tn).mean() + sn2n * (sconst * sm_tn).mean()) / (1 - sbr)
opn_given_p = (on2p * (oflip * om_fn).mean() + on2n * (oconst * om_fn).mean()) / obr + \
(op2p * (oconst * om_tp).mean() + op2n * (oflip * om_tp).mean()) / obr
opp_given_n = (op2n * (oflip * om_fp).mean() + op2p * (oconst * om_fp).mean()) / (1 - obr) + \
(on2p * (oflip * om_tn).mean() + on2n * (oconst * om_tn).mean()) / (1 - obr)
constraints = [
sp2p == 1 - sp2n,
sn2p == 1 - sn2n,
op2p == 1 - op2n,
on2p == 1 - on2n,
sp2p <= 1,
sp2p >= 0,
sn2p <= 1,
sn2p >= 0,
op2p <= 1,
op2p >= 0,
on2p <= 1,
on2p >= 0,
spp_given_n == opp_given_n,
spn_given_p == opn_given_p,
]
prob = cvx.Problem(cvx.Minimize(error), constraints)
prob.solve()
res = np.array([sp2p.value, sn2p.value, op2p.value, on2p.value])
return res
def __repr__(self):
return '\n'.join([
'Accuracy:\t%.3f' % self.accuracy(),
'F.P. cost:\t%.3f' % self.fp_cost(),
'F.N. cost:\t%.3f' % self.fn_cost(),
'T.P. rate:\t%.3f' % self.tpr(),
'T.N. rate:\t%.3f' % self.tnr(),
'Precision:\t%.3f' % self.precision(),
'Recall:\t\t%.3f' % self.recall(),
'Base rate:\t%.3f' % self.base_rate(),
'Avg. score:\t%.3f' % self.pred.mean(),
])
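# --- Hypothetical usage sketch (not part of the original module) -----------
# Shows how the Model/eq_odds API above could be exercised on synthetic
# scores and labels; the random data below is made up for illustration only.
def _demo_eq_odds():
    rng = np.random.RandomState(0)
    group_a = Model(rng.uniform(size=200), (rng.uniform(size=200) < 0.4).astype(float))
    group_b = Model(rng.uniform(size=200), (rng.uniform(size=200) < 0.6).astype(float))
    # Solve the LP once; the returned mix rates could then be re-applied to
    # held-out Model instances via Model.eq_odds(..., mix_rates).
    fair_a, fair_b, mix_rates = Model.eq_odds(group_a, group_b)
    return fair_a, fair_b, mix_rates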
def Adult(f="data/adult_post.csv"):
data_filename = f
test_and_val_data = pd.read_csv(data_filename)
order = np.arange(len(test_and_val_data))
leng = order.shape[0]
val_indices = order[0:int(leng*0.7)]
test_indices = order[int(leng*0.7):]
val_data = test_and_val_data.iloc[val_indices]
test_data = test_and_val_data.iloc[test_indices]
# Create model objects - one for each group, validation and test
group_0_val_data = val_data[val_data['group'] == 0]
group_1_val_data = val_data[val_data['group'] == 1]
group_0_test_data = test_data[test_data['group'] == 0]
group_1_test_data = test_data[test_data['group'] == 1]
group_0_val_model = Model(group_0_val_data['prediction'].to_numpy(), group_0_val_data['label'].to_numpy())
group_1_val_model = Model(group_1_val_data['prediction'].to_numpy(), group_1_val_data['label'].to_numpy())
group_0_test_model = Model(group_0_test_data['prediction'].to_numpy(), group_0_test_data['label'].to_numpy())
group_1_test_model = Model(group_1_test_data['prediction'].to_numpy(), group_1_test_data['label'].to_numpy())
# Find mixing rates for equalized odds models
_, _, mix_rates = Model.eq_odds(group_0_val_model, group_1_val_model)
# Apply the mixing rates to the test models
eq_odds_group_0_test_model, eq_odds_group_1_test_model = Model.eq_odds(group_0_test_model,
group_1_test_model,
mix_rates)
cd_eq_odds_group_0_test_model, cd_eq_odds_group_1_test_model = Model.eq_odds(group_1_test_model,
group_0_test_model,
mix_rates)
metric(eq_odds_group_0_test_model.label, eq_odds_group_0_test_model.pred,
eq_odds_group_1_test_model.label, eq_odds_group_1_test_model.pred)
y_cd = cd(eq_odds_group_0_test_model.pred.round(), eq_odds_group_1_test_model.pred.round(),\
cd_eq_odds_group_0_test_model.pred.round(),cd_eq_odds_group_1_test_model.pred.round())
group_0_test_data['pred'] = eq_odds_group_0_test_model.pred.round()
group_1_test_data['pred'] = eq_odds_group_1_test_model.pred.round()
df = group_0_test_data.append(group_1_test_data)
df = df.drop(['group', 'prediction', 'label'], axis=1).sample(frac=1)
df.to_csv("results_Hardt/adult_test_repaired.csv", index=False)
np.savetxt("results_Hardt/adult_test_repaired_cd.csv", y_cd, delimiter=",")
def Compas(f="data/compas_post.csv", f1='', f2=''):
data_filename = f
test_and_val_data = pd.read_csv(data_filename)
    # Split the data into two sets (first 70 % / last 30 %, no shuffling) - one for computing the fairness constants
order = np.arange(len(test_and_val_data))
leng = order.shape[0]
val_indices = order[0:int(leng*0.7)]
test_indices = order[int(leng*0.7):]
val_data = test_and_val_data.iloc[val_indices]
test_data = test_and_val_data.iloc[test_indices]
# Create model objects - one for each group, validation and test
group_0_val_data = val_data[val_data['group'] == 0]
group_1_val_data = val_data[val_data['group'] == 1]
group_0_test_data = test_data[test_data['group'] == 0]
group_1_test_data = test_data[test_data['group'] == 1]
group_0_val_model = Model(group_0_val_data['prediction'].to_numpy(), group_0_val_data['label'].to_numpy())
group_1_val_model = Model(group_1_val_data['prediction'].to_numpy(), group_1_val_data['label'].to_numpy())
group_0_test_model = Model(group_0_test_data['prediction'].to_numpy(), group_0_test_data['label'].to_numpy())
group_1_test_model = Model(group_1_test_data['prediction'].to_numpy(), group_1_test_data['label'].to_numpy())
# Find mixing rates for equalized odds models
_, _, mix_rates = Model.eq_odds(group_0_val_model, group_1_val_model)
# Apply the mixing rates to the test models
eq_odds_group_0_test_model, eq_odds_group_1_test_model = Model.eq_odds(group_0_test_model,
group_1_test_model,
mix_rates)
cd_eq_odds_group_0_test_model, cd_eq_odds_group_1_test_model = Model.eq_odds(group_1_test_model,
group_0_test_model,
mix_rates)
metric(eq_odds_group_0_test_model.label, eq_odds_group_0_test_model.pred,
eq_odds_group_1_test_model.label, eq_odds_group_1_test_model.pred)
y_cd = cd(eq_odds_group_0_test_model.pred.round(), eq_odds_group_1_test_model.pred.round(),\
cd_eq_odds_group_0_test_model.pred.round(),cd_eq_odds_group_1_test_model.pred.round())
group_0_test_data['pred'] = eq_odds_group_0_test_model.pred.round()
group_1_test_data['pred'] = eq_odds_group_1_test_model.pred.round()
df = group_0_test_data.append(group_1_test_data)
df = df.drop(['group', 'prediction', 'label'], axis=1).sample(frac=1)
df.to_csv(f1+"results_Hardt/compas_test_repaired"+f2+".csv", index=False)
np.savetxt(f1+"results_Hardt/compas_test_repaired"+f2+"_cd.csv", y_cd, delimiter=",")
def German(f="data/german_post.csv"):
data_filename = f
test_and_val_data = pd.read_csv(data_filename)
    # Split the data into two sets (first 70 % / last 30 %, no shuffling) - one for computing the fairness constants
order = np.arange(len(test_and_val_data))
leng = order.shape[0]
val_indices = order[0:int(leng*0.7)]
test_indices = order[int(leng*0.7):]
val_data = test_and_val_data.iloc[val_indices]
test_data = test_and_val_data.iloc[test_indices]
# Create model objects - one for each group, validation and test
group_0_val_data = val_data[val_data['group'] == 0]
group_1_val_data = val_data[val_data['group'] == 1]
group_0_test_data = test_data[test_data['group'] == 0]
group_1_test_data = test_data[test_data['group'] == 1]
group_0_val_model = Model(group_0_val_data['prediction'].to_numpy(), group_0_val_data['label'].to_numpy())
group_1_val_model = Model(group_1_val_data['prediction'].to_numpy(), group_1_val_data['label'].to_numpy())
group_0_test_model = Model(group_0_test_data['prediction'].to_numpy(), group_0_test_data['label'].to_numpy())
group_1_test_model = Model(group_1_test_data['prediction'].to_numpy(), group_1_test_data['label'].to_numpy())
# Find mixing rates for equalized odds models
_, _, mix_rates = Model.eq_odds(group_0_val_model, group_1_val_model)
# Apply the mixing rates to the test models
eq_odds_group_0_test_model, eq_odds_group_1_test_model = Model.eq_odds(group_0_test_model,
group_1_test_model,
mix_rates)
cd_eq_odds_group_0_test_model, cd_eq_odds_group_1_test_model = Model.eq_odds(group_1_test_model,
group_0_test_model,
mix_rates)
metric(eq_odds_group_0_test_model.label, eq_odds_group_0_test_model.pred,
eq_odds_group_1_test_model.label, eq_odds_group_1_test_model.pred)
y_cd = cd(eq_odds_group_0_test_model.pred.round(), eq_odds_group_1_test_model.pred.round(),\
cd_eq_odds_group_0_test_model.pred.round(),cd_eq_odds_group_1_test_model.pred.round())
group_0_test_data['pred'] = eq_odds_group_0_test_model.pred.round()
group_1_test_data['pred'] = eq_odds_group_1_test_model.pred.round()
df = group_0_test_data.append(group_1_test_data)
df = df.drop(['group', 'prediction', 'label'], axis=1).sample(frac=1)
df.to_csv("results_Hardt/german_test_repaired.csv", index=False)
np.savetxt("results_Hardt/german_test_repaired_cd.csv", y_cd, delimiter=",")
def Hardt(dataset):
make_dataset(dataset)
if dataset == 'adult':
Adult()
elif dataset == 'compas':
Compas()
elif dataset == 'german':
German() | en | 0.892149 | Percentage of samples belonging to the positive class True positive rate False positive rate True negative rate False negative rate Generalized false negative cost Generalized false positive cost # Create model objects - one for each group, validation and test # Find mixing rates for equalized odds models # Apply the mixing rates to the test models # Randomly split the data into two sets - one for computing the fairness constants # Create model objects - one for each group, validation and test # Find mixing rates for equalized odds models # Apply the mixing rates to the test models # Randomly split the data into two sets - one for computing the fairness constants # Create model objects - one for each group, validation and test # Find mixing rates for equalized odds models # Apply the mixing rates to the test models | 2.653644 | 3 |
tests/test_exceptions.py | nesnahnoj/py3-textract | 2 | 10047 | <filename>tests/test_exceptions.py
import unittest
import os
import subprocess
import base
class ExceptionTestCase(base.GenericUtilities, unittest.TestCase):
"""This class contains a bunch of tests to make sure that textract
fails in expected ways.
"""
def test_unsupported_extension_cli(self):
"""Make sure unsupported extension exits with non-zero status"""
filename = self.get_temp_filename(extension="extension")
command = "textract %(filename)s 2> /dev/null" % locals()
self.assertEqual(1, subprocess.call(command, shell=True))
os.remove(filename)
def test_unsupported_extension_python(self):
"""Make sure unsupported extension raises the correct error"""
filename = self.get_temp_filename(extension="extension")
import textract
from textract.exceptions import ExtensionNotSupported
with self.assertRaises(ExtensionNotSupported):
textract.process(filename)
os.remove(filename)
def test_missing_filename_cli(self):
"""Make sure missing files exits with non-zero status"""
filename = self.get_temp_filename()
os.remove(filename)
command = "textract %(filename)s 2> /dev/null" % locals()
self.assertEqual(1, subprocess.call(command, shell=True))
def test_missing_filename_python(self):
"""Make sure missing files raise the correct error"""
filename = self.get_temp_filename()
os.remove(filename)
import textract
from textract.exceptions import MissingFileError
with self.assertRaises(MissingFileError):
textract.process(filename)
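# --- Hypothetical addition (not part of the original test module) ----------
# Allows running this file directly, e.g. `python tests/test_exceptions.py`;
# the original suite presumably relies on an external test runner instead.
if __name__ == '__main__':
    unittest.main()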
| <filename>tests/test_exceptions.py
import unittest
import os
import subprocess
import base
class ExceptionTestCase(base.GenericUtilities, unittest.TestCase):
"""This class contains a bunch of tests to make sure that textract
fails in expected ways.
"""
def test_unsupported_extension_cli(self):
"""Make sure unsupported extension exits with non-zero status"""
filename = self.get_temp_filename(extension="extension")
command = "textract %(filename)s 2> /dev/null" % locals()
self.assertEqual(1, subprocess.call(command, shell=True))
os.remove(filename)
def test_unsupported_extension_python(self):
"""Make sure unsupported extension raises the correct error"""
filename = self.get_temp_filename(extension="extension")
import textract
from textract.exceptions import ExtensionNotSupported
with self.assertRaises(ExtensionNotSupported):
textract.process(filename)
os.remove(filename)
def test_missing_filename_cli(self):
"""Make sure missing files exits with non-zero status"""
filename = self.get_temp_filename()
os.remove(filename)
command = "textract %(filename)s 2> /dev/null" % locals()
self.assertEqual(1, subprocess.call(command, shell=True))
def test_missing_filename_python(self):
"""Make sure missing files raise the correct error"""
filename = self.get_temp_filename()
os.remove(filename)
import textract
from textract.exceptions import MissingFileError
with self.assertRaises(MissingFileError):
textract.process(filename)
| en | 0.892638 | This class contains a bunch of tests to make sure that textract fails in expected ways. Make sure unsupported extension exits with non-zero status Make sure unsupported extension raises the correct error Make sure missing files exits with non-zero status Make sure missing files raise the correct error | 2.947029 | 3 |
dloud_ads/circular_queue.py | dataloudlabs/dloud-ads | 0 | 10048 | <gh_stars>0
"""Queue implementation using circularly linked list for storage."""
class CircularQueue:
"""Queue implementation using circularly linked list for storage."""
class _Node:
"""Lightweight, nonpublic class for storing a singly linked node."""
__slots__ = '_element', '_next'
def __init__(self, element, next_element):
self._element = element
self._next = next_element
def __init__(self):
"""Create an empty queue."""
self._tail = None
self._size = 0
def __len__(self):
"""Return the number of elements in the queue."""
return self._size
def is_empty(self):
"""Return True if the queue is empty."""
return self._size == 0
def first(self):
"""Return (but do not remove) the element at the front of the queue.
Raise ValueError exception if the queue is empty.
"""
if self.is_empty():
raise ValueError('Queue is empty')
head = self._tail._next
return head._element
def dequeue(self):
"""Remove and return the first element of the queue (i.e., FIFO).
Raise ValueError exception if the queue is empty.
"""
if self.is_empty():
raise ValueError('Queue is empty')
oldhead = self._tail._next
if self._size == 1:
self._tail = None
else:
self._tail._next = oldhead._next
self._size -= 1
return oldhead._element
def enqueue(self, element):
"""Add an element to the back of queue."""
newest = self._Node(element, None)
if self.is_empty():
newest._next = newest
else:
newest._next = self._tail._next
self._tail._next = newest
self._tail = newest
self._size += 1
def rotate(self):
"""Rotate front element to the back of the queue."""
if self._size > 0:
self._tail = self._tail._next
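# --- Hypothetical usage sketch (not part of the original module) -----------
# Quick demonstration of the queue API defined above; the values are
# arbitrary and only illustrate enqueue/rotate/dequeue semantics.
if __name__ == "__main__":
    queue = CircularQueue()
    for item in ("a", "b", "c"):
        queue.enqueue(item)
    assert len(queue) == 3 and queue.first() == "a"
    queue.rotate()                      # "a" moves behind "c"
    assert queue.first() == "b"
    assert queue.dequeue() == "b" and not queue.is_empty()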
| """Queue implementation using circularly linked list for storage."""
class CircularQueue:
"""Queue implementation using circularly linked list for storage."""
class _Node:
"""Lightweight, nonpublic class for storing a singly linked node."""
__slots__ = '_element', '_next'
def __init__(self, element, next_element):
self._element = element
self._next = next_element
def __init__(self):
"""Create an empty queue."""
self._tail = None
self._size = 0
def __len__(self):
"""Return the number of elements in the queue."""
return self._size
def is_empty(self):
"""Return True if the queue is empty."""
return self._size == 0
def first(self):
"""Return (but do not remove) the element at the front of the queue.
Raise ValueError exception if the queue is empty.
"""
if self.is_empty():
raise ValueError('Queue is empty')
head = self._tail._next
return head._element
def dequeue(self):
"""Remove and return the first element of the queue (i.e., FIFO).
Raise ValueError exception if the queue is empty.
"""
if self.is_empty():
raise ValueError('Queue is empty')
oldhead = self._tail._next
if self._size == 1:
self._tail = None
else:
self._tail._next = oldhead._next
self._size -= 1
return oldhead._element
def enqueue(self, element):
"""Add an element to the back of queue."""
newest = self._Node(element, None)
if self.is_empty():
newest._next = newest
else:
newest._next = self._tail._next
self._tail._next = newest
self._tail = newest
self._size += 1
def rotate(self):
"""Rotate front element to the back of the queue."""
if self._size > 0:
self._tail = self._tail._next | en | 0.710007 | Queue implementation using circularly linked list for storage. Queue implementation using circularly linked list for storage. Lightweight, nonpublic class for storing a singly linked node. Create an empty queue. Return the number of elements in the queue. Return True if the queue is empty. Return (but do not remove) the element at the front of the queue. Raise ValueError exception if the queue is empty. Remove and return the first element of the queue (i.e., FIFO). Raise ValueError exception if the queue is empty. Add an element to the back of queue. Rotate front element to the back of the queue. | 4.04477 | 4 |
suplemon/helpers.py | johnmbaughman/suplemon | 0 | 10049 | <reponame>johnmbaughman/suplemon<gh_stars>0
# -*- encoding: utf-8
"""
Various helper constants and functions.
"""
import os
import re
import sys
import time
import traceback
def curr_time():
"""Current time in %H:%M"""
return time.strftime("%H:%M")
def curr_time_sec():
"""Current time in %H:%M:%S"""
return time.strftime("%H:%M:%S")
def multisplit(data, delimiters):
pattern = "|".join(map(re.escape, delimiters))
return re.split(pattern, data)
def get_error_info():
"""Return info about last error."""
msg = "{0}\n{1}".format(str(traceback.format_exc()), str(sys.exc_info()))
return msg
def get_string_between(start, stop, s):
"""Search string for a substring between two delimeters. False if not found."""
i1 = s.find(start)
if i1 == -1:
return False
s = s[i1 + len(start):]
i2 = s.find(stop)
if i2 == -1:
return False
s = s[:i2]
return s
def whitespace(line):
"""Return index of first non whitespace character on a line."""
i = 0
for char in line:
if char != " ":
break
i += 1
return i
def parse_path(path):
"""Parse a relative path and return full directory and filename as a tuple."""
if path[:2] == "~" + os.sep:
p = os.path.expanduser("~")
path = os.path.join(p+os.sep, path[2:])
ab = os.path.abspath(path)
parts = os.path.split(ab)
return parts
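# --- Hypothetical usage sketch (not part of the original module) -----------
# Small illustration of the helpers above; the sample strings are made up.
if __name__ == "__main__":
    print(whitespace("    indented line"))               # -> 4
    print(get_string_between("<", ">", "a <tag> here"))  # -> "tag"
    print(multisplit("a,b;c", [",", ";"]))               # -> ['a', 'b', 'c']
    print(parse_path("~" + os.sep + "notes.txt"))        # -> (home dir, 'notes.txt')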
| # -*- encoding: utf-8
"""
Various helper constants and functions.
"""
import os
import re
import sys
import time
import traceback
def curr_time():
"""Current time in %H:%M"""
return time.strftime("%H:%M")
def curr_time_sec():
"""Current time in %H:%M:%S"""
return time.strftime("%H:%M:%S")
def multisplit(data, delimiters):
pattern = "|".join(map(re.escape, delimiters))
return re.split(pattern, data)
def get_error_info():
"""Return info about last error."""
msg = "{0}\n{1}".format(str(traceback.format_exc()), str(sys.exc_info()))
return msg
def get_string_between(start, stop, s):
"""Search string for a substring between two delimeters. False if not found."""
i1 = s.find(start)
if i1 == -1:
return False
s = s[i1 + len(start):]
i2 = s.find(stop)
if i2 == -1:
return False
s = s[:i2]
return s
def whitespace(line):
"""Return index of first non whitespace character on a line."""
i = 0
for char in line:
if char != " ":
break
i += 1
return i
def parse_path(path):
"""Parse a relative path and return full directory and filename as a tuple."""
if path[:2] == "~" + os.sep:
p = os.path.expanduser("~")
path = os.path.join(p+os.sep, path[2:])
ab = os.path.abspath(path)
parts = os.path.split(ab)
return parts | en | 0.792321 | # -*- encoding: utf-8 Various helper constants and functions. Current time in %H:%M Current time in %H:%M:%S Return info about last error. Search string for a substring between two delimeters. False if not found. Return index of first non whitespace character on a line. Parse a relative path and return full directory and filename as a tuple. | 3.099634 | 3 |
geofem/emg3d/meshes.py | iisadoramacedo/geofem-master | 0 | 10050 | """
:mod:`meshes` -- Discretization
===============================
Everything related to meshes appropriate for the multigrid solver.
"""
# Copyright 2018-2020 The emg3d Developers.
#
# This file is part of emg3d.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import numpy as np
from copy import deepcopy
from scipy import optimize
__all__ = ['TensorMesh', 'get_hx_h0', 'get_cell_numbers', 'get_stretched_h',
'get_domain', 'get_hx']
class TensorMesh:
"""Rudimentary mesh for multigrid calculation.
The tensor-mesh :class:`discretize.TensorMesh` is a powerful tool,
including sophisticated mesh-generation possibilities in 1D, 2D, and 3D,
plotting routines, and much more. However, in the multigrid solver we have
to generate a mesh at each level, many times over and over again, and we
only need a very limited set of attributes. This tensor-mesh class provides
all required attributes. All attributes here are the same as their
counterparts in :class:`discretize.TensorMesh` (both in name and value).
.. warning::
This is a slimmed-down version of :class:`discretize.TensorMesh`, meant
principally for internal use by the multigrid modeller. It is highly
recommended to use :class:`discretize.TensorMesh` to create the input
meshes instead of this class. There are no input-checks carried out
here, and there is only one accepted input format for `h` and `x0`.
Parameters
----------
h : list of three ndarrays
Cell widths in [x, y, z] directions.
x0 : ndarray of dimension (3, )
Origin (x, y, z).
"""
def __init__(self, h, x0):
"""Initialize the mesh."""
self.x0 = x0
# Width of cells.
self.hx = h[0]
self.hy = h[1]
self.hz = h[2]
# Cell related properties.
self.nCx = int(self.hx.size)
self.nCy = int(self.hy.size)
self.nCz = int(self.hz.size)
self.vnC = np.array([self.hx.size, self.hy.size, self.hz.size])
self.nC = int(self.vnC.prod())
self.vectorCCx = np.r_[0, self.hx[:-1].cumsum()]+self.hx*0.5+self.x0[0]
self.vectorCCy = np.r_[0, self.hy[:-1].cumsum()]+self.hy*0.5+self.x0[1]
self.vectorCCz = np.r_[0, self.hz[:-1].cumsum()]+self.hz*0.5+self.x0[2]
# Node related properties.
self.nNx = self.nCx + 1
self.nNy = self.nCy + 1
self.nNz = self.nCz + 1
self.vnN = np.array([self.nNx, self.nNy, self.nNz], dtype=int)
self.nN = int(self.vnN.prod())
self.vectorNx = np.r_[0., self.hx.cumsum()] + self.x0[0]
self.vectorNy = np.r_[0., self.hy.cumsum()] + self.x0[1]
self.vectorNz = np.r_[0., self.hz.cumsum()] + self.x0[2]
# Edge related properties.
self.vnEx = np.array([self.nCx, self.nNy, self.nNz], dtype=int)
self.vnEy = np.array([self.nNx, self.nCy, self.nNz], dtype=int)
self.vnEz = np.array([self.nNx, self.nNy, self.nCz], dtype=int)
self.nEx = int(self.vnEx.prod())
self.nEy = int(self.vnEy.prod())
self.nEz = int(self.vnEz.prod())
self.vnE = np.array([self.nEx, self.nEy, self.nEz], dtype=int)
self.nE = int(self.vnE.sum())
def __repr__(self):
"""Simple representation."""
return (f"TensorMesh: {self.nCx} x {self.nCy} x {self.nCz} "
f"({self.nC:,})")
def copy(self):
"""Return a copy of the TensorMesh."""
return TensorMesh.from_dict(self.to_dict(True))
def to_dict(self, copy=False):
"""Store the necessary information of the TensorMesh in a dict."""
out = {'hx': self.hx, 'hy': self.hy, 'hz': self.hz, 'x0': self.x0,
'__class__': self.__class__.__name__}
if copy:
return deepcopy(out)
else:
return out
@classmethod
def from_dict(cls, inp):
"""Convert dictionary into :class:`TensorMesh` instance.
Parameters
----------
inp : dict
Dictionary as obtained from :func:`TensorMesh.to_dict`.
The dictionary needs the keys `hx`, `hy`, `hz`, and `x0`.
Returns
-------
obj : :class:`TensorMesh` instance
"""
try:
return cls(h=[inp['hx'], inp['hy'], inp['hz']], x0=inp['x0'])
except KeyError as e:
print(f"* ERROR :: Variable {e} missing in `inp`.")
raise
@property
def vol(self):
"""Construct cell volumes of the 3D model as 1D array."""
if getattr(self, '_vol', None) is None:
self._vol = (self.hx[None, None, :]*self.hy[None, :, None] *
self.hz[:, None, None]).ravel()
return self._vol
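# --- Hypothetical usage sketch (not part of the original module) -----------
# Builds a small, regular 4 x 4 x 4 mesh with 100 m cells centred on the
# origin; the numbers are arbitrary and only illustrate the constructor.
def _demo_tensor_mesh():
    widths = [np.ones(4)*100., np.ones(4)*100., np.ones(4)*100.]
    mesh = TensorMesh(widths, x0=np.array([-200., -200., -200.]))
    return mesh.nC, mesh.vol.sum()      # 64 cells; 6.4e7 m^3 in total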
def get_hx_h0(freq, res, domain, fixed=0., possible_nx=None, min_width=None,
pps=3, alpha=None, max_domain=100000., raise_error=True, verb=1,
return_info=False):
r"""Return cell widths and origin for given parameters.
Returns cell widths for the provided frequency, resistivity, domain extent,
and other parameters using a flexible amount of cells. See input parameters
for more details. A maximum of three hard/fixed boundaries can be provided
(one of which is the grid center).
The minimum cell width is calculated through :math:`\delta/\rm{pps}`, where
the skin depth is given by :math:`\delta = 503.3 \sqrt{\rho/f}`, and the
parameter `pps` stands for 'points-per-skindepth'. The minimum cell width
can be restricted with the parameter `min_width`.
The actual calculation domain adds a buffer zone around the (survey)
domain. The thickness of the buffer is six times the skin depth. The field
is basically zero after two wavelengths. A wavelength is
:math:`2\pi\delta`, hence roughly 6 times the skin depth. Taking a factor 6
gives therefore almost two wavelengths, as the field travels to the
boundary and back. The actual buffer thickness can be steered with the
`res` parameter.
One has to take into account that the air is very resistive, which has to
be considered not just in the vertical direction, but also in the
horizontal directions, as the airwave will bounce back from the sides
otherwise. In the marine case this issue reduces with increasing water
depth.
See Also
--------
get_stretched_h : Get `hx` for a fixed number `nx` and within a fixed
domain.
Parameters
----------
freq : float
Frequency (Hz) to calculate the skin depth. The skin depth is a concept
defined in the frequency domain. If a negative frequency is provided,
it is assumed that the calculation is carried out in the Laplace
domain. To calculate the skin depth, the value of `freq` is then
multiplied by :math:`-2\pi`, to simulate the closest
frequency-equivalent.
res : float or list
Resistivity (Ohm m) to calculate the skin depth. The skin depth is
used to calculate the minimum cell width and the boundary thicknesses.
Up to three resistivities can be provided:
- float: Same resistivity for everything;
- [min_width, boundaries];
- [min_width, left boundary, right boundary].
domain : list
Contains the survey-domain limits [min, max]. The actual calculation
domain consists of this domain plus a buffer zone around it, which
depends on frequency and resistivity.
fixed : list, optional
Fixed boundaries, one, two, or maximum three values. The grid is
centered around the first value. Hence it is the center location with
the smallest cell. Two more fixed boundaries can be added, at most one
on each side of the first one.
Default is 0.
possible_nx : list, optional
List of possible numbers of cells. See :func:`get_cell_numbers`.
Default is ``get_cell_numbers(500, 5, 3)``, which corresponds to
[16, 24, 32, 40, 48, 64, 80, 96, 128, 160, 192, 256, 320, 384].
min_width : float, list or None, optional
Minimum cell width restriction:
- None : No restriction;
- float : Fixed to this value, ignoring skin depth and `pps`.
- list [min, max] : Lower and upper bounds.
Default is None.
pps : int, optional
Points per skindepth; minimum cell width is calculated via
`dmin = skindepth/pps`.
Default = 3.
alpha : list, optional
Maximum alpha and step size to find a good alpha. The first value is
the maximum alpha of the survey domain, the second value is the maximum
alpha for the buffer zone, and the third value is the step size.
Default = [1, 1.5, .01], hence no stretching within the survey domain
and a maximum stretching of 1.5 in the buffer zone; step size is 0.01.
max_domain : float, optional
Maximum calculation domain from fixed[0] (usually source position).
Default is 100,000.
raise_error : bool, optional
If True, an error is raised if no suitable grid is found. Otherwise it
just prints a message and returns None's.
Default is True.
verb : int, optional
Verbosity, 0 or 1.
Default = 1.
return_info : bool
If True, a dictionary is returned with some grid info (min and max
cell width and alpha).
Returns
-------
hx : ndarray
Cell widths of mesh.
x0 : float
Origin of the mesh.
info : dict
Dictionary with mesh info; only if ``return_info=True``.
Keys:
- `dmin`: Minimum cell width;
- `dmax`: Maximum cell width;
- `amin`: Minimum alpha;
- `amax`: Maximum alpha.
"""
# Get variables with default lists:
if alpha is None:
alpha = [1, 1.5, 0.01]
if possible_nx is None:
possible_nx = get_cell_numbers(500, 5, 3)
# Cast resistivity value(s).
res = np.array(res, ndmin=1)
if res.size == 1:
res_arr = np.array([res[0], res[0], res[0]])
elif res.size == 2:
res_arr = np.array([res[0], res[1], res[1]])
else:
res_arr = np.array([res[0], res[1], res[2]])
# Cast and check fixed.
fixed = np.array(fixed, ndmin=1)
if fixed.size > 2:
# Check length.
if fixed.size > 3:
print("\n* ERROR :: Maximum three fixed boundaries permitted.\n"
f" Provided: {fixed.size}.")
raise ValueError("Wrong input for fixed")
# Sort second and third, so it doesn't matter how it was provided.
fixed = np.array([fixed[0], max(fixed[1:]), min(fixed[1:])])
# Check side.
if np.sign(np.diff(fixed[:2])) == np.sign(np.diff(fixed[::2])):
print("\n* ERROR :: 2nd and 3rd fixed boundaries have to be "
"left and right of the first one.\n "
f"Provided: [{fixed[0]}, {fixed[1]}, {fixed[2]}]")
raise ValueError("Wrong input for fixed")
# Calculate skin depth.
skind = 503.3*np.sqrt(res_arr/abs(freq))
if freq < 0: # For Laplace-domain calculations.
skind /= np.sqrt(2*np.pi)
# Minimum cell width.
dmin = skind[0]/pps
if min_width is not None: # Respect user input.
min_width = np.array(min_width, ndmin=1)
if min_width.size == 1:
dmin = min_width
else:
dmin = np.clip(dmin, *min_width)
# Survey domain; contains all sources and receivers.
domain = np.array(domain, dtype=float)
# Calculation domain; big enough to avoid boundary effects.
# To avoid boundary effects we want the signal to travel two wavelengths
# from the source to the boundary and back to the receiver.
# => 2*pi*sd ~ 6.3*sd = one wavelength => signal is ~ 0.2 %.
# Two wavelengths we can safely assume it is zero.
#
# The air does not follow the concept of skin depth, as it is a wave rather
# than diffusion. For this is the factor `max_domain`, which restricts
# the domain in each direction to this value from the center.
# (a) Source to edges of domain.
dist_in_domain = abs(domain - fixed[0])
# (b) Two wavelengths.
two_lambda = skind[1:]*4*np.pi
# (c) Required buffer, additional to domain.
dist_buff = np.max([np.zeros(2), (two_lambda - dist_in_domain)/2], axis=0)
# (d) Add buffer to domain.
calc_domain = np.array([domain[0]-dist_buff[0], domain[1]+dist_buff[1]])
# (e) Restrict total domain to max_domain.
calc_domain[0] = max(calc_domain[0], fixed[0]-max_domain)
calc_domain[1] = min(calc_domain[1], fixed[0]+max_domain)
# Initiate flag if terminated.
finished = False
# Initiate alpha variables for survey and calculation domains.
sa, ca = 1.0, 1.0
# Loop over possible cell numbers from small to big.
for nx in np.unique(possible_nx):
# Loop over possible alphas for domain.
for sa in np.arange(1.0, alpha[0]+alpha[2]/2, alpha[2]):
# Get current stretched grid cell sizes.
thxl = dmin*sa**np.arange(nx) # Left of origin.
thxr = dmin*sa**np.arange(nx) # Right of origin.
# 0. Adjust stretching for fixed boundaries.
if fixed.size > 1: # Move mesh to first fixed boundary.
t_nx = np.r_[fixed[0], fixed[0]+np.cumsum(thxr)]
ii = np.argmin(abs(t_nx-fixed[1]))
thxr *= abs(fixed[1]-fixed[0])/np.sum(thxr[:ii])
if fixed.size > 2: # Move mesh to second fixed boundary.
t_nx = np.r_[fixed[0], fixed[0]-np.cumsum(thxl)]
ii = np.argmin(abs(t_nx-fixed[2]))
thxl *= abs(fixed[2]-fixed[0])/np.sum(thxl[:ii])
# 1. Fill from center to left domain.
nl = np.sum((fixed[0]-np.cumsum(thxl)) > domain[0])+1
# 2. Fill from center to right domain.
nr = np.sum((fixed[0]+np.cumsum(thxr)) < domain[1])+1
# 3. Get remaining number of cells and check termination criteria.
nsdc = nl+nr # Number of domain cells.
nx_remain = nx-nsdc
# Not good, try next.
if nx_remain <= 0:
continue
# Create the current hx-array.
hx = np.r_[thxl[:nl][::-1], thxr[:nr]]
hxo = np.r_[thxl[:nl][::-1], thxr[:nr]]
# Get actual domain:
asurv_domain = [fixed[0]-np.sum(thxl[:nl]),
fixed[0]+np.sum(thxr[:nr])]
x0 = float(fixed[0]-np.sum(thxl[:nl]))
# Get actual stretching (differs in case of fixed layers).
sa_adj = np.max([hx[1:]/hx[:-1], hx[:-1]/hx[1:]])
# Loop over possible alphas for calc_domain.
for ca in np.arange(sa, alpha[1]+alpha[2]/2, alpha[2]):
# 4. Fill to left calc_domain.
thxl = hx[0]*ca**np.arange(1, nx_remain+1)
nl = np.sum((asurv_domain[0]-np.cumsum(thxl)) >
calc_domain[0])+1
# 5. Fill to right calc_domain.
thxr = hx[-1]*ca**np.arange(1, nx_remain+1)
nr = np.sum((asurv_domain[1]+np.cumsum(thxr)) <
calc_domain[1])+1
# 6. Get remaining number of cells and check termination
# criteria.
ncdc = nl+nr # Number of calc_domain cells.
nx_remain2 = nx-nsdc-ncdc
if nx_remain2 < 0: # Not good, try next.
continue
# Create hx-array.
nl += int(np.floor(nx_remain2/2)) # If uneven, add one cell
nr += int(np.ceil(nx_remain2/2)) # more on the right.
hx = np.r_[thxl[:nl][::-1], hx, thxr[:nr]]
# Calculate origin.
x0 = float(asurv_domain[0]-np.sum(thxl[:nl]))
# Mark it as finished and break out of the loop.
finished = True
break
if finished:
break
if finished:
break
# Check finished and print info about found grid.
if not finished:
# Throw message if no solution was found.
print("\n* ERROR :: No suitable grid found; relax your criteria.\n")
if raise_error:
raise ArithmeticError("No grid found!")
else:
hx, x0 = None, None
elif verb > 0:
print(f" Skin depth ", end="")
if res.size == 1:
print(f" [m] : {skind[0]:.0f}")
elif res.size == 2:
print(f"(m/l-r) [m] : {skind[0]:.0f} / {skind[1]:.0f}")
else:
print(f"(m/l/r) [m] : {skind[0]:.0f} / {skind[1]:.0f} / "
f"{skind[2]:.0f}")
print(f" Survey domain [m] : {domain[0]:.0f} - "
f"{domain[1]:.0f}")
print(f" Calculation domain [m] : {calc_domain[0]:.0f} - "
f"{calc_domain[1]:.0f}")
print(f" Final extent [m] : {x0:.0f} - "
f"{x0+np.sum(hx):.0f}")
extstr = f" Min/max cell width [m] : {min(hx):.0f} / "
alstr = f" Alpha survey"
nrstr = " Number of cells "
if not np.isclose(sa, sa_adj):
sastr = f"{sa:.3f} ({sa_adj:.3f})"
else:
sastr = f"{sa:.3f}"
print(extstr+f"{max(hxo):.0f} / {max(hx):.0f}")
print(alstr+f"/calc : {sastr} / {ca:.3f}")
print(nrstr+f"(s/c/r) : {nx} ({nsdc}/{ncdc}/{nx_remain2})")
print()
if return_info:
if not fixed.size > 1:
sa_adj = sa
info = {'dmin': dmin,
'dmax': np.nanmax(hx),
'amin': np.nanmin([ca, sa, sa_adj]),
'amax': np.nanmax([ca, sa, sa_adj])}
return hx, x0, info
else:
return hx, x0
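# --- Hypothetical usage sketch (not part of the original module) -----------
# Example call for a 1 Hz survey: sea water (0.3 Ohm.m) sets the minimum
# cell width, 1 Ohm.m sets the boundary buffer; the values are illustrative.
def _demo_get_hx_h0():
    hx, x0 = get_hx_h0(freq=1.0, res=[0.3, 1.0], domain=[-2000, 2000],
                       fixed=0., verb=0)
    return hx, x0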
def get_cell_numbers(max_nr, max_prime=5, min_div=3):
r"""Returns 'good' cell numbers for the multigrid method.
'Good' cell numbers are numbers which can be divided by 2 as many times as
possible. At the end there will be a low prime number.
The function adds all numbers :math:`p 2^n \leq M` for :math:`p={2, 3, ...,
p_\text{max}}` and :math:`n={n_\text{min}, n_\text{min}+1, ..., \infty}`;
:math:`M, p_\text{max}, n_\text{min}` correspond to `max_nr`, `max_prime`,
and `min_div`, respectively.
Parameters
----------
max_nr : int
Maximum number of cells.
max_prime : int
Highest permitted prime number p for p*2^n. {2, 3, 5, 7} are good upper
limits in order to avoid too big lowest grids in the multigrid method.
Default is 5.
min_div : int
Minimum times the number can be divided by two.
Default is 3.
Returns
-------
numbers : array
Array containing all possible cell numbers from lowest to highest.
"""
# Primes till 20.
primes = np.array([2, 3, 5, 7, 11, 13, 17, 19])
# Sanity check; 19 is already ridiculously high.
if max_prime > primes[-1]:
print(f"* ERROR :: Highest prime is {max_prime}, "
"please use a value < 20.")
raise ValueError("Highest prime too high")
# Restrict to max_prime.
primes = primes[primes <= max_prime]
# Get possible values.
# Currently restricted to prime*2**30 (for prime=2 => 1,073,741,824 cells).
numbers = primes[:, None]*2**np.arange(min_div, 30)
# Get unique values.
numbers = np.unique(numbers)
# Restrict to max_nr and return.
return numbers[numbers <= max_nr]
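# --- Hypothetical worked example (not part of the original module) ---------
# With max_nr=100, max_prime=5 and min_div=3 the admissible cell counts are
# all p * 2**n <= 100 for p in {2, 3, 5} and n >= 3:
#     get_cell_numbers(100, 5, 3)  ->  [16, 24, 32, 40, 48, 64, 80, 96]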
def get_stretched_h(min_width, domain, nx, x0=0, x1=None, resp_domain=False):
"""Return cell widths for a stretched grid within the domain.
Returns `nx` cell widths within `domain`, where the minimum cell width is
`min_width`. The cells are not stretched within `x0` and `x1`, and outside
uses a power-law stretching. The actual stretching factor and the number of
    cells left and right of `x0` and `x1` are found in a minimization process.
The domain is not completely respected. The starting point of the domain
is, but the endpoint of the domain might slightly shift (this is more
likely the case for small `nx`, for big `nx` the shift should be small).
The new endpoint can be obtained with ``domain[0]+np.sum(hx)``. If you want
the domain to be respected absolutely, set ``resp_domain=True``. However,
be aware that this will introduce one stretch-factor which is different
from the other stretch factors, to accommodate the restriction. This
one-off factor is between the left- and right-side of `x0`, or, if `x1` is
provided, just after `x1`.
See Also
--------
    get_hx_h0 : Get `hx` and `x0` for a flexible number of `nx` with
given bounds.
Parameters
----------
min_width : float
Minimum cell width. If x1 is provided, the actual minimum cell width
might be smaller than min_width.
domain : list
[start, end] of model domain.
nx : int
Number of cells.
x0 : float
Center of the grid. `x0` is restricted to `domain`.
Default is 0.
x1 : float
If provided, then no stretching is applied between `x0` and `x1`. The
non-stretched part starts at `x0` and stops at the first possible
location at or after `x1`. `x1` is restricted to `domain`. This will
min_width so that an integer number of cells fit within x0 and x1.
resp_domain : bool
If False (default), then the domain-end might shift slightly to assure
that the same stretching factor is applied throughout. If set to True,
however, the domain is respected absolutely. This will introduce one
stretch-factor which is different from the other stretch factors, to
accommodate the restriction. This one-off factor is between the left-
and right-side of `x0`, or, if `x1` is provided, just after `x1`.
Returns
-------
hx : ndarray
Cell widths of mesh.
"""
# Cast to arrays
domain = np.array(domain, dtype=float)
x0 = np.array(x0, dtype=float)
x0 = np.clip(x0, *domain) # Restrict to model domain
min_width = np.array(min_width, dtype=float)
if x1 is not None:
x1 = np.array(x1, dtype=float)
x1 = np.clip(x1, *domain) # Restrict to model domain
# If x1 is provided (a part is not stretched)
if x1 is not None:
# Store original values
xlim_orig = domain.copy()
nx_orig = int(nx)
x0_orig = x0.copy()
h_min_orig = min_width.copy()
# Get number of non-stretched cells
n_nos = int(np.ceil((x1-x0)/min_width))
# Re-calculate min_width to fit with x0-x1-limits:
min_width = (x1-x0)/n_nos
# Subtract one cell, because the standard scheme provides one
# min_width-cell.
n_nos -= 1
# Reset x0, because the first min_width comes from normal scheme
x0 += min_width
# Reset xmax for normal scheme
domain[1] -= n_nos*min_width
# Reset nx for normal scheme
nx -= n_nos
# If there are not enough points reset to standard procedure. The limit
# of five is arbitrary. However, nx should be much bigger than five
# anyways, otherwise stretched grid doesn't make sense.
if nx <= 5:
print("Warning :: Not enough points for non-stretched part,"
"ignoring therefore `x1`.")
domain = xlim_orig
nx = nx_orig
x0 = x0_orig
x1 = None
min_width = h_min_orig
# Get stretching factor (a = 1+alpha).
if min_width == 0 or min_width > np.diff(domain)/nx:
# If min_width is bigger than the domain-extent divided by nx, no
# stretching is required at all.
alpha = 0
else:
# Wrap _get_dx into a minimization function to call with fsolve.
def find_alpha(alpha, min_width, args):
"""Find alpha such that min(hx) = min_width."""
return min(get_hx(alpha, *args))/min_width-1
# Search for best alpha, must be at least 0
args = (domain, nx, x0)
alpha = max(0, optimize.fsolve(find_alpha, 0.02, (min_width, args)))
# With alpha get actual cell spacing with `resp_domain` to respect the
# users decision.
hx = get_hx(alpha, domain, nx, x0, resp_domain)
# Add the non-stretched center if x1 is provided
if x1 is not None:
hx = np.r_[hx[: np.argmin(hx)], np.ones(n_nos)*min_width,
hx[np.argmin(hx):]]
# Print warning min_width could not be respected.
if abs(hx.min() - min_width) > 0.1:
print(f"Warning :: Minimum cell width ({np.round(hx.min(), 2)} m) is "
"below `min_width`, because `nx` is too big for `domain`.")
return hx
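# --- Hypothetical usage sketch (not part of the original module) -----------
# Twenty cells on [0, 1000] m with a 10 m minimum width and the finest cell
# at the left edge; the numbers are arbitrary and only illustrate the call.
def _demo_get_stretched_h():
    hx = get_stretched_h(min_width=10., domain=[0, 1000], nx=20, x0=0)
    return hx.min(), hx.sum()           # ~10 m; widths sum to the domain size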
def get_domain(x0=0, freq=1, res=0.3, limits=None, min_width=None,
fact_min=0.2, fact_neg=5, fact_pos=None):
r"""Get domain extent and minimum cell width as a function of skin depth.
Returns the extent of the calculation domain and the minimum cell width as
a multiple of the skin depth, with possible user restrictions on minimum
calculation domain and range of possible minimum cell widths.
.. math::
\delta &= 503.3 \sqrt{\frac{\rho}{f}} , \\
x_\text{start} &= x_0-k_\text{neg}\delta , \\
x_\text{end} &= x_0+k_\text{pos}\delta , \\
h_\text{min} &= k_\text{min} \delta .
Parameters
----------
x0 : float
Center of the calculation domain. Normally the source location.
Default is 0.
freq : float
Frequency (Hz) to calculate the skin depth. The skin depth is a concept
defined in the frequency domain. If a negative frequency is provided,
it is assumed that the calculation is carried out in the Laplace
domain. To calculate the skin depth, the value of `freq` is then
multiplied by :math:`-2\pi`, to simulate the closest
frequency-equivalent.
Default is 1 Hz.
res : float, optional
Resistivity (Ohm m) to calculate skin depth.
Default is 0.3 Ohm m (sea water).
limits : None or list
[start, end] of model domain. This extent represents the minimum extent
of the domain. The domain is therefore only adjusted if it has to reach
outside of [start, end].
Default is None.
min_width : None, float, or list of two floats
Minimum cell width is calculated as a function of skin depth:
fact_min*sd. If `min_width` is a float, this is used. If a list of
two values [min, max] are provided, they are used to restrain
min_width. Default is None.
fact_min, fact_neg, fact_pos : floats
The skin depth is multiplied with these factors to estimate:
- Minimum cell width (`fact_min`, default 0.2)
- Domain-start (`fact_neg`, default 5), and
- Domain-end (`fact_pos`, defaults to `fact_neg`).
Returns
-------
h_min : float
Minimum cell width.
domain : list
Start- and end-points of calculation domain.
"""
# Set fact_pos to fact_neg if not provided.
if fact_pos is None:
fact_pos = fact_neg
# Calculate the skin depth.
skind = 503.3*np.sqrt(res/abs(freq))
if freq < 0: # For Laplace-domain calculations.
skind /= np.sqrt(2*np.pi)
# Estimate minimum cell width.
h_min = fact_min*skind
if min_width is not None: # Respect user input.
if np.array(min_width).size == 1:
h_min = min_width
else:
h_min = np.clip(h_min, *min_width)
# Estimate calculation domain.
domain = [x0-fact_neg*skind, x0+fact_pos*skind]
if limits is not None: # Respect user input.
domain = [min(limits[0], domain[0]), max(limits[1], domain[1])]
return h_min, domain
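# --- Hypothetical worked example (not part of the original module) ---------
# For a 1 Hz source in 0.3 Ohm.m sea water the skin depth is ~276 m, so the
# defaults (fact_min=0.2, fact_neg=fact_pos=5) give roughly
#     get_domain(x0=0, freq=1, res=0.3)  ->  (h_min ~ 55 m, [-1378, 1378] m)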
def get_hx(alpha, domain, nx, x0, resp_domain=True):
r"""Return cell widths for given input.
Find the number of cells left and right of `x0`, `nl` and `nr`
respectively, for the provided alpha. For this, we solve
.. math:: \frac{x_\text{max}-x_0}{x_0-x_\text{min}} =
\frac{a^{nr}-1}{a^{nl}-1}
where :math:`a = 1+\alpha`.
Parameters
----------
alpha : float
Stretching factor `a` is given by ``a=1+alpha``.
domain : list
[start, end] of model domain.
nx : int
Number of cells.
x0 : float
Center of the grid. `x0` is restricted to `domain`.
resp_domain : bool
If False (default), then the domain-end might shift slightly to assure
that the same stretching factor is applied throughout. If set to True,
however, the domain is respected absolutely. This will introduce one
stretch-factor which is different from the other stretch factors, to
accommodate the restriction. This one-off factor is between the left-
and right-side of `x0`, or, if `x1` is provided, just after `x1`.
Returns
-------
hx : ndarray
Cell widths of mesh.
"""
if alpha <= 0.: # If alpha <= 0: equal spacing (no stretching at all)
hx = np.ones(nx)*np.diff(np.squeeze(domain))/nx
else: # Get stretched hx
a = alpha+1
# Get hx depending if x0 is on the domain boundary or not.
if np.isclose(x0, domain[0]) or np.isclose(x0, domain[1]):
            # Get all a's
alr = np.diff(domain)*alpha/(a**nx-1)*a**np.arange(nx)
if x0 == domain[1]:
alr = alr[::-1]
# Calculate differences
hx = alr*np.diff(domain)/sum(alr)
else:
# Find number of elements left and right by solving:
# (xmax-x0)/(x0-xmin) = a**nr-1/(a**nl-1)
nr = np.arange(2, nx+1)
er = (domain[1]-x0)/(x0-domain[0]) - (a**nr[::-1]-1)/(a**nr-1)
nl = np.argmin(abs(np.floor(er)))+1
nr = nx-nl
# Get all a's
al = a**np.arange(nl-1, -1, -1)
ar = a**np.arange(1, nr+1)
# Calculate differences
if resp_domain:
# This version honours domain[0] and domain[1], but to achieve
# this it introduces one stretch-factor which is different from
# all the others between al to ar.
hx = np.r_[al*(x0-domain[0])/sum(al),
ar*(domain[1]-x0)/sum(ar)]
else:
# This version moves domain[1], but each stretch-factor is
# exactly the same.
fact = (x0-domain[0])/sum(al) # Take distance from al.
hx = np.r_[al, ar]*fact
# Note: this hx is equivalent as providing the following h
# to TensorMesh:
# h = [(min_width, nl-1, -a), (min_width, n_nos+1),
# (min_width, nr, a)]
return hx
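# --- Hypothetical worked example (not part of the original module) ---------
# With alpha <= 0 the spacing is uniform: get_hx(0, [0, 1000], 10, 0) returns
# ten cells of 100 m. With alpha > 0 the widths grow away from x0 by the
# factor a = 1 + alpha, subject to the `resp_domain` behaviour described above.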
| """
:mod:`meshes` -- Discretization
===============================
Everything related to meshes appropriate for the multigrid solver.
"""
# Copyright 2018-2020 The emg3d Developers.
#
# This file is part of emg3d.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import numpy as np
from copy import deepcopy
from scipy import optimize
__all__ = ['TensorMesh', 'get_hx_h0', 'get_cell_numbers', 'get_stretched_h',
'get_domain', 'get_hx']
class TensorMesh:
"""Rudimentary mesh for multigrid calculation.
The tensor-mesh :class:`discretize.TensorMesh` is a powerful tool,
including sophisticated mesh-generation possibilities in 1D, 2D, and 3D,
plotting routines, and much more. However, in the multigrid solver we have
to generate a mesh at each level, many times over and over again, and we
only need a very limited set of attributes. This tensor-mesh class provides
all required attributes. All attributes here are the same as their
counterparts in :class:`discretize.TensorMesh` (both in name and value).
.. warning::
This is a slimmed-down version of :class:`discretize.TensorMesh`, meant
principally for internal use by the multigrid modeller. It is highly
recommended to use :class:`discretize.TensorMesh` to create the input
meshes instead of this class. There are no input-checks carried out
here, and there is only one accepted input format for `h` and `x0`.
Parameters
----------
h : list of three ndarrays
Cell widths in [x, y, z] directions.
x0 : ndarray of dimension (3, )
Origin (x, y, z).
"""
def __init__(self, h, x0):
"""Initialize the mesh."""
self.x0 = x0
# Width of cells.
self.hx = h[0]
self.hy = h[1]
self.hz = h[2]
# Cell related properties.
self.nCx = int(self.hx.size)
self.nCy = int(self.hy.size)
self.nCz = int(self.hz.size)
self.vnC = np.array([self.hx.size, self.hy.size, self.hz.size])
self.nC = int(self.vnC.prod())
self.vectorCCx = np.r_[0, self.hx[:-1].cumsum()]+self.hx*0.5+self.x0[0]
self.vectorCCy = np.r_[0, self.hy[:-1].cumsum()]+self.hy*0.5+self.x0[1]
self.vectorCCz = np.r_[0, self.hz[:-1].cumsum()]+self.hz*0.5+self.x0[2]
# Node related properties.
self.nNx = self.nCx + 1
self.nNy = self.nCy + 1
self.nNz = self.nCz + 1
self.vnN = np.array([self.nNx, self.nNy, self.nNz], dtype=int)
self.nN = int(self.vnN.prod())
self.vectorNx = np.r_[0., self.hx.cumsum()] + self.x0[0]
self.vectorNy = np.r_[0., self.hy.cumsum()] + self.x0[1]
self.vectorNz = np.r_[0., self.hz.cumsum()] + self.x0[2]
# Edge related properties.
self.vnEx = np.array([self.nCx, self.nNy, self.nNz], dtype=int)
self.vnEy = np.array([self.nNx, self.nCy, self.nNz], dtype=int)
self.vnEz = np.array([self.nNx, self.nNy, self.nCz], dtype=int)
self.nEx = int(self.vnEx.prod())
self.nEy = int(self.vnEy.prod())
self.nEz = int(self.vnEz.prod())
self.vnE = np.array([self.nEx, self.nEy, self.nEz], dtype=int)
self.nE = int(self.vnE.sum())
def __repr__(self):
"""Simple representation."""
return (f"TensorMesh: {self.nCx} x {self.nCy} x {self.nCz} "
f"({self.nC:,})")
def copy(self):
"""Return a copy of the TensorMesh."""
return TensorMesh.from_dict(self.to_dict(True))
def to_dict(self, copy=False):
"""Store the necessary information of the TensorMesh in a dict."""
out = {'hx': self.hx, 'hy': self.hy, 'hz': self.hz, 'x0': self.x0,
'__class__': self.__class__.__name__}
if copy:
return deepcopy(out)
else:
return out
@classmethod
def from_dict(cls, inp):
"""Convert dictionary into :class:`TensorMesh` instance.
Parameters
----------
inp : dict
Dictionary as obtained from :func:`TensorMesh.to_dict`.
The dictionary needs the keys `hx`, `hy`, `hz`, and `x0`.
Returns
-------
obj : :class:`TensorMesh` instance
"""
try:
return cls(h=[inp['hx'], inp['hy'], inp['hz']], x0=inp['x0'])
except KeyError as e:
print(f"* ERROR :: Variable {e} missing in `inp`.")
raise
@property
def vol(self):
"""Construct cell volumes of the 3D model as 1D array."""
if getattr(self, '_vol', None) is None:
self._vol = (self.hx[None, None, :]*self.hy[None, :, None] *
self.hz[:, None, None]).ravel()
return self._vol
def get_hx_h0(freq, res, domain, fixed=0., possible_nx=None, min_width=None,
pps=3, alpha=None, max_domain=100000., raise_error=True, verb=1,
return_info=False):
r"""Return cell widths and origin for given parameters.
Returns cell widths for the provided frequency, resistivity, domain extent,
and other parameters using a flexible amount of cells. See input parameters
for more details. A maximum of three hard/fixed boundaries can be provided
(one of which is the grid center).
The minimum cell width is calculated through :math:`\delta/\rm{pps}`, where
the skin depth is given by :math:`\delta = 503.3 \sqrt{\rho/f}`, and the
parameter `pps` stands for 'points-per-skindepth'. The minimum cell width
can be restricted with the parameter `min_width`.
The actual calculation domain adds a buffer zone around the (survey)
domain. The thickness of the buffer is six times the skin depth. The field
is basically zero after two wavelengths. A wavelength is
:math:`2\pi\delta`, hence roughly 6 times the skin depth. Taking a factor 6
gives therefore almost two wavelengths, as the field travels to the
boundary and back. The actual buffer thickness can be steered with the
`res` parameter.
One has to take into account that the air is very resistive, which has to
be considered not just in the vertical direction, but also in the
horizontal directions, as the airwave will bounce back from the sides
otherwise. In the marine case this issue reduces with increasing water
depth.
See Also
--------
get_stretched_h : Get `hx` for a fixed number `nx` and within a fixed
domain.
Parameters
----------
freq : float
Frequency (Hz) to calculate the skin depth. The skin depth is a concept
defined in the frequency domain. If a negative frequency is provided,
it is assumed that the calculation is carried out in the Laplace
domain. To calculate the skin depth, the value of `freq` is then
multiplied by :math:`-2\pi`, to simulate the closest
frequency-equivalent.
res : float or list
Resistivity (Ohm m) to calculate the skin depth. The skin depth is
used to calculate the minimum cell width and the boundary thicknesses.
Up to three resistivities can be provided:
- float: Same resistivity for everything;
- [min_width, boundaries];
- [min_width, left boundary, right boundary].
domain : list
Contains the survey-domain limits [min, max]. The actual calculation
domain consists of this domain plus a buffer zone around it, which
depends on frequency and resistivity.
fixed : list, optional
Fixed boundaries, one, two, or maximum three values. The grid is
centered around the first value. Hence it is the center location with
the smallest cell. Two more fixed boundaries can be added, at most one
on each side of the first one.
Default is 0.
possible_nx : list, optional
List of possible numbers of cells. See :func:`get_cell_numbers`.
Default is ``get_cell_numbers(500, 5, 3)``, which corresponds to
[16, 24, 32, 40, 48, 64, 80, 96, 128, 160, 192, 256, 320, 384].
min_width : float, list or None, optional
Minimum cell width restriction:
- None : No restriction;
- float : Fixed to this value, ignoring skin depth and `pps`.
- list [min, max] : Lower and upper bounds.
Default is None.
pps : int, optional
Points per skindepth; minimum cell width is calculated via
`dmin = skindepth/pps`.
Default = 3.
alpha : list, optional
Maximum alpha and step size to find a good alpha. The first value is
the maximum alpha of the survey domain, the second value is the maximum
alpha for the buffer zone, and the third value is the step size.
Default = [1, 1.5, .01], hence no stretching within the survey domain
and a maximum stretching of 1.5 in the buffer zone; step size is 0.01.
max_domain : float, optional
Maximum calculation domain from fixed[0] (usually source position).
Default is 100,000.
raise_error : bool, optional
If True, an error is raised if no suitable grid is found. Otherwise it
just prints a message and returns None's.
Default is True.
verb : int, optional
Verbosity, 0 or 1.
Default = 1.
return_info : bool
If True, a dictionary is returned with some grid info (min and max
cell width and alpha).
Returns
-------
hx : ndarray
Cell widths of mesh.
x0 : float
Origin of the mesh.
info : dict
Dictionary with mesh info; only if ``return_info=True``.
Keys:
- `dmin`: Minimum cell width;
- `dmax`: Maximum cell width;
- `amin`: Minimum alpha;
- `amax`: Maximum alpha.
"""
# Get variables with default lists:
if alpha is None:
alpha = [1, 1.5, 0.01]
if possible_nx is None:
possible_nx = get_cell_numbers(500, 5, 3)
# Cast resistivity value(s).
res = np.array(res, ndmin=1)
if res.size == 1:
res_arr = np.array([res[0], res[0], res[0]])
elif res.size == 2:
res_arr = np.array([res[0], res[1], res[1]])
else:
res_arr = np.array([res[0], res[1], res[2]])
# Cast and check fixed.
fixed = np.array(fixed, ndmin=1)
if fixed.size > 2:
# Check length.
if fixed.size > 3:
print("\n* ERROR :: Maximum three fixed boundaries permitted.\n"
f" Provided: {fixed.size}.")
raise ValueError("Wrong input for fixed")
# Sort second and third, so it doesn't matter how it was provided.
fixed = np.array([fixed[0], max(fixed[1:]), min(fixed[1:])])
# Check side.
if np.sign(np.diff(fixed[:2])) == np.sign(np.diff(fixed[::2])):
print("\n* ERROR :: 2nd and 3rd fixed boundaries have to be "
"left and right of the first one.\n "
f"Provided: [{fixed[0]}, {fixed[1]}, {fixed[2]}]")
raise ValueError("Wrong input for fixed")
# Calculate skin depth.
skind = 503.3*np.sqrt(res_arr/abs(freq))
if freq < 0: # For Laplace-domain calculations.
skind /= np.sqrt(2*np.pi)
# Minimum cell width.
dmin = skind[0]/pps
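    # Example: res = 1 Ohm-m at freq = 1 Hz gives a skin depth of ~503 m,
    # hence dmin ~ 168 m for the default pps = 3.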
if min_width is not None: # Respect user input.
min_width = np.array(min_width, ndmin=1)
if min_width.size == 1:
dmin = min_width
else:
dmin = np.clip(dmin, *min_width)
# Survey domain; contains all sources and receivers.
domain = np.array(domain, dtype=float)
# Calculation domain; big enough to avoid boundary effects.
# To avoid boundary effects we want the signal to travel two wavelengths
# from the source to the boundary and back to the receiver.
# => 2*pi*sd ~ 6.3*sd = one wavelength => signal is ~ 0.2 %.
    # After two wavelengths we can safely assume it is zero.
#
# The air does not follow the concept of skin depth, as it is a wave rather
    # than diffusion. For this there is the factor `max_domain`, which restricts
# the domain in each direction to this value from the center.
# (a) Source to edges of domain.
dist_in_domain = abs(domain - fixed[0])
# (b) Two wavelengths.
two_lambda = skind[1:]*4*np.pi
# (c) Required buffer, additional to domain.
dist_buff = np.max([np.zeros(2), (two_lambda - dist_in_domain)/2], axis=0)
# (d) Add buffer to domain.
calc_domain = np.array([domain[0]-dist_buff[0], domain[1]+dist_buff[1]])
# (e) Restrict total domain to max_domain.
calc_domain[0] = max(calc_domain[0], fixed[0]-max_domain)
calc_domain[1] = min(calc_domain[1], fixed[0]+max_domain)
# Initiate flag if terminated.
finished = False
# Initiate alpha variables for survey and calculation domains.
sa, ca = 1.0, 1.0
# Loop over possible cell numbers from small to big.
for nx in np.unique(possible_nx):
# Loop over possible alphas for domain.
for sa in np.arange(1.0, alpha[0]+alpha[2]/2, alpha[2]):
# Get current stretched grid cell sizes.
thxl = dmin*sa**np.arange(nx) # Left of origin.
thxr = dmin*sa**np.arange(nx) # Right of origin.
# 0. Adjust stretching for fixed boundaries.
if fixed.size > 1: # Move mesh to first fixed boundary.
t_nx = np.r_[fixed[0], fixed[0]+np.cumsum(thxr)]
ii = np.argmin(abs(t_nx-fixed[1]))
thxr *= abs(fixed[1]-fixed[0])/np.sum(thxr[:ii])
if fixed.size > 2: # Move mesh to second fixed boundary.
t_nx = np.r_[fixed[0], fixed[0]-np.cumsum(thxl)]
ii = np.argmin(abs(t_nx-fixed[2]))
thxl *= abs(fixed[2]-fixed[0])/np.sum(thxl[:ii])
# 1. Fill from center to left domain.
nl = np.sum((fixed[0]-np.cumsum(thxl)) > domain[0])+1
# 2. Fill from center to right domain.
nr = np.sum((fixed[0]+np.cumsum(thxr)) < domain[1])+1
# 3. Get remaining number of cells and check termination criteria.
nsdc = nl+nr # Number of domain cells.
nx_remain = nx-nsdc
# Not good, try next.
if nx_remain <= 0:
continue
# Create the current hx-array.
hx = np.r_[thxl[:nl][::-1], thxr[:nr]]
hxo = np.r_[thxl[:nl][::-1], thxr[:nr]]
# Get actual domain:
asurv_domain = [fixed[0]-np.sum(thxl[:nl]),
fixed[0]+np.sum(thxr[:nr])]
x0 = float(fixed[0]-np.sum(thxl[:nl]))
# Get actual stretching (differs in case of fixed layers).
sa_adj = np.max([hx[1:]/hx[:-1], hx[:-1]/hx[1:]])
# Loop over possible alphas for calc_domain.
for ca in np.arange(sa, alpha[1]+alpha[2]/2, alpha[2]):
# 4. Fill to left calc_domain.
thxl = hx[0]*ca**np.arange(1, nx_remain+1)
nl = np.sum((asurv_domain[0]-np.cumsum(thxl)) >
calc_domain[0])+1
# 5. Fill to right calc_domain.
thxr = hx[-1]*ca**np.arange(1, nx_remain+1)
nr = np.sum((asurv_domain[1]+np.cumsum(thxr)) <
calc_domain[1])+1
# 6. Get remaining number of cells and check termination
# criteria.
ncdc = nl+nr # Number of calc_domain cells.
nx_remain2 = nx-nsdc-ncdc
if nx_remain2 < 0: # Not good, try next.
continue
# Create hx-array.
nl += int(np.floor(nx_remain2/2)) # If uneven, add one cell
nr += int(np.ceil(nx_remain2/2)) # more on the right.
hx = np.r_[thxl[:nl][::-1], hx, thxr[:nr]]
# Calculate origin.
x0 = float(asurv_domain[0]-np.sum(thxl[:nl]))
# Mark it as finished and break out of the loop.
finished = True
break
if finished:
break
if finished:
break
# Check finished and print info about found grid.
if not finished:
# Throw message if no solution was found.
print("\n* ERROR :: No suitable grid found; relax your criteria.\n")
if raise_error:
raise ArithmeticError("No grid found!")
else:
hx, x0 = None, None
elif verb > 0:
print(f" Skin depth ", end="")
if res.size == 1:
print(f" [m] : {skind[0]:.0f}")
elif res.size == 2:
print(f"(m/l-r) [m] : {skind[0]:.0f} / {skind[1]:.0f}")
else:
print(f"(m/l/r) [m] : {skind[0]:.0f} / {skind[1]:.0f} / "
f"{skind[2]:.0f}")
print(f" Survey domain [m] : {domain[0]:.0f} - "
f"{domain[1]:.0f}")
print(f" Calculation domain [m] : {calc_domain[0]:.0f} - "
f"{calc_domain[1]:.0f}")
print(f" Final extent [m] : {x0:.0f} - "
f"{x0+np.sum(hx):.0f}")
extstr = f" Min/max cell width [m] : {min(hx):.0f} / "
alstr = f" Alpha survey"
nrstr = " Number of cells "
if not np.isclose(sa, sa_adj):
sastr = f"{sa:.3f} ({sa_adj:.3f})"
else:
sastr = f"{sa:.3f}"
print(extstr+f"{max(hxo):.0f} / {max(hx):.0f}")
print(alstr+f"/calc : {sastr} / {ca:.3f}")
print(nrstr+f"(s/c/r) : {nx} ({nsdc}/{ncdc}/{nx_remain2})")
print()
if return_info:
if not fixed.size > 1:
sa_adj = sa
info = {'dmin': dmin,
'dmax': np.nanmax(hx),
'amin': np.nanmin([ca, sa, sa_adj]),
'amax': np.nanmax([ca, sa, sa_adj])}
return hx, x0, info
else:
return hx, x0
def get_cell_numbers(max_nr, max_prime=5, min_div=3):
r"""Returns 'good' cell numbers for the multigrid method.
'Good' cell numbers are numbers which can be divided by 2 as many times as
possible. At the end there will be a low prime number.
The function adds all numbers :math:`p 2^n \leq M` for :math:`p={2, 3, ...,
p_\text{max}}` and :math:`n={n_\text{min}, n_\text{min}+1, ..., \infty}`;
:math:`M, p_\text{max}, n_\text{min}` correspond to `max_nr`, `max_prime`,
and `min_div`, respectively.
Parameters
----------
max_nr : int
Maximum number of cells.
max_prime : int
Highest permitted prime number p for p*2^n. {2, 3, 5, 7} are good upper
limits in order to avoid too big lowest grids in the multigrid method.
Default is 5.
min_div : int
Minimum times the number can be divided by two.
Default is 3.
Returns
-------
numbers : array
Array containing all possible cell numbers from lowest to highest.
"""
# Primes till 20.
primes = np.array([2, 3, 5, 7, 11, 13, 17, 19])
# Sanity check; 19 is already ridiculously high.
if max_prime > primes[-1]:
print(f"* ERROR :: Highest prime is {max_prime}, "
"please use a value < 20.")
raise ValueError("Highest prime too high")
# Restrict to max_prime.
primes = primes[primes <= max_prime]
# Get possible values.
# Currently restricted to prime*2**30 (for prime=2 => 1,073,741,824 cells).
numbers = primes[:, None]*2**np.arange(min_div, 30)
# Get unique values.
numbers = np.unique(numbers)
# Restrict to max_nr and return.
return numbers[numbers <= max_nr]
def get_stretched_h(min_width, domain, nx, x0=0, x1=None, resp_domain=False):
"""Return cell widths for a stretched grid within the domain.
Returns `nx` cell widths within `domain`, where the minimum cell width is
`min_width`. The cells are not stretched within `x0` and `x1`, and outside
uses a power-law stretching. The actual stretching factor and the number of
    cells left and right of `x0` and `x1` are found in a minimization process.
The domain is not completely respected. The starting point of the domain
is, but the endpoint of the domain might slightly shift (this is more
likely the case for small `nx`, for big `nx` the shift should be small).
The new endpoint can be obtained with ``domain[0]+np.sum(hx)``. If you want
the domain to be respected absolutely, set ``resp_domain=True``. However,
be aware that this will introduce one stretch-factor which is different
from the other stretch factors, to accommodate the restriction. This
one-off factor is between the left- and right-side of `x0`, or, if `x1` is
provided, just after `x1`.
See Also
--------
    get_hx_h0 : Get `hx` and `x0` for a flexible number of `nx` with
given bounds.
Parameters
----------
min_width : float
Minimum cell width. If x1 is provided, the actual minimum cell width
might be smaller than min_width.
domain : list
[start, end] of model domain.
nx : int
Number of cells.
x0 : float
Center of the grid. `x0` is restricted to `domain`.
Default is 0.
x1 : float
If provided, then no stretching is applied between `x0` and `x1`. The
non-stretched part starts at `x0` and stops at the first possible
        location at or after `x1`. `x1` is restricted to `domain`. This will
        adjust `min_width` so that an integer number of cells fit within `x0`
        and `x1`.
resp_domain : bool
If False (default), then the domain-end might shift slightly to assure
that the same stretching factor is applied throughout. If set to True,
however, the domain is respected absolutely. This will introduce one
stretch-factor which is different from the other stretch factors, to
accommodate the restriction. This one-off factor is between the left-
and right-side of `x0`, or, if `x1` is provided, just after `x1`.
Returns
-------
hx : ndarray
Cell widths of mesh.
"""
# Cast to arrays
domain = np.array(domain, dtype=float)
x0 = np.array(x0, dtype=float)
x0 = np.clip(x0, *domain) # Restrict to model domain
min_width = np.array(min_width, dtype=float)
if x1 is not None:
x1 = np.array(x1, dtype=float)
x1 = np.clip(x1, *domain) # Restrict to model domain
# If x1 is provided (a part is not stretched)
if x1 is not None:
# Store original values
xlim_orig = domain.copy()
nx_orig = int(nx)
x0_orig = x0.copy()
h_min_orig = min_width.copy()
# Get number of non-stretched cells
n_nos = int(np.ceil((x1-x0)/min_width))
# Re-calculate min_width to fit with x0-x1-limits:
min_width = (x1-x0)/n_nos
# Subtract one cell, because the standard scheme provides one
# min_width-cell.
n_nos -= 1
# Reset x0, because the first min_width comes from normal scheme
x0 += min_width
# Reset xmax for normal scheme
domain[1] -= n_nos*min_width
# Reset nx for normal scheme
nx -= n_nos
# If there are not enough points reset to standard procedure. The limit
# of five is arbitrary. However, nx should be much bigger than five
        # anyway; otherwise a stretched grid doesn't make sense.
if nx <= 5:
print("Warning :: Not enough points for non-stretched part,"
"ignoring therefore `x1`.")
domain = xlim_orig
nx = nx_orig
x0 = x0_orig
x1 = None
min_width = h_min_orig
# Get stretching factor (a = 1+alpha).
if min_width == 0 or min_width > np.diff(domain)/nx:
# If min_width is bigger than the domain-extent divided by nx, no
# stretching is required at all.
alpha = 0
else:
        # Wrap get_hx into a minimization function to call with fsolve.
def find_alpha(alpha, min_width, args):
"""Find alpha such that min(hx) = min_width."""
return min(get_hx(alpha, *args))/min_width-1
# Search for best alpha, must be at least 0
args = (domain, nx, x0)
alpha = max(0, optimize.fsolve(find_alpha, 0.02, (min_width, args)))
# With alpha get actual cell spacing with `resp_domain` to respect the
    # user's decision.
hx = get_hx(alpha, domain, nx, x0, resp_domain)
# Add the non-stretched center if x1 is provided
if x1 is not None:
hx = np.r_[hx[: np.argmin(hx)], np.ones(n_nos)*min_width,
hx[np.argmin(hx):]]
    # Print a warning if min_width could not be respected.
if abs(hx.min() - min_width) > 0.1:
print(f"Warning :: Minimum cell width ({np.round(hx.min(), 2)} m) is "
"below `min_width`, because `nx` is too big for `domain`.")
return hx
def get_domain(x0=0, freq=1, res=0.3, limits=None, min_width=None,
fact_min=0.2, fact_neg=5, fact_pos=None):
r"""Get domain extent and minimum cell width as a function of skin depth.
Returns the extent of the calculation domain and the minimum cell width as
a multiple of the skin depth, with possible user restrictions on minimum
calculation domain and range of possible minimum cell widths.
.. math::
\delta &= 503.3 \sqrt{\frac{\rho}{f}} , \\
x_\text{start} &= x_0-k_\text{neg}\delta , \\
x_\text{end} &= x_0+k_\text{pos}\delta , \\
h_\text{min} &= k_\text{min} \delta .
Parameters
----------
x0 : float
Center of the calculation domain. Normally the source location.
Default is 0.
freq : float
Frequency (Hz) to calculate the skin depth. The skin depth is a concept
defined in the frequency domain. If a negative frequency is provided,
it is assumed that the calculation is carried out in the Laplace
domain. To calculate the skin depth, the value of `freq` is then
multiplied by :math:`-2\pi`, to simulate the closest
frequency-equivalent.
Default is 1 Hz.
res : float, optional
Resistivity (Ohm m) to calculate skin depth.
Default is 0.3 Ohm m (sea water).
limits : None or list
[start, end] of model domain. This extent represents the minimum extent
of the domain. The domain is therefore only adjusted if it has to reach
outside of [start, end].
Default is None.
min_width : None, float, or list of two floats
Minimum cell width is calculated as a function of skin depth:
fact_min*sd. If `min_width` is a float, this is used. If a list of
two values [min, max] are provided, they are used to restrain
min_width. Default is None.
fact_min, fact_neg, fact_pos : floats
The skin depth is multiplied with these factors to estimate:
- Minimum cell width (`fact_min`, default 0.2)
- Domain-start (`fact_neg`, default 5), and
- Domain-end (`fact_pos`, defaults to `fact_neg`).
Returns
-------
h_min : float
Minimum cell width.
domain : list
Start- and end-points of calculation domain.
"""
# Set fact_pos to fact_neg if not provided.
if fact_pos is None:
fact_pos = fact_neg
# Calculate the skin depth.
skind = 503.3*np.sqrt(res/abs(freq))
if freq < 0: # For Laplace-domain calculations.
skind /= np.sqrt(2*np.pi)
# Estimate minimum cell width.
h_min = fact_min*skind
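    # Example: the defaults (0.3 Ohm-m at 1 Hz) give skind ~ 276 m, hence
    # h_min ~ 55 m and a domain of roughly +/- 1380 m around x0.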
if min_width is not None: # Respect user input.
if np.array(min_width).size == 1:
h_min = min_width
else:
h_min = np.clip(h_min, *min_width)
# Estimate calculation domain.
domain = [x0-fact_neg*skind, x0+fact_pos*skind]
if limits is not None: # Respect user input.
domain = [min(limits[0], domain[0]), max(limits[1], domain[1])]
return h_min, domain
def get_hx(alpha, domain, nx, x0, resp_domain=True):
r"""Return cell widths for given input.
Find the number of cells left and right of `x0`, `nl` and `nr`
respectively, for the provided alpha. For this, we solve
.. math:: \frac{x_\text{max}-x_0}{x_0-x_\text{min}} =
\frac{a^{nr}-1}{a^{nl}-1}
where :math:`a = 1+\alpha`.
Parameters
----------
alpha : float
Stretching factor `a` is given by ``a=1+alpha``.
domain : list
[start, end] of model domain.
nx : int
Number of cells.
x0 : float
Center of the grid. `x0` is restricted to `domain`.
resp_domain : bool
If False (default), then the domain-end might shift slightly to assure
that the same stretching factor is applied throughout. If set to True,
however, the domain is respected absolutely. This will introduce one
stretch-factor which is different from the other stretch factors, to
accommodate the restriction. This one-off factor is between the left-
and right-side of `x0`, or, if `x1` is provided, just after `x1`.
Returns
-------
hx : ndarray
Cell widths of mesh.
"""
if alpha <= 0.: # If alpha <= 0: equal spacing (no stretching at all)
hx = np.ones(nx)*np.diff(np.squeeze(domain))/nx
else: # Get stretched hx
a = alpha+1
# Get hx depending if x0 is on the domain boundary or not.
if np.isclose(x0, domain[0]) or np.isclose(x0, domain[1]):
            # Get all a's
alr = np.diff(domain)*alpha/(a**nx-1)*a**np.arange(nx)
if x0 == domain[1]:
alr = alr[::-1]
# Calculate differences
hx = alr*np.diff(domain)/sum(alr)
else:
# Find number of elements left and right by solving:
# (xmax-x0)/(x0-xmin) = a**nr-1/(a**nl-1)
nr = np.arange(2, nx+1)
er = (domain[1]-x0)/(x0-domain[0]) - (a**nr[::-1]-1)/(a**nr-1)
nl = np.argmin(abs(np.floor(er)))+1
nr = nx-nl
# Get all a's
al = a**np.arange(nl-1, -1, -1)
ar = a**np.arange(1, nr+1)
# Calculate differences
if resp_domain:
# This version honours domain[0] and domain[1], but to achieve
# this it introduces one stretch-factor which is different from
# all the others between al to ar.
hx = np.r_[al*(x0-domain[0])/sum(al),
ar*(domain[1]-x0)/sum(ar)]
else:
# This version moves domain[1], but each stretch-factor is
# exactly the same.
fact = (x0-domain[0])/sum(al) # Take distance from al.
hx = np.r_[al, ar]*fact
                # Note: this hx is equivalent to providing the following h
# to TensorMesh:
# h = [(min_width, nl-1, -a), (min_width, n_nos+1),
# (min_width, nr, a)]
return hx
| en | 0.798831 | :mod:`meshes` -- Discretization =============================== Everything related to meshes appropriate for the multigrid solver. # Copyright 2018-2020 The emg3d Developers. # # This file is part of emg3d. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. Rudimentary mesh for multigrid calculation. The tensor-mesh :class:`discretize.TensorMesh` is a powerful tool, including sophisticated mesh-generation possibilities in 1D, 2D, and 3D, plotting routines, and much more. However, in the multigrid solver we have to generate a mesh at each level, many times over and over again, and we only need a very limited set of attributes. This tensor-mesh class provides all required attributes. All attributes here are the same as their counterparts in :class:`discretize.TensorMesh` (both in name and value). .. warning:: This is a slimmed-down version of :class:`discretize.TensorMesh`, meant principally for internal use by the multigrid modeller. It is highly recommended to use :class:`discretize.TensorMesh` to create the input meshes instead of this class. There are no input-checks carried out here, and there is only one accepted input format for `h` and `x0`. Parameters ---------- h : list of three ndarrays Cell widths in [x, y, z] directions. x0 : ndarray of dimension (3, ) Origin (x, y, z). Initialize the mesh. # Width of cells. # Cell related properties. # Node related properties. # Edge related properties. Simple representation. Return a copy of the TensorMesh. Store the necessary information of the TensorMesh in a dict. Convert dictionary into :class:`TensorMesh` instance. Parameters ---------- inp : dict Dictionary as obtained from :func:`TensorMesh.to_dict`. The dictionary needs the keys `hx`, `hy`, `hz`, and `x0`. Returns ------- obj : :class:`TensorMesh` instance Construct cell volumes of the 3D model as 1D array. Return cell widths and origin for given parameters. Returns cell widths for the provided frequency, resistivity, domain extent, and other parameters using a flexible amount of cells. See input parameters for more details. A maximum of three hard/fixed boundaries can be provided (one of which is the grid center). The minimum cell width is calculated through :math:`\delta/\rm{pps}`, where the skin depth is given by :math:`\delta = 503.3 \sqrt{\rho/f}`, and the parameter `pps` stands for 'points-per-skindepth'. The minimum cell width can be restricted with the parameter `min_width`. The actual calculation domain adds a buffer zone around the (survey) domain. The thickness of the buffer is six times the skin depth. The field is basically zero after two wavelengths. A wavelength is :math:`2\pi\delta`, hence roughly 6 times the skin depth. Taking a factor 6 gives therefore almost two wavelengths, as the field travels to the boundary and back. The actual buffer thickness can be steered with the `res` parameter. 
One has to take into account that the air is very resistive, which has to be considered not just in the vertical direction, but also in the horizontal directions, as the airwave will bounce back from the sides otherwise. In the marine case this issue reduces with increasing water depth. See Also -------- get_stretched_h : Get `hx` for a fixed number `nx` and within a fixed domain. Parameters ---------- freq : float Frequency (Hz) to calculate the skin depth. The skin depth is a concept defined in the frequency domain. If a negative frequency is provided, it is assumed that the calculation is carried out in the Laplace domain. To calculate the skin depth, the value of `freq` is then multiplied by :math:`-2\pi`, to simulate the closest frequency-equivalent. res : float or list Resistivity (Ohm m) to calculate the skin depth. The skin depth is used to calculate the minimum cell width and the boundary thicknesses. Up to three resistivities can be provided: - float: Same resistivity for everything; - [min_width, boundaries]; - [min_width, left boundary, right boundary]. domain : list Contains the survey-domain limits [min, max]. The actual calculation domain consists of this domain plus a buffer zone around it, which depends on frequency and resistivity. fixed : list, optional Fixed boundaries, one, two, or maximum three values. The grid is centered around the first value. Hence it is the center location with the smallest cell. Two more fixed boundaries can be added, at most one on each side of the first one. Default is 0. possible_nx : list, optional List of possible numbers of cells. See :func:`get_cell_numbers`. Default is ``get_cell_numbers(500, 5, 3)``, which corresponds to [16, 24, 32, 40, 48, 64, 80, 96, 128, 160, 192, 256, 320, 384]. min_width : float, list or None, optional Minimum cell width restriction: - None : No restriction; - float : Fixed to this value, ignoring skin depth and `pps`. - list [min, max] : Lower and upper bounds. Default is None. pps : int, optional Points per skindepth; minimum cell width is calculated via `dmin = skindepth/pps`. Default = 3. alpha : list, optional Maximum alpha and step size to find a good alpha. The first value is the maximum alpha of the survey domain, the second value is the maximum alpha for the buffer zone, and the third value is the step size. Default = [1, 1.5, .01], hence no stretching within the survey domain and a maximum stretching of 1.5 in the buffer zone; step size is 0.01. max_domain : float, optional Maximum calculation domain from fixed[0] (usually source position). Default is 100,000. raise_error : bool, optional If True, an error is raised if no suitable grid is found. Otherwise it just prints a message and returns None's. Default is True. verb : int, optional Verbosity, 0 or 1. Default = 1. return_info : bool If True, a dictionary is returned with some grid info (min and max cell width and alpha). Returns ------- hx : ndarray Cell widths of mesh. x0 : float Origin of the mesh. info : dict Dictionary with mesh info; only if ``return_info=True``. Keys: - `dmin`: Minimum cell width; - `dmax`: Maximum cell width; - `amin`: Minimum alpha; - `amax`: Maximum alpha. # Get variables with default lists: # Cast resistivity value(s). # Cast and check fixed. # Check length. # Sort second and third, so it doesn't matter how it was provided. # Check side. # Calculate skin depth. # For Laplace-domain calculations. # Minimum cell width. # Respect user input. # Survey domain; contains all sources and receivers. 
# Calculation domain; big enough to avoid boundary effects. # To avoid boundary effects we want the signal to travel two wavelengths # from the source to the boundary and back to the receiver. # => 2*pi*sd ~ 6.3*sd = one wavelength => signal is ~ 0.2 %. # Two wavelengths we can safely assume it is zero. # # The air does not follow the concept of skin depth, as it is a wave rather # than diffusion. For this is the factor `max_domain`, which restricts # the domain in each direction to this value from the center. # (a) Source to edges of domain. # (b) Two wavelengths. # (c) Required buffer, additional to domain. # (d) Add buffer to domain. # (e) Restrict total domain to max_domain. # Initiate flag if terminated. # Initiate alpha variables for survey and calculation domains. # Loop over possible cell numbers from small to big. # Loop over possible alphas for domain. # Get current stretched grid cell sizes. # Left of origin. # Right of origin. # 0. Adjust stretching for fixed boundaries. # Move mesh to first fixed boundary. # Move mesh to second fixed boundary. # 1. Fill from center to left domain. # 2. Fill from center to right domain. # 3. Get remaining number of cells and check termination criteria. # Number of domain cells. # Not good, try next. # Create the current hx-array. # Get actual domain: # Get actual stretching (differs in case of fixed layers). # Loop over possible alphas for calc_domain. # 4. Fill to left calc_domain. # 5. Fill to right calc_domain. # 6. Get remaining number of cells and check termination # criteria. # Number of calc_domain cells. # Not good, try next. # Create hx-array. # If uneven, add one cell # more on the right. # Calculate origin. # Mark it as finished and break out of the loop. # Check finished and print info about found grid. # Throw message if no solution was found. Returns 'good' cell numbers for the multigrid method. 'Good' cell numbers are numbers which can be divided by 2 as many times as possible. At the end there will be a low prime number. The function adds all numbers :math:`p 2^n \leq M` for :math:`p={2, 3, ..., p_\text{max}}` and :math:`n={n_\text{min}, n_\text{min}+1, ..., \infty}`; :math:`M, p_\text{max}, n_\text{min}` correspond to `max_nr`, `max_prime`, and `min_div`, respectively. Parameters ---------- max_nr : int Maximum number of cells. max_prime : int Highest permitted prime number p for p*2^n. {2, 3, 5, 7} are good upper limits in order to avoid too big lowest grids in the multigrid method. Default is 5. min_div : int Minimum times the number can be divided by two. Default is 3. Returns ------- numbers : array Array containing all possible cell numbers from lowest to highest. # Primes till 20. # Sanity check; 19 is already ridiculously high. # Restrict to max_prime. # Get possible values. # Currently restricted to prime*2**30 (for prime=2 => 1,073,741,824 cells). # Get unique values. # Restrict to max_nr and return. Return cell widths for a stretched grid within the domain. Returns `nx` cell widths within `domain`, where the minimum cell width is `min_width`. The cells are not stretched within `x0` and `x1`, and outside uses a power-law stretching. The actual stretching factor and the number of cells left and right of `x0` and `x1` are find in a minimization process. The domain is not completely respected. The starting point of the domain is, but the endpoint of the domain might slightly shift (this is more likely the case for small `nx`, for big `nx` the shift should be small). 
The new endpoint can be obtained with ``domain[0]+np.sum(hx)``. If you want the domain to be respected absolutely, set ``resp_domain=True``. However, be aware that this will introduce one stretch-factor which is different from the other stretch factors, to accommodate the restriction. This one-off factor is between the left- and right-side of `x0`, or, if `x1` is provided, just after `x1`. See Also -------- get_hx_x0 : Get `hx` and `x0` for a flexible number of `nx` with given bounds. Parameters ---------- min_width : float Minimum cell width. If x1 is provided, the actual minimum cell width might be smaller than min_width. domain : list [start, end] of model domain. nx : int Number of cells. x0 : float Center of the grid. `x0` is restricted to `domain`. Default is 0. x1 : float If provided, then no stretching is applied between `x0` and `x1`. The non-stretched part starts at `x0` and stops at the first possible location at or after `x1`. `x1` is restricted to `domain`. This will min_width so that an integer number of cells fit within x0 and x1. resp_domain : bool If False (default), then the domain-end might shift slightly to assure that the same stretching factor is applied throughout. If set to True, however, the domain is respected absolutely. This will introduce one stretch-factor which is different from the other stretch factors, to accommodate the restriction. This one-off factor is between the left- and right-side of `x0`, or, if `x1` is provided, just after `x1`. Returns ------- hx : ndarray Cell widths of mesh. # Cast to arrays # Restrict to model domain # Restrict to model domain # If x1 is provided (a part is not stretched) # Store original values # Get number of non-stretched cells # Re-calculate min_width to fit with x0-x1-limits: # Subtract one cell, because the standard scheme provides one # min_width-cell. # Reset x0, because the first min_width comes from normal scheme # Reset xmax for normal scheme # Reset nx for normal scheme # If there are not enough points reset to standard procedure. The limit # of five is arbitrary. However, nx should be much bigger than five # anyways, otherwise stretched grid doesn't make sense. # Get stretching factor (a = 1+alpha). # If min_width is bigger than the domain-extent divided by nx, no # stretching is required at all. # Wrap _get_dx into a minimization function to call with fsolve. Find alpha such that min(hx) = min_width. # Search for best alpha, must be at least 0 # With alpha get actual cell spacing with `resp_domain` to respect the # users decision. # Add the non-stretched center if x1 is provided # Print warning min_width could not be respected. Get domain extent and minimum cell width as a function of skin depth. Returns the extent of the calculation domain and the minimum cell width as a multiple of the skin depth, with possible user restrictions on minimum calculation domain and range of possible minimum cell widths. .. math:: \delta &= 503.3 \sqrt{\frac{\rho}{f}} , \\ x_\text{start} &= x_0-k_\text{neg}\delta , \\ x_\text{end} &= x_0+k_\text{pos}\delta , \\ h_\text{min} &= k_\text{min} \delta . Parameters ---------- x0 : float Center of the calculation domain. Normally the source location. Default is 0. freq : float Frequency (Hz) to calculate the skin depth. The skin depth is a concept defined in the frequency domain. If a negative frequency is provided, it is assumed that the calculation is carried out in the Laplace domain. 
To calculate the skin depth, the value of `freq` is then multiplied by :math:`-2\pi`, to simulate the closest frequency-equivalent. Default is 1 Hz. res : float, optional Resistivity (Ohm m) to calculate skin depth. Default is 0.3 Ohm m (sea water). limits : None or list [start, end] of model domain. This extent represents the minimum extent of the domain. The domain is therefore only adjusted if it has to reach outside of [start, end]. Default is None. min_width : None, float, or list of two floats Minimum cell width is calculated as a function of skin depth: fact_min*sd. If `min_width` is a float, this is used. If a list of two values [min, max] are provided, they are used to restrain min_width. Default is None. fact_min, fact_neg, fact_pos : floats The skin depth is multiplied with these factors to estimate: - Minimum cell width (`fact_min`, default 0.2) - Domain-start (`fact_neg`, default 5), and - Domain-end (`fact_pos`, defaults to `fact_neg`). Returns ------- h_min : float Minimum cell width. domain : list Start- and end-points of calculation domain. # Set fact_pos to fact_neg if not provided. # Calculate the skin depth. # For Laplace-domain calculations. # Estimate minimum cell width. # Respect user input. # Estimate calculation domain. # Respect user input. Return cell widths for given input. Find the number of cells left and right of `x0`, `nl` and `nr` respectively, for the provided alpha. For this, we solve .. math:: \frac{x_\text{max}-x_0}{x_0-x_\text{min}} = \frac{a^{nr}-1}{a^{nl}-1} where :math:`a = 1+\alpha`. Parameters ---------- alpha : float Stretching factor `a` is given by ``a=1+alpha``. domain : list [start, end] of model domain. nx : int Number of cells. x0 : float Center of the grid. `x0` is restricted to `domain`. resp_domain : bool If False (default), then the domain-end might shift slightly to assure that the same stretching factor is applied throughout. If set to True, however, the domain is respected absolutely. This will introduce one stretch-factor which is different from the other stretch factors, to accommodate the restriction. This one-off factor is between the left- and right-side of `x0`, or, if `x1` is provided, just after `x1`. Returns ------- hx : ndarray Cell widths of mesh. # If alpha <= 0: equal spacing (no stretching at all) # Get stretched hx # Get hx depending if x0 is on the domain boundary or not. # Get al a's # Calculate differences # Find number of elements left and right by solving: # (xmax-x0)/(x0-xmin) = a**nr-1/(a**nl-1) # Get all a's # Calculate differences # This version honours domain[0] and domain[1], but to achieve # this it introduces one stretch-factor which is different from # all the others between al to ar. # This version moves domain[1], but each stretch-factor is # exactly the same. # Take distance from al. # Note: this hx is equivalent as providing the following h # to TensorMesh: # h = [(min_width, nl-1, -a), (min_width, n_nos+1), # (min_width, nr, a)] | 2.000068 | 2 |
QUICK_START/NODE_SQUEEZESEG_CLUSTER/src/script/squeezeseg/utils/clock.py | Hqss/DINK | 189 | 10051 | <reponame>Hqss/DINK
#! /usr/bin/python2
# -*- coding: utf-8 -*-
"""
Clock function to take running time following Segmatch.
"""
# BSD 3-Clause License
#
# Copyright (c) 2019, FPAI
# Copyright (c) 2019, SeriouslyHAO
# Copyright (c) 2019, xcj2019
# Copyright (c) 2019, Leonfirst
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
class Clock(object):
def __init__(self):
self.kSecondsToMiliseconds = 1000.0
self.kMicrosecondsToMiliseconds = 0.001
self.start()
def start(self):
self.real_time_start_ = datetime.datetime.now()
def takeTime(self):
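        # Elapsed wall-clock time since start(), split into whole seconds and
        # microseconds, then combined into milliseconds (plus a 0.5 ms offset).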
seconds = (datetime.datetime.now() - self.real_time_start_).seconds
useconds = (datetime.datetime.now() - self.real_time_start_).microseconds
self.real_time_ms_ = (seconds*self.kSecondsToMiliseconds + useconds*self.kMicrosecondsToMiliseconds) + 0.5
def getRealTime(self):
return self.real_time_ms_
def takeRealTime(self):
self.takeTime()
return self.getRealTime()
| #! /usr/bin/python2
# -*- coding: utf-8 -*-
"""
Clock function to take running time following Segmatch.
"""
# BSD 3-Clause License
#
# Copyright (c) 2019, FPAI
# Copyright (c) 2019, SeriouslyHAO
# Copyright (c) 2019, xcj2019
# Copyright (c) 2019, Leonfirst
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
class Clock(object):
def __init__(self):
self.kSecondsToMiliseconds = 1000.0
self.kMicrosecondsToMiliseconds = 0.001
self.start()
def start(self):
self.real_time_start_ = datetime.datetime.now()
def takeTime(self):
seconds = (datetime.datetime.now() - self.real_time_start_).seconds
useconds = (datetime.datetime.now() - self.real_time_start_).microseconds
self.real_time_ms_ = (seconds*self.kSecondsToMiliseconds + useconds*self.kMicrosecondsToMiliseconds) + 0.5
def getRealTime(self):
return self.real_time_ms_
def takeRealTime(self):
self.takeTime()
return self.getRealTime() | en | 0.732176 | #! /usr/bin/python2 # -*- coding: utf-8 -*- Clock function to take running time following Segmatch. # BSD 3-Clause License # # Copyright (c) 2019, FPAI # Copyright (c) 2019, SeriouslyHAO # Copyright (c) 2019, xcj2019 # Copyright (c) 2019, Leonfirst # # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 2.590278 | 3 |
office365/sharepoint/view_collection.py | andebor/Office365-REST-Python-Client | 0 | 10052 | <gh_stars>0
from office365.runtime.client_object_collection import ClientObjectCollection
from office365.runtime.resource_path_service_operation import ResourcePathServiceOperation
from office365.sharepoint.view import View
class ViewCollection(ClientObjectCollection):
"""Represents a collection of View resources."""
def __init__(self, context, resource_path=None):
super(ViewCollection, self).__init__(context, View, resource_path)
def get_by_title(self, view_title):
"""Gets the list view with the specified title."""
return View(self.context,
ResourcePathServiceOperation(self.context, self.resource_path, "GetByTitle", [view_title]))
def get_by_id(self, view_id):
"""Gets the list view with the specified ID."""
return View(self.context,
ResourcePathServiceOperation(self.context, self.resource_path, "GetById", [view_id]))
| from office365.runtime.client_object_collection import ClientObjectCollection
from office365.runtime.resource_path_service_operation import ResourcePathServiceOperation
from office365.sharepoint.view import View
class ViewCollection(ClientObjectCollection):
"""Represents a collection of View resources."""
def __init__(self, context, resource_path=None):
super(ViewCollection, self).__init__(context, View, resource_path)
def get_by_title(self, view_title):
"""Gets the list view with the specified title."""
return View(self.context,
ResourcePathServiceOperation(self.context, self.resource_path, "GetByTitle", [view_title]))
def get_by_id(self, view_id):
"""Gets the list view with the specified ID."""
return View(self.context,
ResourcePathServiceOperation(self.context, self.resource_path, "GetById", [view_id])) | en | 0.657427 | Represents a collection of View resources. Gets the list view with the specified title. Gets the list view with the specified ID. | 2.255264 | 2 |
common-scrapers/common_src/scrapers/second_extinction.py | mrPaintMan/blog-scraper | 0 | 10053 | from common_src.lib.model.post import Post
from common_src.lib.model.source import Source
from common_src.scrapers.abstract_scraper import make_soup, remove_dups, now
SOURCE_CODE = "second_extinction"
WEBSITE = "https://www.secondextinctiongame.com/news"
ALT_IMAGE = 'https://www.secondextinctiongame.com/static/242486b363d867dc483deb6d7038dde1/d8255/se_screenshot_5.jpg'
FILENAME = "../resources/data/second_extinction.txt"
def get_source():
name = "Second Extinction"
description = 'Second Extinction is a first person shooter game where earth has been invaded by mutated dinosaurs.'
profile_image = 'https://www.secondextinctiongame.com/static/logo-0d52f8575a251eff8ebd6e2d6bd6c51b.png'
return Source(SOURCE_CODE, name, description, profile_image, ALT_IMAGE, None)
def scrape():
soup = make_soup(WEBSITE)
base_site = "https://www.secondextinctiongame.com"
data = []
for post in soup.findAll("article", {"class": "cgYILD"}):
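        # Build a 12-digit YYYYMMDDHHMM stamp: drop the dashes from the
        # article date and append "0000" (midnight).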
date = post.find("time").text.replace("-", "") + "0000"
title = post.find("h3").text.strip()
link = base_site + post.find("a").get("href")
alt_image = ALT_IMAGE
image = base_site + post.find("picture").find("img").get("src").replace(" ", "%20")
data.append(Post(None, date, title, link, image, alt_image, SOURCE_CODE, None))
if len(data) % 25 == 0:
print(now() + f"Processed {len(data)} posts")
return remove_dups(data)
| from common_src.lib.model.post import Post
from common_src.lib.model.source import Source
from common_src.scrapers.abstract_scraper import make_soup, remove_dups, now
SOURCE_CODE = "second_extinction"
WEBSITE = "https://www.secondextinctiongame.com/news"
ALT_IMAGE = 'https://www.secondextinctiongame.com/static/242486b363d867dc483deb6d7038dde1/d8255/se_screenshot_5.jpg'
FILENAME = "../resources/data/second_extinction.txt"
def get_source():
name = "Second Extinction"
description = 'Second Extinction is a first person shooter game where earth has been invaded by mutated dinosaurs.'
profile_image = 'https://www.secondextinctiongame.com/static/logo-0d52f8575a251eff8ebd6e2d6bd6c51b.png'
return Source(SOURCE_CODE, name, description, profile_image, ALT_IMAGE, None)
def scrape():
soup = make_soup(WEBSITE)
base_site = "https://www.secondextinctiongame.com"
data = []
for post in soup.findAll("article", {"class": "cgYILD"}):
date = post.find("time").text.replace("-", "") + "0000"
title = post.find("h3").text.strip()
link = base_site + post.find("a").get("href")
alt_image = ALT_IMAGE
image = base_site + post.find("picture").find("img").get("src").replace(" ", "%20")
data.append(Post(None, date, title, link, image, alt_image, SOURCE_CODE, None))
if len(data) % 25 == 0:
print(now() + f"Processed {len(data)} posts")
return remove_dups(data)
| none | 1 | 2.902182 | 3 |
|
systems/ILSVRC12/AlexNet/alexnet.py | mdatres/quantlab | 7 | 10054 | #
# alexnet.py
#
# Author(s):
# <NAME> <<EMAIL>>
#
# Copyright (c) 2020-2021 ETH Zurich.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
import torch.nn as nn
class AlexNet(nn.Module):
def __init__(self, use_bn: bool, num_classes: int = 1000, seed : int = -1) -> None:
super(AlexNet, self).__init__()
self.features = self._make_features(use_bn)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = self._make_classifier(num_classes)
self._initialize_weights(seed)
def _make_features(self, use_bn: bool) -> nn.Sequential:
modules = []
# conv 1
modules += [nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2, bias=not use_bn)]
modules += [nn.BatchNorm2d(64)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# max pool
modules += [nn.MaxPool2d(kernel_size=3, stride=2)]
# conv 2
modules += [nn.Conv2d(64, 192, kernel_size=5, padding=2, bias=not use_bn)]
modules += [nn.BatchNorm2d(192)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# max pool
modules += [nn.MaxPool2d(kernel_size=3, stride=2)]
# conv 3
modules += [nn.Conv2d(192, 384, kernel_size=3, padding=1, bias=not use_bn)]
modules += [nn.BatchNorm2d(384)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# conv 4
modules += [nn.Conv2d(384, 256, kernel_size=3, padding=1, bias=not use_bn)]
modules += [nn.BatchNorm2d(256)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# conv 5
modules += [nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=not use_bn)]
modules += [nn.BatchNorm2d(256)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# max pool
modules += [nn.MaxPool2d(kernel_size=3, stride=2)]
return nn.Sequential(*modules)
def _make_classifier(self, num_classes: int) -> nn.Sequential:
modules = []
# dropout
modules += [nn.Dropout()]
# linear 1
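        # 256 channels x 6 x 6 spatial positions from the adaptive average pool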
modules += [nn.Linear(256 * 6 * 6, 4096)]
modules += [nn.ReLU(inplace=True)]
# dropout
modules += [nn.Dropout()]
# linear 2
modules += [nn.Linear(4096, 4096)]
modules += [nn.ReLU(inplace=True)]
# linear 3
modules += [nn.Linear(4096, num_classes)]
return nn.Sequential(*modules)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self, seed: int = -1):
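        # A negative seed (the default) leaves PyTorch's global RNG state untouched.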
if seed >= 0:
torch.manual_seed(seed)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
| #
# alexnet.py
#
# Author(s):
# <NAME> <<EMAIL>>
#
# Copyright (c) 2020-2021 ETH Zurich.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
import torch.nn as nn
class AlexNet(nn.Module):
def __init__(self, use_bn: bool, num_classes: int = 1000, seed : int = -1) -> None:
super(AlexNet, self).__init__()
self.features = self._make_features(use_bn)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = self._make_classifier(num_classes)
self._initialize_weights(seed)
def _make_features(self, use_bn: bool) -> nn.Sequential:
modules = []
# conv 1
modules += [nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2, bias=not use_bn)]
modules += [nn.BatchNorm2d(64)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# max pool
modules += [nn.MaxPool2d(kernel_size=3, stride=2)]
# conv 2
modules += [nn.Conv2d(64, 192, kernel_size=5, padding=2, bias=not use_bn)]
modules += [nn.BatchNorm2d(192)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# max pool
modules += [nn.MaxPool2d(kernel_size=3, stride=2)]
# conv 3
modules += [nn.Conv2d(192, 384, kernel_size=3, padding=1, bias=not use_bn)]
modules += [nn.BatchNorm2d(384)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# conv 4
modules += [nn.Conv2d(384, 256, kernel_size=3, padding=1, bias=not use_bn)]
modules += [nn.BatchNorm2d(256)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# conv 5
modules += [nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=not use_bn)]
modules += [nn.BatchNorm2d(256)] if use_bn else []
modules += [nn.ReLU(inplace=True)]
# max pool
modules += [nn.MaxPool2d(kernel_size=3, stride=2)]
return nn.Sequential(*modules)
def _make_classifier(self, num_classes: int) -> nn.Sequential:
modules = []
# dropout
modules += [nn.Dropout()]
# linear 1
modules += [nn.Linear(256 * 6 * 6, 4096)]
modules += [nn.ReLU(inplace=True)]
# dropout
modules += [nn.Dropout()]
# linear 2
modules += [nn.Linear(4096, 4096)]
modules += [nn.ReLU(inplace=True)]
# linear 3
modules += [nn.Linear(4096, num_classes)]
return nn.Sequential(*modules)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self, seed: int = -1):
if seed >= 0:
torch.manual_seed(seed)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
| en | 0.801549 | # # alexnet.py # # Author(s): # <NAME> <<EMAIL>> # # Copyright (c) 2020-2021 ETH Zurich. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # conv 1 # max pool # conv 2 # max pool # conv 3 # conv 4 # conv 5 # max pool # dropout # linear 1 # dropout # linear 2 # linear 3 | 2.425672 | 2 |
e/mail-relay/web/apps/core/migrations/0012_auto_20151105_1442.py | zhouli121018/nodejsgm | 0 | 10055 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0011_customersetting'),
]
operations = [
migrations.AlterField(
model_name='customersetting',
name='bounce',
field=models.BooleanField(default=True, verbose_name='\u5f00\u542f\u9000\u4fe1'),
),
]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0011_customersetting'),
]
operations = [
migrations.AlterField(
model_name='customersetting',
name='bounce',
field=models.BooleanField(default=True, verbose_name='\u5f00\u542f\u9000\u4fe1'),
),
] | en | 0.769321 | # -*- coding: utf-8 -*- | 1.321897 | 1 |
madlib/main.py | FredericIV/PythonPractice | 0 | 10056 | <filename>madlib/main.py<gh_stars>0
#!/bin/python3
# Libraries
import sys
import array
import textwrap
# Variable Declaration
madlib_selection = "example.txt"
madlib_array = array.array('i')
copy_state = False
user_filler = ""
new_madlib = []
if len(sys.argv) != 1:
print(len(sys.argv))
if sys.argv[1] == "-":
print("This program takes the path to a madlib as an argument. Showing default now.")
        ## TODO: Add input validation, i.e. make sure the input is actually text.
else:
## TODO: Add pipe as input option.
madlib_selection = sys.argv[1]
with open(madlib_selection, 'r') as madlib:
read_madlib = madlib.read()
for i in range(read_madlib.count("#")//2):
first = read_madlib.index("#")
second = read_madlib.index("#", first+1)
replacement = input("Please give me " + read_madlib[first+1:second] + ":")
new_madlib = read_madlib[0:first] + replacement + read_madlib[second+1:]
read_madlib = new_madlib
print("\n\n\n")
print(textwrap.fill(read_madlib, drop_whitespace=False, replace_whitespace=False))
| <filename>madlib/main.py<gh_stars>0
#!/bin/python3
# Libraries
import sys
import array
import textwrap
# Variable Declaration
madlib_selection = "example.txt"
madlib_array = array.array('i')
copy_state = False
user_filler = ""
new_madlib = []
if len(sys.argv) != 1:
print(len(sys.argv))
if sys.argv[1] == "-":
print("This program takes the path to a madlib as an argument. Showing default now.")
        ## TODO: Add input validation, i.e. make sure the input is actually text.
else:
## TODO: Add pipe as input option.
madlib_selection = sys.argv[1]
with open(madlib_selection, 'r') as madlib:
read_madlib = madlib.read()
for i in range(read_madlib.count("#")//2):
first = read_madlib.index("#")
second = read_madlib.index("#", first+1)
replacement = input("Please give me " + read_madlib[first+1:second] + ":")
new_madlib = read_madlib[0:first] + replacement + read_madlib[second+1:]
read_madlib = new_madlib
print("\n\n\n")
print(textwrap.fill(read_madlib, drop_whitespace=False, replace_whitespace=False))
| en | 0.438125 | #!/bin/python3 # Libraries # Variable Declaration ## TODO: Add input validation, i.e. make sure the input is actully text. ## TODO: Add pipe as input option. | 3.602853 | 4 |
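The replacement loop above pairs consecutive `#` markers and splices the user's answer in between them. A self-contained sketch of the same parsing step on a hard-coded template, with the interactive input() replaced by a fixed answer list (template and answers are made up):
# --- usage sketch, not part of the original file ---
template = "The #adjective# fox jumped over the #noun#."
answers = ["quick", "lazy dog"]

for answer in answers:
    first = template.index("#")
    second = template.index("#", first + 1)
    # splice the answer in place of the '#placeholder#' span
    template = template[:first] + answer + template[second + 1:]

print(template)  # The quick fox jumped over the lazy dog.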
src/tests/control/test_devices.py | bsod85/pretix | 0 | 10057 | import pytest
from django.utils.timezone import now
from pretix.base.models import Device, Event, Organizer, Team, User
from pretix.base.models.devices import generate_api_token
@pytest.fixture
def organizer():
return Organizer.objects.create(name='Dummy', slug='dummy')
@pytest.fixture
def event(organizer):
event = Event.objects.create(
organizer=organizer, name='Dummy', slug='dummy',
date_from=now()
)
return event
@pytest.fixture
def device(organizer):
return organizer.devices.create(name='Cashdesk')
@pytest.fixture
def admin_user(admin_team):
u = User.objects.create_user('<EMAIL>', 'dummy')
admin_team.members.add(u)
return u
@pytest.fixture
def admin_team(organizer):
return Team.objects.create(organizer=organizer, can_change_organizer_settings=True, name='Admin team')
@pytest.mark.django_db
def test_list_of_devices(event, admin_user, client, device):
client.login(email='<EMAIL>', password='<PASSWORD>')
resp = client.get('/control/organizer/dummy/devices')
assert 'Cashdesk' in resp.rendered_content
@pytest.mark.django_db
def test_create_device(event, admin_user, admin_team, client):
client.login(email='<EMAIL>', password='<PASSWORD>')
resp = client.post('/control/organizer/dummy/device/add', {
'name': 'Foo',
'limit_events': str(event.pk),
}, follow=True)
d = Device.objects.last()
assert d.name == 'Foo'
assert not d.all_events
assert list(d.limit_events.all()) == [event]
assert d.initialization_token in resp.content.decode()
@pytest.mark.django_db
def test_update_device(event, admin_user, admin_team, device, client):
client.login(email='<EMAIL>', password='<PASSWORD>')
client.post('/control/organizer/dummy/device/{}/edit'.format(device.pk), {
'name': 'Cashdesk 2',
'limit_events': str(event.pk),
}, follow=True)
device.refresh_from_db()
assert device.name == 'Cashdesk 2'
assert not device.all_events
assert list(device.limit_events.all()) == [event]
@pytest.mark.django_db
def test_revoke_device(event, admin_user, admin_team, device, client):
client.login(email='<EMAIL>', password='<PASSWORD>')
device.api_token = generate_api_token()
device.initialized = now()
device.save()
client.get('/control/organizer/dummy/device/{}/revoke'.format(device.pk))
client.post('/control/organizer/dummy/device/{}/revoke'.format(device.pk), {}, follow=True)
device.refresh_from_db()
assert device.revoked
| import pytest
from django.utils.timezone import now
from pretix.base.models import Device, Event, Organizer, Team, User
from pretix.base.models.devices import generate_api_token
@pytest.fixture
def organizer():
return Organizer.objects.create(name='Dummy', slug='dummy')
@pytest.fixture
def event(organizer):
event = Event.objects.create(
organizer=organizer, name='Dummy', slug='dummy',
date_from=now()
)
return event
@pytest.fixture
def device(organizer):
return organizer.devices.create(name='Cashdesk')
@pytest.fixture
def admin_user(admin_team):
u = User.objects.create_user('<EMAIL>', 'dummy')
admin_team.members.add(u)
return u
@pytest.fixture
def admin_team(organizer):
return Team.objects.create(organizer=organizer, can_change_organizer_settings=True, name='Admin team')
@pytest.mark.django_db
def test_list_of_devices(event, admin_user, client, device):
client.login(email='<EMAIL>', password='<PASSWORD>')
resp = client.get('/control/organizer/dummy/devices')
assert 'Cashdesk' in resp.rendered_content
@pytest.mark.django_db
def test_create_device(event, admin_user, admin_team, client):
client.login(email='<EMAIL>', password='<PASSWORD>')
resp = client.post('/control/organizer/dummy/device/add', {
'name': 'Foo',
'limit_events': str(event.pk),
}, follow=True)
d = Device.objects.last()
assert d.name == 'Foo'
assert not d.all_events
assert list(d.limit_events.all()) == [event]
assert d.initialization_token in resp.content.decode()
@pytest.mark.django_db
def test_update_device(event, admin_user, admin_team, device, client):
client.login(email='<EMAIL>', password='<PASSWORD>')
client.post('/control/organizer/dummy/device/{}/edit'.format(device.pk), {
'name': 'Cashdesk 2',
'limit_events': str(event.pk),
}, follow=True)
device.refresh_from_db()
assert device.name == 'Cashdesk 2'
assert not device.all_events
assert list(device.limit_events.all()) == [event]
@pytest.mark.django_db
def test_revoke_device(event, admin_user, admin_team, device, client):
client.login(email='<EMAIL>', password='<PASSWORD>')
device.api_token = generate_api_token()
device.initialized = now()
device.save()
client.get('/control/organizer/dummy/device/{}/revoke'.format(device.pk))
client.post('/control/organizer/dummy/device/{}/revoke'.format(device.pk), {}, follow=True)
device.refresh_from_db()
assert device.revoked
| none | 1 | 1.886409 | 2 |
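The tests above rely on pytest fixture composition: event and device request organizer, and admin_user requests admin_team, so each test receives freshly created, already-linked objects. A stripped-down illustration of that pattern with plain dictionaries (hypothetical, independent of pretix and its models):
# --- illustrative fixture-composition sketch, not part of the original test module ---
import pytest

@pytest.fixture
def organizer():
    return {"name": "Dummy", "devices": []}

@pytest.fixture
def device(organizer):
    dev = {"name": "Cashdesk", "organizer": organizer}
    organizer["devices"].append(dev)
    return dev

def test_device_is_linked(organizer, device):
    assert device in organizer["devices"]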
|
mcp/augmentation/album.py | j20232/moco_image_pipeline | 5 | 10058 | <reponame>j20232/moco_image_pipeline
import numpy as np
from PIL import Image, ImageOps, ImageEnhance
import albumentations as A
# ndarray: H x W x C
def apply_aug(aug, image):
return aug(image=image)["image"]
# ----------------------------------- Blur -------------------------------------------
class RandomBlur():
def __init__(self, prob, blur_limit=9):
self.prob = np.clip(prob, 0.0, 1.0)
self.blur_limit = blur_limit
def __call__(self, img):
if np.random.uniform() < self.prob:
r = np.random.uniform()
if r < 0.4:
img = apply_aug(A.Blur(blur_limit=self.blur_limit, always_apply=True), img)
elif r < 0.6:
img = apply_aug(A.GaussianBlur(blur_limit=self.blur_limit, always_apply=True), img)
else:
img = apply_aug(A.MotionBlur(blur_limit=self.blur_limit, always_apply=True), img)
return img
# ----------------------------------- Noise -------------------------------------------
class GaussNoise():
def __init__(self, prob, var_limit=(0.0, 0.07)):
self.prob = np.clip(prob, 0.0, 1.0)
self.var_limit = var_limit
def __call__(self, img):
return apply_aug(A.GaussNoise(var_limit=self.var_limit, p=self.prob), img)
class MultiplicativeNoise():
def __init__(self, prob, var_limit=(0.6, 1.1)):
self.prob = np.clip(prob, 0.0, 1.0)
self.var_limit = var_limit
def __call__(self, img):
return apply_aug(A.MultiplicativeNoise(multiplier=self.var_limit, p=self.prob), img)
# ---------------------------------- Distortion ---------------------------------------
class GridDistortion():
def __init__(self, prob, num_steps=10, distort_limit=0.7):
self.prob = np.clip(prob, 0.0, 1.0)
self.num_steps = num_steps
self.distort_limit = distort_limit
def __call__(self, img):
return apply_aug(A.GridDistortion(p=self.prob, num_steps=self.num_steps,
distort_limit=self.distort_limit), img)
class ElasticTransform():
def __init__(self, prob, sigma=40, alpha=1, alpha_affine=15):
self.prob = np.clip(prob, 0.0, 1.0)
self.sigma = sigma
self.alpha = alpha
self.alpha_affine = alpha_affine
def __call__(self, img):
return apply_aug(A.ElasticTransform(p=self.prob, sigma=self.sigma,
alpha=self.alpha, alpha_affine=self.alpha_affine), img)
class ShiftScaleRotate():
def __init__(self, prob, shift_limit=0.0625, scale_limit=0.2, rotate_limit=20):
self.prob = prob
self.shift_limit = shift_limit
self.scale_limit = scale_limit
self.rotate_limit = rotate_limit
def __call__(self, img):
return apply_aug(A.ShiftScaleRotate(p=self.prob, shift_limit=self.shift_limit,
scale_limit=self.scale_limit,
rotate_limit=self.rotate_limit), img)
# ----------------------------------- Histogram ----------------------------------------
class HueSaturationValue():
def __init__(self, prob, hue_shift_limit=20, sat_shift_limit=40, val_shift_limit=100):
self.prob = np.clip(prob, 0.0, 1.0)
self.hue_shift_limit = hue_shift_limit
self.sat_shift_limit = sat_shift_limit
self.val_shift_limit = val_shift_limit
def __call__(self, img):
out = img if img.dtype == "uint8" else (img * 255).astype(np.uint8)
out = apply_aug(A.HueSaturationValue(p=self.prob, hue_shift_limit=self.hue_shift_limit,
sat_shift_limit=self.sat_shift_limit,
val_shift_limit=self.val_shift_limit), out)
return out if img.dtype == "uint8" else (out / 255).astype(np.float64)
class RandomBrightnessContrast():
def __init__(self, prob, brightness_limit=2.0, contrast_limit=0.6):
self.prob = np.clip(prob, 0.0, 1.0)
self.brightness_limit = brightness_limit
self.contrast_limit = contrast_limit
def __call__(self, img):
return apply_aug(A.RandomBrightnessContrast(p=self.prob,
brightness_limit=self.brightness_limit,
contrast_limit=self.contrast_limit,
brightness_by_max=False,
), img)
class RandomCLAHE():
def __init__(self, prob, clip_limit=40.0, tile_grid_size=(16, 16)):
self.prob = np.clip(prob, 0.0, 1.0)
self.clip_limit = clip_limit
self.tile_grid_size = tile_grid_size
def __call__(self, img):
out = img if img.dtype == "uint8" else (img * 255).astype(np.uint8)
out = apply_aug(A.CLAHE(p=self.prob, clip_limit=self.clip_limit,
tile_grid_size=self.tile_grid_size), out)
return out if img.dtype == "uint8" else (out / 255).astype(np.float64)
# ------------------------------------- Removal ------------------------------------------
class CoarseDropout():
def __init__(self, prob, max_holes=10, max_height=12, max_width=12):
self.prob = np.clip(prob, 0.0, 1.0)
self.max_holes = max_holes
self.max_height = max_height
self.max_width = max_width
def __call__(self, img):
return apply_aug(A.CoarseDropout(p=self.prob, max_holes=self.max_holes,
max_height=self.max_height, max_width=self.max_width,
fill_value=np.median(img)), img)
# ------------------------------------------- Augmix -------------------------------------------
# Reference: https://www.kaggle.com/haqishen/augmix-based-on-albumentations
def int_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval .
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled to
level/PARAMETER_MAX.
Returns:
An int that results from scaling `maxval` according to `level`.
"""
return int(level * maxval / 10)
def float_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval.
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled to
level/PARAMETER_MAX.
Returns:
A float that results from scaling `maxval` according to `level`.
"""
return float(level) * maxval / 10.
def sample_level(n):
return np.random.uniform(low=0.1, high=n)
def autocontrast(pil_img, _):
return ImageOps.autocontrast(pil_img)
def equalize(pil_img, _):
return ImageOps.equalize(pil_img)
def posterize(pil_img, level):
level = int_parameter(sample_level(level), 4)
return ImageOps.posterize(pil_img, 4 - level)
def rotate(pil_img, level):
degrees = int_parameter(sample_level(level), 30)
if np.random.uniform() > 0.5:
degrees = -degrees
return pil_img.rotate(degrees, resample=Image.BILINEAR)
def solarize(pil_img, level):
level = int_parameter(sample_level(level), 256)
return ImageOps.solarize(pil_img, 256 - level)
def shear_x(pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform(pil_img.size,
Image.AFFINE, (1, level, 0, 0, 1, 0),
resample=Image.BILINEAR)
def shear_y(pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform(pil_img.size,
Image.AFFINE, (1, 0, 0, level, 1, 0),
resample=Image.BILINEAR)
def translate_x(pil_img, level):
level = int_parameter(sample_level(level), pil_img.size[0] / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform(pil_img.size,
Image.AFFINE, (1, 0, level, 0, 1, 0),
resample=Image.BILINEAR)
def translate_y(pil_img, level):
level = int_parameter(sample_level(level), pil_img.size[0] / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform(pil_img.size,
Image.AFFINE, (1, 0, 0, 0, 1, level),
resample=Image.BILINEAR)
# operation that overlaps with ImageNet-C's test set
def color(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Color(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def contrast(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Contrast(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def brightness(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Brightness(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def sharpness(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Sharpness(pil_img).enhance(level)
def normalize(image):
"""Normalize input image channel-wise to zero mean and unit variance."""
return image - 127
def apply_op(image, op, severity):
# image = np.clip(image, 0, 255)
pil_img = Image.fromarray(image) # Convert to PIL.Image
pil_img = op(pil_img, severity)
return np.asarray(pil_img)
def augment_and_mix(image, severity=3, width=3, depth=-1, alpha=1.):
"""Perform AugMix augmentations and compute mixture.
Args:
image: Raw input image as float32 np.ndarray of shape (h, w, c)
severity: Severity of underlying augmentation operators (between 1 to 10).
width: Width of augmentation chain
depth: Depth of augmentation chain. -1 enables stochastic depth uniformly
from [1, 3]
alpha: Probability coefficient for Beta and Dirichlet distributions.
Returns:
mixed: Augmented and mixed image.
"""
augmentations = [
autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
translate_x, translate_y
]
ws = np.float32(np.random.dirichlet([alpha] * width))
m = np.float32(np.random.beta(alpha, alpha))
mix = np.zeros_like(image).astype(np.float32)
for i in range(width):
image_aug = image.copy()
depth = depth if depth > 0 else np.random.randint(1, 4)
for _ in range(depth):
op = np.random.choice(augmentations)
image_aug = apply_op(image_aug, op, severity)
# Preprocessing commutes since all coefficients are convex
mix += ws[i] * image_aug
# mix += ws[i] * normalize(image_aug)
mixed = (1 - m) * image + m * mix
# mixed = (1 - m) * normalize(image) + m * mix
return mixed
class RandomAugMix():
def __init__(self, prob=0.1, severity=2, width=3, depth=2, alpha=1.):
self.prob = prob
self.severity = severity
self.width = width
self.depth = depth
self.alpha = alpha
def __call__(self, img):
if np.random.uniform() > self.prob:
return img
tmp = (img * 255).astype(np.uint8) if img.dtype != "uint8" else img
out = augment_and_mix(tmp, self.severity, self.width, self.depth, self.alpha)
if type(img) is np.ndarray:
if img.dtype != "uint8":
out = (out / 255).astype(np.float64)
return out
| import numpy as np
from PIL import Image, ImageOps, ImageEnhance
import albumentations as A
# ndarray: H x W x C
def apply_aug(aug, image):
return aug(image=image)["image"]
# ----------------------------------- Blur -------------------------------------------
class RandomBlur():
def __init__(self, prob, blur_limit=9):
self.prob = np.clip(prob, 0.0, 1.0)
self.blur_limit = blur_limit
def __call__(self, img):
if np.random.uniform() < self.prob:
r = np.random.uniform()
if r < 0.4:
img = apply_aug(A.Blur(blur_limit=self.blur_limit, always_apply=True), img)
elif r < 0.6:
img = apply_aug(A.GaussianBlur(blur_limit=self.blur_limit, always_apply=True), img)
else:
img = apply_aug(A.MotionBlur(blur_limit=self.blur_limit, always_apply=True), img)
return img
# ----------------------------------- Noise -------------------------------------------
class GaussNoise():
def __init__(self, prob, var_limit=(0.0, 0.07)):
self.prob = np.clip(prob, 0.0, 1.0)
self.var_limit = var_limit
def __call__(self, img):
return apply_aug(A.GaussNoise(var_limit=self.var_limit, p=self.prob), img)
class MultiplicativeNoise():
def __init__(self, prob, var_limit=(0.6, 1.1)):
self.prob = np.clip(prob, 0.0, 1.0)
self.var_limit = var_limit
def __call__(self, img):
return apply_aug(A.MultiplicativeNoise(multiplier=self.var_limit, p=self.prob), img)
# ---------------------------------- Distortion ---------------------------------------
class GridDistortion():
def __init__(self, prob, num_steps=10, distort_limit=0.7):
self.prob = np.clip(prob, 0.0, 1.0)
self.num_steps = num_steps
self.distort_limit = distort_limit
def __call__(self, img):
return apply_aug(A.GridDistortion(p=self.prob, num_steps=self.num_steps,
distort_limit=self.distort_limit), img)
class ElasticTransform():
def __init__(self, prob, sigma=40, alpha=1, alpha_affine=15):
self.prob = np.clip(prob, 0.0, 1.0)
self.sigma = sigma
self.alpha = alpha
self.alpha_affine = alpha_affine
def __call__(self, img):
return apply_aug(A.ElasticTransform(p=self.prob, sigma=self.sigma,
alpha=self.alpha, alpha_affine=self.alpha_affine), img)
class ShiftScaleRotate():
def __init__(self, prob, shift_limit=0.0625, scale_limit=0.2, rotate_limit=20):
self.prob = prob
self.shift_limit = shift_limit
self.scale_limit = scale_limit
self.rotate_limit = rotate_limit
def __call__(self, img):
return apply_aug(A.ShiftScaleRotate(p=self.prob, shift_limit=self.shift_limit,
scale_limit=self.scale_limit,
rotate_limit=self.rotate_limit), img)
# ----------------------------------- Histogram ----------------------------------------
class HueSaturationValue():
def __init__(self, prob, hue_shift_limit=20, sat_shift_limit=40, val_shift_limit=100):
self.prob = np.clip(prob, 0.0, 1.0)
self.hue_shift_limit = hue_shift_limit
self.sat_shift_limit = sat_shift_limit
self.val_shift_limit = val_shift_limit
def __call__(self, img):
out = img if img.dtype == "uint8" else (img * 255).astype(np.uint8)
out = apply_aug(A.HueSaturationValue(p=self.prob, hue_shift_limit=self.hue_shift_limit,
sat_shift_limit=self.sat_shift_limit,
val_shift_limit=self.val_shift_limit), out)
return out if img.dtype == "uint8" else (out / 255).astype(np.float64)
class RandomBrightnessContrast():
def __init__(self, prob, brightness_limit=2.0, contrast_limit=0.6):
self.prob = np.clip(prob, 0.0, 1.0)
self.brightness_limit = brightness_limit
self.contrast_limit = contrast_limit
def __call__(self, img):
return apply_aug(A.RandomBrightnessContrast(p=self.prob,
brightness_limit=self.brightness_limit,
contrast_limit=self.contrast_limit,
brightness_by_max=False,
), img)
class RandomCLAHE():
def __init__(self, prob, clip_limit=40.0, tile_grid_size=(16, 16)):
self.prob = np.clip(prob, 0.0, 1.0)
self.clip_limit = clip_limit
self.tile_grid_size = tile_grid_size
def __call__(self, img):
out = img if img.dtype == "uint8" else (img * 255).astype(np.uint8)
out = apply_aug(A.CLAHE(p=self.prob, clip_limit=self.clip_limit,
tile_grid_size=self.tile_grid_size), out)
return out if img.dtype == "uint8" else (out / 255).astype(np.float64)
# ------------------------------------- Removal ------------------------------------------
class CoarseDropout():
def __init__(self, prob, max_holes=10, max_height=12, max_width=12):
self.prob = np.clip(prob, 0.0, 1.0)
self.max_holes = max_holes
self.max_height = max_height
self.max_width = max_width
def __call__(self, img):
return apply_aug(A.CoarseDropout(p=self.prob, max_holes=self.max_holes,
max_height=self.max_height, max_width=self.max_width,
fill_value=np.median(img)), img)
# ------------------------------------------- Augmix -------------------------------------------
# Reference: https://www.kaggle.com/haqishen/augmix-based-on-albumentations
def int_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval .
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled to
level/PARAMETER_MAX.
Returns:
An int that results from scaling `maxval` according to `level`.
"""
return int(level * maxval / 10)
def float_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval.
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled to
level/PARAMETER_MAX.
Returns:
A float that results from scaling `maxval` according to `level`.
"""
return float(level) * maxval / 10.
def sample_level(n):
return np.random.uniform(low=0.1, high=n)
def autocontrast(pil_img, _):
return ImageOps.autocontrast(pil_img)
def equalize(pil_img, _):
return ImageOps.equalize(pil_img)
def posterize(pil_img, level):
level = int_parameter(sample_level(level), 4)
return ImageOps.posterize(pil_img, 4 - level)
def rotate(pil_img, level):
degrees = int_parameter(sample_level(level), 30)
if np.random.uniform() > 0.5:
degrees = -degrees
return pil_img.rotate(degrees, resample=Image.BILINEAR)
def solarize(pil_img, level):
level = int_parameter(sample_level(level), 256)
return ImageOps.solarize(pil_img, 256 - level)
def shear_x(pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform(pil_img.size,
Image.AFFINE, (1, level, 0, 0, 1, 0),
resample=Image.BILINEAR)
def shear_y(pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform(pil_img.size,
Image.AFFINE, (1, 0, 0, level, 1, 0),
resample=Image.BILINEAR)
def translate_x(pil_img, level):
level = int_parameter(sample_level(level), pil_img.size[0] / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform(pil_img.size,
Image.AFFINE, (1, 0, level, 0, 1, 0),
resample=Image.BILINEAR)
def translate_y(pil_img, level):
level = int_parameter(sample_level(level), pil_img.size[0] / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform(pil_img.size,
Image.AFFINE, (1, 0, 0, 0, 1, level),
resample=Image.BILINEAR)
# operation that overlaps with ImageNet-C's test set
def color(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Color(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def contrast(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Contrast(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def brightness(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Brightness(pil_img).enhance(level)
# operation that overlaps with ImageNet-C's test set
def sharpness(pil_img, level):
level = float_parameter(sample_level(level), 1.8) + 0.1
return ImageEnhance.Sharpness(pil_img).enhance(level)
def normalize(image):
"""Normalize input image channel-wise to zero mean and unit variance."""
return image - 127
def apply_op(image, op, severity):
# image = np.clip(image, 0, 255)
pil_img = Image.fromarray(image) # Convert to PIL.Image
pil_img = op(pil_img, severity)
return np.asarray(pil_img)
def augment_and_mix(image, severity=3, width=3, depth=-1, alpha=1.):
"""Perform AugMix augmentations and compute mixture.
Args:
image: Raw input image as float32 np.ndarray of shape (h, w, c)
severity: Severity of underlying augmentation operators (between 1 to 10).
width: Width of augmentation chain
depth: Depth of augmentation chain. -1 enables stochastic depth uniformly
from [1, 3]
alpha: Probability coefficient for Beta and Dirichlet distributions.
Returns:
mixed: Augmented and mixed image.
"""
augmentations = [
autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
translate_x, translate_y
]
ws = np.float32(np.random.dirichlet([alpha] * width))
m = np.float32(np.random.beta(alpha, alpha))
mix = np.zeros_like(image).astype(np.float32)
for i in range(width):
image_aug = image.copy()
depth = depth if depth > 0 else np.random.randint(1, 4)
for _ in range(depth):
op = np.random.choice(augmentations)
image_aug = apply_op(image_aug, op, severity)
# Preprocessing commutes since all coefficients are convex
mix += ws[i] * image_aug
# mix += ws[i] * normalize(image_aug)
mixed = (1 - m) * image + m * mix
# mixed = (1 - m) * normalize(image) + m * mix
return mixed
class RandomAugMix():
def __init__(self, prob=0.1, severity=2, width=3, depth=2, alpha=1.):
self.prob = prob
self.severity = severity
self.width = width
self.depth = depth
self.alpha = alpha
def __call__(self, img):
if np.random.uniform() > self.prob:
return img
tmp = (img * 255).astype(np.uint8) if img.dtype != "uint8" else img
out = augment_and_mix(tmp, self.severity, self.width, self.depth, self.alpha)
if type(img) is np.ndarray:
if img.dtype != "uint8":
out = (out / 255).astype(np.float64)
return out | en | 0.722461 | # ndarray: H x W x C # ----------------------------------- Blur ------------------------------------------- # ----------------------------------- Noise ------------------------------------------- # ---------------------------------- Distortion --------------------------------------- # ----------------------------------- Histogram ---------------------------------------- # ------------------------------------- Removal ------------------------------------------ # ------------------------------------------- Augmix ------------------------------------------- # Reference: https://www.kaggle.com/haqishen/augmix-based-on-albumentations Helper function to scale `val` between 0 and maxval . Args: level: Level of the operation that will be between [0, `PARAMETER_MAX`]. maxval: Maximum value that the operation can have. This will be scaled to level/PARAMETER_MAX. Returns: An int that results from scaling `maxval` according to `level`. Helper function to scale `val` between 0 and maxval. Args: level: Level of the operation that will be between [0, `PARAMETER_MAX`]. maxval: Maximum value that the operation can have. This will be scaled to level/PARAMETER_MAX. Returns: A float that results from scaling `maxval` according to `level`. # operation that overlaps with ImageNet-C's test set # operation that overlaps with ImageNet-C's test set # operation that overlaps with ImageNet-C's test set # operation that overlaps with ImageNet-C's test set Normalize input image channel-wise to zero mean and unit variance. # image = np.clip(image, 0, 255) # Convert to PIL.Image Perform AugMix augmentations and compute mixture. Args: image: Raw input image as float32 np.ndarray of shape (h, w, c) severity: Severity of underlying augmentation operators (between 1 to 10). width: Width of augmentation chain depth: Depth of augmentation chain. -1 enables stochastic depth uniformly from [1, 3] alpha: Probability coefficient for Beta and Dirichlet distributions. Returns: mixed: Augmented and mixed image. # Preprocessing commutes since all coefficients are convex # mix += ws[i] * normalize(image_aug) # mixed = (1 - m) * normalize(image) + m * mix | 2.565988 | 3 |
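The core of augment_and_mix above is a convex combination of several augmentation chains: per-chain weights ws are drawn from a Dirichlet distribution and the final blend weight m from a Beta distribution. A minimal numpy sketch of just that mixing step, using a trivial stand-in "augmentation" (all values and names here are illustrative):
# --- illustrative sketch of the AugMix mixing step, not part of the original file ---
import numpy as np

rng = np.random.default_rng(0)
image = rng.random((8, 8, 3)).astype(np.float32)

width, alpha = 3, 1.0
ws = rng.dirichlet([alpha] * width).astype(np.float32)  # per-chain weights, sum to 1
m = np.float32(rng.beta(alpha, alpha))                   # blend between original and mixture

mix = np.zeros_like(image)
for w in ws:
    # stand-in for a chain of sampled augmentations
    image_aug = np.clip(image + rng.normal(0, 0.05, image.shape), 0, 1).astype(np.float32)
    mix += w * image_aug

mixed = (1 - m) * image + m * mix
print(mixed.shape, float(mixed.min()), float(mixed.max()))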
website/util/sanitize.py | bdyetton/prettychart | 0 | 10059 | # -*- coding: utf-8 -*-
import bleach
import json
def strip_html(unclean):
"""Sanitize a string, removing (as opposed to escaping) HTML tags
:param unclean: A string to be stripped of HTML tags
:return: stripped string
:rtype: str
"""
return bleach.clean(unclean, strip=True, tags=[], attributes=[], styles=[])
def clean_tag(data):
"""Format as a valid Tag
:param data: A string to be cleaned
:return: cleaned string
:rtype: str
"""
# TODO: make this a method of Tag?
    return escape_html(data).replace('"', '&quot;').replace("'", '&#39;')
def is_iterable_but_not_string(obj):
"""Return True if ``obj`` is an iterable object that isn't a string."""
return (hasattr(obj, '__iter__') and not hasattr(obj, 'strip'))
def escape_html(data):
"""Escape HTML characters in data.
:param data: A string, dict, or list to clean of HTML characters
:return: A cleaned object
:rtype: str or list or dict
"""
if isinstance(data, dict):
return {
key: escape_html(value)
for (key, value) in data.iteritems()
}
if is_iterable_but_not_string(data):
return [
escape_html(value)
for value in data
]
if isinstance(data, basestring):
return bleach.clean(data)
return data
def assert_clean(data):
"""Ensure that data is cleaned
:raise: AssertionError
"""
def _ensure_clean(value):
if value != bleach.clean(value):
raise ValueError
return escape_html(data)
# TODO: Remove safe_unescape_html when mako html safe comes in
def safe_unescape_html(value):
"""
Return data without html escape characters.
:param value: A string, dict, or list
:return: A string or list or dict without html escape characters
"""
safe_characters = {
        '&amp;': '&',
        '&lt;': '<',
        '&gt;': '>',
}
if isinstance(value, dict):
return {
key: safe_unescape_html(value)
for (key, value) in value.iteritems()
}
if is_iterable_but_not_string(value):
return [
safe_unescape_html(each)
for each in value
]
if isinstance(value, basestring):
for escape_sequence, character in safe_characters.items():
value = value.replace(escape_sequence, character)
return value
return value
def safe_json(value):
"""
Dump a string to JSON in a manner that can be used for JS strings in mako templates.
Providing additional forward-slash escaping to prevent injection of closing markup in strings. See:
http://benalpert.com/2012/08/03/preventing-xss-json.html
:param value: A string to be converted
:return: A JSON-formatted string that explicitly escapes forward slashes when needed
"""
return json.dumps(value).replace('</', '<\\/') # Fix injection of closing markup in strings
| # -*- coding: utf-8 -*-
import bleach
import json
def strip_html(unclean):
"""Sanitize a string, removing (as opposed to escaping) HTML tags
:param unclean: A string to be stripped of HTML tags
:return: stripped string
:rtype: str
"""
return bleach.clean(unclean, strip=True, tags=[], attributes=[], styles=[])
def clean_tag(data):
"""Format as a valid Tag
:param data: A string to be cleaned
:return: cleaned string
:rtype: str
"""
# TODO: make this a method of Tag?
    return escape_html(data).replace('"', '&quot;').replace("'", '&#39;')
def is_iterable_but_not_string(obj):
"""Return True if ``obj`` is an iterable object that isn't a string."""
return (hasattr(obj, '__iter__') and not hasattr(obj, 'strip'))
def escape_html(data):
"""Escape HTML characters in data.
:param data: A string, dict, or list to clean of HTML characters
:return: A cleaned object
:rtype: str or list or dict
"""
if isinstance(data, dict):
return {
key: escape_html(value)
for (key, value) in data.iteritems()
}
if is_iterable_but_not_string(data):
return [
escape_html(value)
for value in data
]
if isinstance(data, basestring):
return bleach.clean(data)
return data
def assert_clean(data):
"""Ensure that data is cleaned
:raise: AssertionError
"""
def _ensure_clean(value):
if value != bleach.clean(value):
raise ValueError
return escape_html(data)
# TODO: Remove safe_unescape_html when mako html safe comes in
def safe_unescape_html(value):
"""
Return data without html escape characters.
:param value: A string, dict, or list
:return: A string or list or dict without html escape characters
"""
safe_characters = {
        '&amp;': '&',
        '&lt;': '<',
        '&gt;': '>',
}
if isinstance(value, dict):
return {
key: safe_unescape_html(value)
for (key, value) in value.iteritems()
}
if is_iterable_but_not_string(value):
return [
safe_unescape_html(each)
for each in value
]
if isinstance(value, basestring):
for escape_sequence, character in safe_characters.items():
value = value.replace(escape_sequence, character)
return value
return value
def safe_json(value):
"""
Dump a string to JSON in a manner that can be used for JS strings in mako templates.
Providing additional forward-slash escaping to prevent injection of closing markup in strings. See:
http://benalpert.com/2012/08/03/preventing-xss-json.html
:param value: A string to be converted
:return: A JSON-formatted string that explicitly escapes forward slashes when needed
"""
return json.dumps(value).replace('</', '<\\/') # Fix injection of closing markup in strings
| en | 0.651422 | # -*- coding: utf-8 -*- Sanitize a string, removing (as opposed to escaping) HTML tags :param unclean: A string to be stripped of HTML tags :return: stripped string :rtype: str Format as a valid Tag :param data: A string to be cleaned :return: cleaned string :rtype: str # TODO: make this a method of Tag? #39') Return True if ``obj`` is an iterable object that isn't a string. Escape HTML characters in data. :param data: A string, dict, or list to clean of HTML characters :return: A cleaned object :rtype: str or list or dict Ensure that data is cleaned :raise: AssertionError # TODO: Remove safe_unescape_html when mako html safe comes in Return data without html escape characters. :param value: A string, dict, or list :return: A string or list or dict without html escape characters Dump a string to JSON in a manner that can be used for JS strings in mako templates. Providing additional forward-slash escaping to prevent injection of closing markup in strings. See: http://benalpert.com/2012/08/03/preventing-xss-json.html :param value: A string to be converted :return: A JSON-formatted string that explicitly escapes forward slashes when needed # Fix injection of closing markup in strings | 3.376708 | 3 |
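Taken together, these helpers escape on the way in and selectively unescape on the way out. A short round-trip sketch of that flow using bleach directly plus the same safe-character mapping; it assumes the bleach package is installed, and the sample strings are made up:
# --- usage sketch, not part of the original module ---
import bleach

raw = '<script>alert("hi")</script> & <b>bold</b>'
escaped = bleach.clean(raw)  # escapes disallowed markup, as escape_html() does for plain strings
print(escaped)

# reverse only the "safe" entities, mirroring safe_unescape_html
safe_characters = {'&amp;': '&', '&lt;': '<', '&gt;': '>'}
unescaped = escaped
for sequence, character in safe_characters.items():
    unescaped = unescaped.replace(sequence, character)
print(unescaped)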
mainTrain.py | PolarizedLightFieldMicroscopy/LFMNet2 | 0 | 10060 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.utils import data
from torch import optim
import torchvision.models as models
from torch.autograd import Variable
import torchvision as tv
import random
import math
import time
from datetime import datetime
import os
import argparse
import subprocess
from util.LFUtil import *
import numpy as np
from networks.LFMNet import LFMNet
def main(args=None):
# # Arguments
# parser = argparse.ArgumentParser()
# # Number of epochs
# parser.add_argument('--epochs', type=int, default=1000)
# # Validate every n percentage of the data
# parser.add_argument('--valEvery', type=float, default=0.25)
# # Image indices to use for training and validation
# parser.add_argument('--imagesToUse', nargs='+', type=int, default=list(range(0,5,1)))
# # List of GPUs to use: 0 1 2 for example
# parser.add_argument('--GPUs', nargs='+', type=int, default=None)
# # Batch size
# parser.add_argument('--batchSize', type=int, default=128)
    # # Percentage of the data to use for validation, from 0 to 1
# parser.add_argument('--validationSplit', type=float, default=0.1)
# # Bias initialization value
# parser.add_argument('--biasVal', type=float, default=0.1)
# # Learning rate
# parser.add_argument('--learningRate', type=float, default=0.001)
# # Use bias flag
# parser.add_argument('--useBias', type=str2bool, default=True)
# # Use skip connections flag
# parser.add_argument('--useSkipCon', type=str2bool, default=False)
# # User selected random seed
# parser.add_argument('--randomSeed', type=int, default=None)
    # # fov of input or neighborhood around lenslet to reconstruct
# parser.add_argument('--fovInput', type=int, default=9)
# # nT number of lenslets to reconstruct simultaneously use at training time
# parser.add_argument('--neighShape', type=int, default=3)
# # Flag to use shallow or large U-net
# parser.add_argument('--useShallowUnet', type=str2bool, default=True)
# # Lower threshold of GT stacks, to get rid of autofluorescence
# parser.add_argument('--ths', type=float, default=0.03)
# # Path to dataset
# parser.add_argument('--datasetPath', nargs='?', default="BrainLFMConfocalDataset/Brain_40x_64Depths_362imgs.h5")
# # Path to directory where models and tensorboard logs are stored
# parser.add_argument('--outputPath', nargs='?', default="runs/")
# # Prefix for current output folder
# parser.add_argument('--outputPrefix', nargs='?', default="")
# # Path to model in case of continuing a training
# parser.add_argument('--checkpointPath', nargs='?', default=None)
# args = parser.parse_args()
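    # If the argparse block above were re-enabled, a hypothetical invocation could look like:
    #   python mainTrain.py --epochs 1000 --batchSize 128 --GPUs 0 --learningRate 0.001 \
    #       --datasetPath BrainLFMConfocalDataset/Brain_40x_64Depths_362imgs.h5 --outputPath runs/
    # As written, main(args) instead expects a pre-built namespace carrying these same attribute names.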
nImgs = len(args.imagesToUse)
# Setup multithreading
num_workers = getThreads()
if num_workers!=0:
torch.set_num_threads(num_workers)
if not torch.cuda.is_available():
print("GPU initialization error")
exit(-1)
if torch.cuda.is_available():
print ("Cuda is available")
device_id = torch.cuda.current_device()
gpu_properties = torch.cuda.get_device_properties(device_id)
print("Found %d GPUs available. Using GPU %d (%s) of compute capability %d.%d with "
"%.1fGb total memory.\n" %
(torch.cuda.device_count(),
device_id,
gpu_properties.name,
gpu_properties.major,
gpu_properties.minor,
gpu_properties.total_memory / 1e9))
# Select GPUs to use
args.GPUs = list(range(torch.cuda.device_count())) if args.GPUs is None else args.GPUs
print('Using GPUs: ' + str(args.GPUs))
device_ids = args.GPUs
# Set common random seed
if args.randomSeed is not None:
np.random.seed(args.randomSeed)
torch.manual_seed(args.randomSeed)
# Load checkpoint if provided
if args.checkpointPath is not None:
checkpointPath = args.checkpointPath
checkpoint = torch.load(checkpointPath)
# overwrite args
args = checkpoint['args']
args.checkpointPath = checkpointPath
# set Device to use
device = torch.device("cuda:"+str(device_ids[0]) if torch.cuda.is_available() else "cpu")
# Create unique label
today = datetime.now()
# Get commit number
# label = subprocess.check_output(["git", "describe", "--always"]).strip()
#specific to MBL lab workstation
label = subprocess.check_output(["C:/Program Files/git/bin/git", "describe", "--always"]).strip()
comment = today.strftime('%Y_%m_%d__%H%M%S') + "_"+ str(args.useBias) +"B_"+str(args.biasVal)+"bias_" + str(nImgs) + \
"I_"+ str(args.batchSize)+"BS_"+str(args.useSkipCon)+"Sk_" + str(args.fovInput) + "FOV_" + str(args.neighShape) + "nT_" \
+ str(args.ths) + "ths_" + str(label.decode("utf-8") ) + "_commit__" + args.outputPrefix
# Create output folder
save_folder = args.outputPath + "/" + comment
# If asked to continue a training, save in the same folder
if args.checkpointPath is not None:
save_folder = os.path.split(args.checkpointPath)[0]
print(save_folder)
# Create summary writer to log stuff
writer = SummaryWriter(log_dir=save_folder)
writer.add_text('Description',comment,0)
writer.flush()
# Load dataset
all_data = Dataset(args.datasetPath, args.randomSeed, \
fov=args.fovInput, neighShape=args.neighShape, img_indices=args.imagesToUse, get_full_imgs=False, center_region=None)
# Split validation and testing
train_size = int((1 - args.validationSplit) * len(all_data))
test_size = len(all_data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(all_data, [train_size, test_size])
# Create data loaders
train_dataset = data.DataLoader(train_dataset, batch_size=args.batchSize,
shuffle=True, num_workers=num_workers, pin_memory=True)
test_dataset = data.DataLoader(test_dataset, batch_size=args.batchSize,
shuffle=True, num_workers=num_workers, pin_memory=True)
validate_every = np.round(len(train_dataset)*args.valEvery)
# Get Dataset information
nDepths = all_data.get_n_depths()
volShape, LFshape = all_data.__shape__()
LFshape = LFshape[0:4]
lateralTile = int(math.sqrt(nDepths))
# Find normalization values
maxInputTrain, maxVolumeTrain = all_data.get_max()
maxInputTest, maxVolumeTest = all_data.get_max()
# Create network
net = LFMNet(nDepths, args.useBias, args.useSkipCon, LFshape, LFfov=args.fovInput, use_small_unet=args.useShallowUnet).to(device)
optimizer = optim.Adam(net.parameters(), lr=args.learningRate)
lossFunction = nn.L1Loss()
# Create SSIM criteria
ssim = SSIM()
ssim.eval()
# Init bias and weights if needed
if args.useBias:
def bias_init(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv3d):
if m.bias is not None:
nn.init.constant_(m.bias.data, args.biasVal)
nn.init.kaiming_normal_(m.weight)
if isinstance(m, nn.ConvTranspose2d):
nn.init.constant_(m.bias.data, args.biasVal)
nn.init.kaiming_normal_(m.weight)
net.apply(bias_init)
# Load network from checkpoint
if args.checkpointPath is not None:
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epochStart = checkpoint['epoch']
epochs = args.epochs + epochStart
train_loss = checkpoint['loss']
# Start distributed data parallel, as it's faster than DataParallel
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '1234'+str(device_ids[0])
torch.distributed.init_process_group(backend="nccl", rank=0, world_size=1)
# Move network to distributed data parallel
net = nn.parallel.DistributedDataParallel(net, device_ids=args.GPUs, output_device=args.GPUs[0]).to(device)
# timers
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
global_it_counter = 0
# define indices to grab for tensorboard visualization
indices_to_show = torch.randperm(test_size)[0:8]
# Init arrays to store losses
train_losses, test_losses = [], []
test_loss = 0
    # Only reset the starting epoch when training from scratch; resuming from a checkpoint already set it
    if args.checkpointPath is None:
        epochStart = 0
# Start training
for epoch in range(epochStart, args.epochs):
net.train()
torch.set_grad_enabled(True)
torch.cuda.empty_cache()
train_loss = 0
print('Training')
global_it_counter = 0
for nBatch,(inputs,labels) in enumerate(train_dataset):
# compute current iteration
curr_it = epoch*len(train_dataset) + nBatch
# start timer
start.record()
print('ep: ' + str(epoch) + ' ' + str(nBatch+1) + '/' + str(len(train_dataset)) + ' currIt: ' + str(curr_it))
optimizer.zero_grad()
# load data to gpu and normalize from 0 to 1
            inputGPU = inputs.float().to(device) / maxInputTrain  # normalize training inputs with the training constant
outputsGT = labels.float().to(device) / maxVolumeTrain
# Threshold GT to get rid of autofluorescence
if args.ths!=0:
outputsGT = imadjust(outputsGT, args.ths,outputsGT.max(), outputsGT.min(), outputsGT.max())
# Predict
outputsVol = net(inputGPU)
loss = lossFunction(outputsGT,outputsVol)
loss.backward()
train_loss += loss.item() / nDepths
optimizer.step()
global_it_counter += inputs.shape[0]
# Record training time
end.record()
torch.cuda.synchronize()
end_time = start.elapsed_time(end)
# Compute time per sample
elapsed_time = end_time/inputs.shape[0]
# Check if validation is required
if nBatch%validate_every==0:
print(comment)
# Write training images to tensorboard
lastBatchSize = min(outputsGT.shape[0],4)
gridOut2 = torch.cat((outputsGT[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach(), outputsVol[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach()), dim=0)
gridOut2 = tv.utils.make_grid(gridOut2, normalize=True, scale_each=False)
# Select some images in the batch for showing
indices_to_display = torch.randperm(inputGPU.shape[0])[0:4]
outputsGT = F.interpolate(outputsGT[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
outputsVol = F.interpolate(outputsVol[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
inputGPU = inputGPU[indices_to_display,:,:,:,:,:]
currPred = convert3Dto2DTiles(outputsVol, [lateralTile, lateralTile])
currGT = convert3Dto2DTiles(outputsGT, [lateralTile, lateralTile])
inputGrid = LF2Spatial(inputGPU, inputGPU.shape[2:])
gridPred = tv.utils.make_grid(currPred,normalize=True, scale_each=False)
gridGT = tv.utils.make_grid(currGT,normalize=True, scale_each=False)
gridInput = tv.utils.make_grid(inputGrid,normalize=True, scale_each=False)
gt = outputsGT[0,:,:,:,:].sum(3).repeat(3,1,1)
gt /= gt.max()
# Write to tensorboard
writer.add_image('z_proj_train',gt,curr_it)
writer.add_image('images_train_YZ_projection', gridOut2, curr_it)
writer.add_image('outputRGB_train', gridPred, curr_it)
writer.add_image('outputRGB_train_GT', gridGT, curr_it)
writer.add_image('input_train', gridInput, curr_it)
writer.add_scalar('Loss/train', train_loss/global_it_counter, curr_it)
writer.add_scalar('times/train', elapsed_time, curr_it)
# Restart
train_loss = 0.0
global_it_counter = 0
print('Validating')
net.eval()
with torch.no_grad():
avg_psnr = 0
avg_ssim = 0
test_loss = 0
start.record()
for nBatch,(inputs,labels) in enumerate(test_dataset):
inputGPU = inputs.float().to(device) / maxInputTest
outputsGT = labels.float().to(device) / maxVolumeTrain
# Threshold GT to get rid of autofluorescence
outputsGT = imadjust(outputsGT,args.ths,outputsGT.max(), outputsGT.min(), outputsGT.max())
outputsVol = net(inputGPU)
loss = lossFunction(outputsGT,outputsVol)
test_loss += loss.item() / nDepths
# Compute PSNR
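                        # PSNR = 10 * log10(MAX^2 / MSE); MAX is 1 here because both volumes are normalized to [0, 1]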
lossMSE = nn.functional.mse_loss(outputsVol.to(device).detach(), outputsGT.to(device).detach())
avg_psnr += 10 * math.log10(1 / lossMSE.item())
# Compute ssim
avg_ssim += ssim(outputsVol[:,0,:,:,:].permute(0,3,1,2).contiguous().detach().to(device), outputsGT[:,0,:,:,:].permute(0,3,1,2).contiguous().detach().to(device)).sum()
end.record()
torch.cuda.synchronize()
lastBatchSize = min(outputsGT.shape[0],4)
gridOut2 = torch.cat((outputsGT[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach(), outputsVol[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach()), dim=0)
gridOut2 = tv.utils.make_grid(gridOut2, normalize=True, scale_each=False)
# process some for showing
indices_to_display = torch.randperm(inputGPU.shape[0])[0:lastBatchSize]
outputsGT = F.interpolate(outputsGT[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
outputsVol = F.interpolate(outputsVol[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
inputGPU = inputGPU[indices_to_display,:,:,:,:,:]
currPred = convert3Dto2DTiles(outputsVol, [lateralTile, lateralTile])
currGT = convert3Dto2DTiles(outputsGT, [lateralTile, lateralTile])
inputGrid = LF2Spatial(inputGPU, inputGPU.shape[2:])
gridPred = tv.utils.make_grid(currPred,normalize=True, scale_each=False)
gridGT = tv.utils.make_grid(currGT,normalize=True, scale_each=False)
gridInput = tv.utils.make_grid(inputGrid,normalize=True, scale_each=False)
# Write to tensorboard
writer.add_image('images_val_YZ_projection', gridOut2, curr_it)
writer.add_image('outputRGB_test', gridPred, curr_it)
writer.add_image('outputRGB_test_GT', gridGT, curr_it)
writer.add_image('input_test', gridInput, curr_it)
writer.add_scalar('Loss/test', test_loss/len(test_dataset), curr_it)
writer.add_scalar('Loss/psnr_val', avg_psnr/len(test_dataset), curr_it)
writer.add_scalar('Loss/ssim_val', avg_ssim/len(test_dataset), curr_it)
writer.add_scalar('LearningRate', args.learningRate, curr_it)
writer.add_scalar('times/val', start.elapsed_time(end)/test_size, curr_it)
net.train()
if epoch%2==0:
torch.save({
'epoch': epoch,
'args' : args,
'model_state_dict': net.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': train_loss,
'dataset_path': args.datasetPath},
save_folder + '/model_'+str(epoch))
print(f"Epoch {epoch + 1}/{args.epochs}.. "
f"Train loss: {train_loss / len(train_dataset):.7f}.. "
f"Test loss: {test_loss / len(test_dataset):.7f}.. ")
if __name__ == '__main__':
main() | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.utils import data
from torch import optim
import torchvision.models as models
from torch.autograd import Variable
import torchvision as tv
import random
import math
import time
from datetime import datetime
import os
import argparse
import subprocess
from util.LFUtil import *
import numpy as np
from networks.LFMNet import LFMNet
def main(args=None):
# # Arguments
# parser = argparse.ArgumentParser()
# # Number of epochs
# parser.add_argument('--epochs', type=int, default=1000)
# # Validate every n percentage of the data
# parser.add_argument('--valEvery', type=float, default=0.25)
# # Image indices to use for training and validation
# parser.add_argument('--imagesToUse', nargs='+', type=int, default=list(range(0,5,1)))
# # List of GPUs to use: 0 1 2 for example
# parser.add_argument('--GPUs', nargs='+', type=int, default=None)
# # Batch size
# parser.add_argument('--batchSize', type=int, default=128)
    # # Percentage of the data to use for validation, from 0 to 1
# parser.add_argument('--validationSplit', type=float, default=0.1)
# # Bias initialization value
# parser.add_argument('--biasVal', type=float, default=0.1)
# # Learning rate
# parser.add_argument('--learningRate', type=float, default=0.001)
# # Use bias flag
# parser.add_argument('--useBias', type=str2bool, default=True)
# # Use skip connections flag
# parser.add_argument('--useSkipCon', type=str2bool, default=False)
# # User selected random seed
# parser.add_argument('--randomSeed', type=int, default=None)
    # # fov of input or neighborhood around lenslet to reconstruct
# parser.add_argument('--fovInput', type=int, default=9)
# # nT number of lenslets to reconstruct simultaneously use at training time
# parser.add_argument('--neighShape', type=int, default=3)
# # Flag to use shallow or large U-net
# parser.add_argument('--useShallowUnet', type=str2bool, default=True)
# # Lower threshold of GT stacks, to get rid of autofluorescence
# parser.add_argument('--ths', type=float, default=0.03)
# # Path to dataset
# parser.add_argument('--datasetPath', nargs='?', default="BrainLFMConfocalDataset/Brain_40x_64Depths_362imgs.h5")
# # Path to directory where models and tensorboard logs are stored
# parser.add_argument('--outputPath', nargs='?', default="runs/")
# # Prefix for current output folder
# parser.add_argument('--outputPrefix', nargs='?', default="")
# # Path to model in case of continuing a training
# parser.add_argument('--checkpointPath', nargs='?', default=None)
# args = parser.parse_args()
nImgs = len(args.imagesToUse)
# Setup multithreading
num_workers = getThreads()
if num_workers!=0:
torch.set_num_threads(num_workers)
if not torch.cuda.is_available():
print("GPU initialization error")
exit(-1)
if torch.cuda.is_available():
print ("Cuda is available")
device_id = torch.cuda.current_device()
gpu_properties = torch.cuda.get_device_properties(device_id)
print("Found %d GPUs available. Using GPU %d (%s) of compute capability %d.%d with "
"%.1fGb total memory.\n" %
(torch.cuda.device_count(),
device_id,
gpu_properties.name,
gpu_properties.major,
gpu_properties.minor,
gpu_properties.total_memory / 1e9))
# Select GPUs to use
args.GPUs = list(range(torch.cuda.device_count())) if args.GPUs is None else args.GPUs
print('Using GPUs: ' + str(args.GPUs))
device_ids = args.GPUs
# Set common random seed
if args.randomSeed is not None:
np.random.seed(args.randomSeed)
torch.manual_seed(args.randomSeed)
# Load checkpoint if provided
if args.checkpointPath is not None:
checkpointPath = args.checkpointPath
checkpoint = torch.load(checkpointPath)
# overwrite args
args = checkpoint['args']
args.checkpointPath = checkpointPath
# set Device to use
device = torch.device("cuda:"+str(device_ids[0]) if torch.cuda.is_available() else "cpu")
# Create unique label
today = datetime.now()
# Get commit number
# label = subprocess.check_output(["git", "describe", "--always"]).strip()
#specific to MBL lab workstation
label = subprocess.check_output(["C:/Program Files/git/bin/git", "describe", "--always"]).strip()
comment = today.strftime('%Y_%m_%d__%H%M%S') + "_"+ str(args.useBias) +"B_"+str(args.biasVal)+"bias_" + str(nImgs) + \
"I_"+ str(args.batchSize)+"BS_"+str(args.useSkipCon)+"Sk_" + str(args.fovInput) + "FOV_" + str(args.neighShape) + "nT_" \
+ str(args.ths) + "ths_" + str(label.decode("utf-8") ) + "_commit__" + args.outputPrefix
# Create output folder
save_folder = args.outputPath + "/" + comment
# If asked to continue a training, save in the same folder
if args.checkpointPath is not None:
save_folder = os.path.split(args.checkpointPath)[0]
print(save_folder)
# Create summary writer to log stuff
writer = SummaryWriter(log_dir=save_folder)
writer.add_text('Description',comment,0)
writer.flush()
# Load dataset
all_data = Dataset(args.datasetPath, args.randomSeed, \
fov=args.fovInput, neighShape=args.neighShape, img_indices=args.imagesToUse, get_full_imgs=False, center_region=None)
# Split validation and testing
train_size = int((1 - args.validationSplit) * len(all_data))
test_size = len(all_data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(all_data, [train_size, test_size])
# Create data loaders
train_dataset = data.DataLoader(train_dataset, batch_size=args.batchSize,
shuffle=True, num_workers=num_workers, pin_memory=True)
test_dataset = data.DataLoader(test_dataset, batch_size=args.batchSize,
shuffle=True, num_workers=num_workers, pin_memory=True)
validate_every = np.round(len(train_dataset)*args.valEvery)
# Get Dataset information
nDepths = all_data.get_n_depths()
volShape, LFshape = all_data.__shape__()
LFshape = LFshape[0:4]
lateralTile = int(math.sqrt(nDepths))
# Find normalization values
maxInputTrain, maxVolumeTrain = all_data.get_max()
maxInputTest, maxVolumeTest = all_data.get_max()
# Create network
net = LFMNet(nDepths, args.useBias, args.useSkipCon, LFshape, LFfov=args.fovInput, use_small_unet=args.useShallowUnet).to(device)
optimizer = optim.Adam(net.parameters(), lr=args.learningRate)
lossFunction = nn.L1Loss()
# Create SSIM criteria
ssim = SSIM()
ssim.eval()
# Init bias and weights if needed
if args.useBias:
def bias_init(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv3d):
if m.bias is not None:
nn.init.constant_(m.bias.data, args.biasVal)
nn.init.kaiming_normal_(m.weight)
if isinstance(m, nn.ConvTranspose2d):
nn.init.constant_(m.bias.data, args.biasVal)
nn.init.kaiming_normal_(m.weight)
net.apply(bias_init)
# Load network from checkpoint
if args.checkpointPath is not None:
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epochStart = checkpoint['epoch']
epochs = args.epochs + epochStart
train_loss = checkpoint['loss']
# Start distributed data parallel, as it's faster than DataParallel
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '1234'+str(device_ids[0])
torch.distributed.init_process_group(backend="nccl", rank=0, world_size=1)
# Move network to distributed data parallel
net = nn.parallel.DistributedDataParallel(net, device_ids=args.GPUs, output_device=args.GPUs[0]).to(device)
# timers
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
global_it_counter = 0
# define indices to grab for tensorboard visualization
indices_to_show = torch.randperm(test_size)[0:8]
# Init arrays to store losses
train_losses, test_losses = [], []
test_loss = 0
    # Only reset the starting epoch when training from scratch; resuming from a checkpoint already set it
    if args.checkpointPath is None:
        epochStart = 0
# Start training
for epoch in range(epochStart, args.epochs):
net.train()
torch.set_grad_enabled(True)
torch.cuda.empty_cache()
train_loss = 0
print('Training')
global_it_counter = 0
for nBatch,(inputs,labels) in enumerate(train_dataset):
# compute current iteration
curr_it = epoch*len(train_dataset) + nBatch
# start timer
start.record()
print('ep: ' + str(epoch) + ' ' + str(nBatch+1) + '/' + str(len(train_dataset)) + ' currIt: ' + str(curr_it))
optimizer.zero_grad()
# load data to gpu and normalize from 0 to 1
            inputGPU = inputs.float().to(device) / maxInputTrain  # normalize training inputs with the training constant
outputsGT = labels.float().to(device) / maxVolumeTrain
# Threshold GT to get rid of autofluorescence
if args.ths!=0:
outputsGT = imadjust(outputsGT, args.ths,outputsGT.max(), outputsGT.min(), outputsGT.max())
# Predict
outputsVol = net(inputGPU)
loss = lossFunction(outputsGT,outputsVol)
loss.backward()
train_loss += loss.item() / nDepths
optimizer.step()
global_it_counter += inputs.shape[0]
# Record training time
end.record()
torch.cuda.synchronize()
end_time = start.elapsed_time(end)
# Compute time per sample
elapsed_time = end_time/inputs.shape[0]
# Check if validation is required
if nBatch%validate_every==0:
print(comment)
# Write training images to tensorboard
lastBatchSize = min(outputsGT.shape[0],4)
gridOut2 = torch.cat((outputsGT[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach(), outputsVol[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach()), dim=0)
gridOut2 = tv.utils.make_grid(gridOut2, normalize=True, scale_each=False)
# Select some images in the batch for showing
indices_to_display = torch.randperm(inputGPU.shape[0])[0:4]
outputsGT = F.interpolate(outputsGT[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
outputsVol = F.interpolate(outputsVol[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
inputGPU = inputGPU[indices_to_display,:,:,:,:,:]
currPred = convert3Dto2DTiles(outputsVol, [lateralTile, lateralTile])
currGT = convert3Dto2DTiles(outputsGT, [lateralTile, lateralTile])
inputGrid = LF2Spatial(inputGPU, inputGPU.shape[2:])
gridPred = tv.utils.make_grid(currPred,normalize=True, scale_each=False)
gridGT = tv.utils.make_grid(currGT,normalize=True, scale_each=False)
gridInput = tv.utils.make_grid(inputGrid,normalize=True, scale_each=False)
gt = outputsGT[0,:,:,:,:].sum(3).repeat(3,1,1)
gt /= gt.max()
# Write to tensorboard
writer.add_image('z_proj_train',gt,curr_it)
writer.add_image('images_train_YZ_projection', gridOut2, curr_it)
writer.add_image('outputRGB_train', gridPred, curr_it)
writer.add_image('outputRGB_train_GT', gridGT, curr_it)
writer.add_image('input_train', gridInput, curr_it)
writer.add_scalar('Loss/train', train_loss/global_it_counter, curr_it)
writer.add_scalar('times/train', elapsed_time, curr_it)
# Restart
train_loss = 0.0
global_it_counter = 0
print('Validating')
net.eval()
with torch.no_grad():
avg_psnr = 0
avg_ssim = 0
test_loss = 0
start.record()
for nBatch,(inputs,labels) in enumerate(test_dataset):
inputGPU = inputs.float().to(device) / maxInputTest
outputsGT = labels.float().to(device) / maxVolumeTrain
# Threshold GT to get rid of autofluorescence
outputsGT = imadjust(outputsGT,args.ths,outputsGT.max(), outputsGT.min(), outputsGT.max())
outputsVol = net(inputGPU)
loss = lossFunction(outputsGT,outputsVol)
test_loss += loss.item() / nDepths
# Compute PSNR
lossMSE = nn.functional.mse_loss(outputsVol.to(device).detach(), outputsGT.to(device).detach())
avg_psnr += 10 * math.log10(1 / lossMSE.item())
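                        # (added) PSNR is computed against a peak value of 1.0 because both
                        # volumes were normalized above, i.e. PSNR = 10 * log10(1 / MSE).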
# Compute ssim
avg_ssim += ssim(outputsVol[:,0,:,:,:].permute(0,3,1,2).contiguous().detach().to(device), outputsGT[:,0,:,:,:].permute(0,3,1,2).contiguous().detach().to(device)).sum()
end.record()
torch.cuda.synchronize()
lastBatchSize = min(outputsGT.shape[0],4)
gridOut2 = torch.cat((outputsGT[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach(), outputsVol[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach()), dim=0)
gridOut2 = tv.utils.make_grid(gridOut2, normalize=True, scale_each=False)
# process some for showing
indices_to_display = torch.randperm(inputGPU.shape[0])[0:lastBatchSize]
outputsGT = F.interpolate(outputsGT[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
outputsVol = F.interpolate(outputsVol[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
inputGPU = inputGPU[indices_to_display,:,:,:,:,:]
currPred = convert3Dto2DTiles(outputsVol, [lateralTile, lateralTile])
currGT = convert3Dto2DTiles(outputsGT, [lateralTile, lateralTile])
inputGrid = LF2Spatial(inputGPU, inputGPU.shape[2:])
gridPred = tv.utils.make_grid(currPred,normalize=True, scale_each=False)
gridGT = tv.utils.make_grid(currGT,normalize=True, scale_each=False)
gridInput = tv.utils.make_grid(inputGrid,normalize=True, scale_each=False)
# Write to tensorboard
writer.add_image('images_val_YZ_projection', gridOut2, curr_it)
writer.add_image('outputRGB_test', gridPred, curr_it)
writer.add_image('outputRGB_test_GT', gridGT, curr_it)
writer.add_image('input_test', gridInput, curr_it)
writer.add_scalar('Loss/test', test_loss/len(test_dataset), curr_it)
writer.add_scalar('Loss/psnr_val', avg_psnr/len(test_dataset), curr_it)
writer.add_scalar('Loss/ssim_val', avg_ssim/len(test_dataset), curr_it)
writer.add_scalar('LearningRate', args.learningRate, curr_it)
writer.add_scalar('times/val', start.elapsed_time(end)/test_size, curr_it)
net.train()
if epoch%2==0:
torch.save({
'epoch': epoch,
'args' : args,
'model_state_dict': net.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': train_loss,
'dataset_path': args.datasetPath},
save_folder + '/model_'+str(epoch))
print(f"Epoch {epoch + 1}/{args.epochs}.. "
f"Train loss: {train_loss / len(train_dataset):.7f}.. "
f"Test loss: {test_loss / len(test_dataset):.7f}.. ")
if __name__ == '__main__':
main() | en | 0.352193 | # # Arguments # parser = argparse.ArgumentParser() # # Number of epochs # parser.add_argument('--epochs', type=int, default=1000) # # Validate every n percentage of the data # parser.add_argument('--valEvery', type=float, default=0.25) # # Image indices to use for training and validation # parser.add_argument('--imagesToUse', nargs='+', type=int, default=list(range(0,5,1))) # # List of GPUs to use: 0 1 2 for example # parser.add_argument('--GPUs', nargs='+', type=int, default=None) # # Batch size # parser.add_argument('--batchSize', type=int, default=128) # # Perentage of the data to use for validation, from 0 to 1 # parser.add_argument('--validationSplit', type=float, default=0.1) # # Bias initialization value # parser.add_argument('--biasVal', type=float, default=0.1) # # Learning rate # parser.add_argument('--learningRate', type=float, default=0.001) # # Use bias flag # parser.add_argument('--useBias', type=str2bool, default=True) # # Use skip connections flag # parser.add_argument('--useSkipCon', type=str2bool, default=False) # # User selected random seed # parser.add_argument('--randomSeed', type=int, default=None) # # fov of input or neighboarhood around lenslet to reconstruct # parser.add_argument('--fovInput', type=int, default=9) # # nT number of lenslets to reconstruct simultaneously use at training time # parser.add_argument('--neighShape', type=int, default=3) # # Flag to use shallow or large U-net # parser.add_argument('--useShallowUnet', type=str2bool, default=True) # # Lower threshold of GT stacks, to get rid of autofluorescence # parser.add_argument('--ths', type=float, default=0.03) # # Path to dataset # parser.add_argument('--datasetPath', nargs='?', default="BrainLFMConfocalDataset/Brain_40x_64Depths_362imgs.h5") # # Path to directory where models and tensorboard logs are stored # parser.add_argument('--outputPath', nargs='?', default="runs/") # # Prefix for current output folder # parser.add_argument('--outputPrefix', nargs='?', default="") # # Path to model in case of continuing a training # parser.add_argument('--checkpointPath', nargs='?', default=None) # args = parser.parse_args() # Setup multithreading # Select GPUs to use # Set common random seed # Load checkpoint if provided # overwrite args # set Device to use # Create unique label # Get commit number # label = subprocess.check_output(["git", "describe", "--always"]).strip() #specific to MBL lab workstation # Create output folder # If asked to continue a training, save in the same folder # Create summary writer to log stuff # Load dataset # Split validation and testing # Create data loaders # Get Dataset information # Find normalization values # Create network # Create SSIM criteria # Init bias and weights if needed # Load network from checkpoint # Start distributed data parallel, as it's faster than DataParallel # Move network to distributed data parallel # timers # define indices to grab for tensorboard visualization # Init arrays to store losses # Start training # compute current iteration # start timer # load data to gpu and normalize from 0 to 1 # Threshold GT to get rid of autofluorescence # Predict # Record training time # Compute time per sample # Check if validation is required # Write training images to tensorboard # Select some images in the batch for showing # Write to tensorboard # Restart # Threshold GT to get rid of autofluorescence # Compute PSNR # Compute ssim # process some for showing # Write to tensorboard | 2.33527 | 2 |
tools/utils.py | valsworthen/toxic-comment-classification | 10 | 10061 | <filename>tools/utils.py
"""Utilities"""
import pandas as pd
import numpy as np
from attrdict import AttrDict
import yaml
def average_predictions(cv_predictions, n_splits, num_samples = 153164, num_labels = 6):
"""Average k-fold predictions stored in a dict"""
preds = np.zeros((num_samples, num_labels))
for preds_i in cv_predictions:
preds += preds_i
preds /= n_splits
return preds
def geom_average_predictions(cv_predictions, n_splits, num_samples = 153164, num_labels = 6):
"""Average k-fold predictions stored in a dict"""
preds = np.ones((num_samples, num_labels))
for preds_i in cv_predictions:
preds *= preds_i
preds = preds **(1/n_splits)
return preds
def create_submission(preds, filename):
labels = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
subm = pd.read_csv('input/sample_submission.csv')
submid = pd.DataFrame({'id': subm["id"]})
submission = pd.concat([submid, pd.DataFrame(preds, columns = labels)], axis=1)
submission.to_csv(filename, index=False)
def format_time(sec):
m, s = divmod(sec, 60)
h, m = divmod(m, 60)
return "{:.0f}h {:.0f}min {:.0f}s".format(h, m, s)
def read_yaml(filepath):
with open(filepath) as f:
        config = yaml.safe_load(f)  # safe_load avoids constructing arbitrary Python objects
return AttrDict(config)
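# Example (added sketch): how these helpers might be combined after k-fold training.
# The file names and fold count are illustrative assumptions, not part of this module,
# so the snippet is left commented out:
#
#   fold_preds = [np.load(f"preds_fold{i}.npy") for i in range(5)]
#   avg_preds = average_predictions(fold_preds, n_splits=5)
#   create_submission(avg_preds, "submission.csv")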
| <filename>tools/utils.py
"""Utilities"""
import pandas as pd
import numpy as np
from attrdict import AttrDict
import yaml
def average_predictions(cv_predictions, n_splits, num_samples = 153164, num_labels = 6):
"""Average k-fold predictions stored in a dict"""
preds = np.zeros((num_samples, num_labels))
for preds_i in cv_predictions:
preds += preds_i
preds /= n_splits
return preds
def geom_average_predictions(cv_predictions, n_splits, num_samples = 153164, num_labels = 6):
"""Average k-fold predictions stored in a dict"""
preds = np.ones((num_samples, num_labels))
for preds_i in cv_predictions:
preds *= preds_i
preds = preds **(1/n_splits)
return preds
def create_submission(preds, filename):
labels = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
subm = pd.read_csv('input/sample_submission.csv')
submid = pd.DataFrame({'id': subm["id"]})
submission = pd.concat([submid, pd.DataFrame(preds, columns = labels)], axis=1)
submission.to_csv(filename, index=False)
def format_time(sec):
m, s = divmod(sec, 60)
h, m = divmod(m, 60)
return "{:.0f}h {:.0f}min {:.0f}s".format(h, m, s)
def read_yaml(filepath):
with open(filepath) as f:
        config = yaml.safe_load(f)  # safe_load avoids constructing arbitrary Python objects
return AttrDict(config)
| en | 0.807561 | Utilities Average k-fold predictions stored in a dict Average k-fold predictions stored in a dict | 2.698051 | 3 |
dags/exercise1.py | mikef-nl/airflow-training-skeleton | 0 | 10062 | <filename>dags/exercise1.py<gh_stars>0
import airflow
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
args = {
'owner': 'Mike',
'start_date': airflow.utils.dates.days_ago(2),
}
dag = DAG(
dag_id='exercise1',
default_args=args,
schedule_interval=None
)
t1 = DummyOperator(task_id='task1', dag=dag)
t2 = DummyOperator(task_id='task2', dag=dag)
t3 = DummyOperator(task_id='task3', dag=dag)
t4 = DummyOperator(task_id='task4', dag=dag)
t5 = DummyOperator(task_id='task5', dag=dag)
t1 >> t2 >> [t3,t4] >> t5
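# (added note) The bitshift chain above fans out after task2 and back in at task5:
# task3 and task4 run in parallel once task2 succeeds, and task5 waits for both.
# Written out explicitly with the same operators, it is equivalent to:
#
# t1 >> t2
# t2 >> t3
# t2 >> t4
# t3 >> t5
# t4 >> t5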
| <filename>dags/exercise1.py<gh_stars>0
import airflow
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
args = {
'owner': 'Mike',
'start_date': airflow.utils.dates.days_ago(2),
}
dag = DAG(
dag_id='exercise1',
default_args=args,
schedule_interval=None
)
t1 = DummyOperator(task_id='task1', dag=dag)
t2 = DummyOperator(task_id='task2', dag=dag)
t3 = DummyOperator(task_id='task3', dag=dag)
t4 = DummyOperator(task_id='task4', dag=dag)
t5 = DummyOperator(task_id='task5', dag=dag)
t1 >> t2 >> [t3,t4] >> t5
| none | 1 | 2.235353 | 2 |
|
pyhelp/scripts/produce_meteo_maps.py | jnsebgosselin/help | 12 | 10063 | <gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 27 10:54:25 2018
@author: jsgosselin
"""
# ---- Standard Library Imports
from itertools import product
import os.path as osp
import os
# ---- Third Party Imports
import netCDF4
from geopandas import GeoDataFrame
import pandas as pd
from shapely.geometry import Point, Polygon
import numpy as np
dirpath_netcdf = "D:/MeteoGrilleDaily"
# %% Get lat/lon from the netCDF
filename = osp.join(dirpath_netcdf, 'GCQ_v2_2000.nc')
netcdf_dset = netCDF4.Dataset(filename, 'r+')
lat = np.array(netcdf_dset['lat'])
lon = np.array(netcdf_dset['lon'])
netcdf_dset.close()
# %% Read the weather data from the InfoClimat grid
stack_precip = []
stack_tasmax = []
stack_tasmin = []
nyear = 0
for year in range(2000, 2015):
print("\rProcessing year %d" % year, end=' ')
filename = osp.join(dirpath_netcdf, 'GCQ_v2_%d.nc' % year)
netcdf_dset = netCDF4.Dataset(filename, 'r+')
stack_precip.append(np.array(netcdf_dset['pr']))
stack_tasmax.append(np.array(netcdf_dset['tasmax']))
stack_tasmin.append(np.array(netcdf_dset['tasmin']))
netcdf_dset.close()
nyear += 1
print('')
daily_precip = np.vstack(stack_precip)
daily_tasmax = np.vstack(stack_tasmax)
daily_tasmin = np.vstack(stack_tasmin)
daily_tasavg = (daily_tasmax + daily_tasmin) / 2
yearly_avg_precip = np.sum(daily_precip, axis=0) / nyear
yearly_avg_tasavg = np.average(daily_tasavg, axis=0)
yearly_avg_tasmax = np.average(daily_tasmax, axis=0)
yearly_avg_tasmin = np.average(daily_tasmin, axis=0)
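# (added note) Precipitation is accumulated: daily values are summed and divided by the
# number of years, giving a mean annual total, whereas the temperature fields are simple
# averages over every day in the stacked period.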
# %% Create a grid
Np = len(lat) * len(lon)
geometry = []
arr_yearly_avg_precip = np.zeros(Np)
arr_avg_yearly_tasavg = np.zeros(Np)
arr_avg_yearly_tasmax = np.zeros(Np)
arr_avg_yearly_tasmin = np.zeros(Np)
i = 0
dx = dy = 0.1/2
for j, k in product(range(len(lat)), range(len(lon))):
print("\rProcessing cell %d of %d" % (i, Np), end=' ')
point = Point((lon[k], lat[j]))
# polygon = Polygon([(lon[k]-dx, lat[j]-dy),
# (lon[k]-dx, lat[j]+dy),
# (lon[k]+dx, lat[j]+dy),
# (lon[k]+dx, lat[j]-dy)])
geometry.append(point)
arr_yearly_avg_precip[i] = yearly_avg_precip[j, k]
arr_avg_yearly_tasavg[i] = yearly_avg_tasavg[j, k]
arr_avg_yearly_tasmax[i] = yearly_avg_tasmax[j, k]
arr_avg_yearly_tasmin[i] = yearly_avg_tasmin[j, k]
i += 1
print("\rProcessing cell %d of %d" % (i, Np))
# %%
print('\rFormatting the data in a shapefile...', end=' ')
df = pd.DataFrame(data={'precip': arr_yearly_avg_precip,
'tasavg': arr_avg_yearly_tasavg,
'tasmax': arr_avg_yearly_tasmax,
'tasmin': arr_avg_yearly_tasmin})
crs = "+proj=longlat +ellps=GRS80 +datum=NAD83 +towgs84=0,0,0,0,0,0,0 +no_defs"
gdf = GeoDataFrame(df, crs=crs, geometry=geometry)
print('\rFormatting the data in a shapefile... done')
print('\rSaving to Shapefile...', end=' ')
path_shp_out = ("D:/MeteoGrilleDaily/grid_yearly_meteo/grid_yearly_meteo.shp")
if not osp.exists(path_shp_out):
os.makedirs(path_shp_out)
gdf.to_file(path_shp_out)
print('\rSaving to Shapefile... done', end=' ')
| # -*- coding: utf-8 -*-
"""
Created on Tue Feb 27 10:54:25 2018
@author: jsgosselin
"""
# ---- Standard Library Imports
from itertools import product
import os.path as osp
import os
# ---- Third Party Imports
import netCDF4
from geopandas import GeoDataFrame
import pandas as pd
from shapely.geometry import Point, Polygon
import numpy as np
dirpath_netcdf = "D:/MeteoGrilleDaily"
# %% Get lat/lon from the netCDF
filename = osp.join(dirpath_netcdf, 'GCQ_v2_2000.nc')
netcdf_dset = netCDF4.Dataset(filename, 'r+')
lat = np.array(netcdf_dset['lat'])
lon = np.array(netcdf_dset['lon'])
netcdf_dset.close()
# %% Read the weather data from the InfoClimat grid
stack_precip = []
stack_tasmax = []
stack_tasmin = []
nyear = 0
for year in range(2000, 2015):
print("\rProcessing year %d" % year, end=' ')
filename = osp.join(dirpath_netcdf, 'GCQ_v2_%d.nc' % year)
netcdf_dset = netCDF4.Dataset(filename, 'r+')
stack_precip.append(np.array(netcdf_dset['pr']))
stack_tasmax.append(np.array(netcdf_dset['tasmax']))
stack_tasmin.append(np.array(netcdf_dset['tasmin']))
netcdf_dset.close()
nyear += 1
print('')
daily_precip = np.vstack(stack_precip)
daily_tasmax = np.vstack(stack_tasmax)
daily_tasmin = np.vstack(stack_tasmin)
daily_tasavg = (daily_tasmax + daily_tasmin) / 2
yearly_avg_precip = np.sum(daily_precip, axis=0) / nyear
yearly_avg_tasavg = np.average(daily_tasavg, axis=0)
yearly_avg_tasmax = np.average(daily_tasmax, axis=0)
yearly_avg_tasmin = np.average(daily_tasmin, axis=0)
# %% Create a grid
Np = len(lat) * len(lon)
geometry = []
arr_yearly_avg_precip = np.zeros(Np)
arr_avg_yearly_tasavg = np.zeros(Np)
arr_avg_yearly_tasmax = np.zeros(Np)
arr_avg_yearly_tasmin = np.zeros(Np)
i = 0
dx = dy = 0.1/2
for j, k in product(range(len(lat)), range(len(lon))):
print("\rProcessing cell %d of %d" % (i, Np), end=' ')
point = Point((lon[k], lat[j]))
# polygon = Polygon([(lon[k]-dx, lat[j]-dy),
# (lon[k]-dx, lat[j]+dy),
# (lon[k]+dx, lat[j]+dy),
# (lon[k]+dx, lat[j]-dy)])
geometry.append(point)
arr_yearly_avg_precip[i] = yearly_avg_precip[j, k]
arr_avg_yearly_tasavg[i] = yearly_avg_tasavg[j, k]
arr_avg_yearly_tasmax[i] = yearly_avg_tasmax[j, k]
arr_avg_yearly_tasmin[i] = yearly_avg_tasmin[j, k]
i += 1
print("\rProcessing cell %d of %d" % (i, Np))
# %%
print('\rFormatting the data in a shapefile...', end=' ')
df = pd.DataFrame(data={'precip': arr_yearly_avg_precip,
'tasavg': arr_avg_yearly_tasavg,
'tasmax': arr_avg_yearly_tasmax,
'tasmin': arr_avg_yearly_tasmin})
crs = "+proj=longlat +ellps=GRS80 +datum=NAD83 +towgs84=0,0,0,0,0,0,0 +no_defs"
gdf = GeoDataFrame(df, crs=crs, geometry=geometry)
print('\rFormatting the data in a shapefile... done')
print('\rSaving to Shapefile...', end=' ')
path_shp_out = ("D:/MeteoGrilleDaily/grid_yearly_meteo/grid_yearly_meteo.shp")
if not osp.exists(path_shp_out):
os.makedirs(path_shp_out)
gdf.to_file(path_shp_out)
print('\rSaving to Shapefile... done', end=' ') | en | 0.525168 | # -*- coding: utf-8 -*- Created on Tue Feb 27 10:54:25 2018 @author: jsgosselin # ---- Standard Library Imports # ---- Third Party Imports # %% Get lat/lon from the netCDF # %% Read the weather data from the InfoClimat grid # %% Create a grid # polygon = Polygon([(lon[k]-dx, lat[j]-dy), # (lon[k]-dx, lat[j]+dy), # (lon[k]+dx, lat[j]+dy), # (lon[k]+dx, lat[j]-dy)]) # %% | 2.304113 | 2 |
platform/core/tests/test_activitylogs/test_service.py | hackerwins/polyaxon | 0 | 10064 | <reponame>hackerwins/polyaxon<filename>platform/core/tests/test_activitylogs/test_service.py
# pylint:disable=ungrouped-imports
import uuid
import pytest
import activitylogs
from db.models.activitylogs import ActivityLog
from events.registry.experiment import EXPERIMENT_DELETED_TRIGGERED
from events.registry.user import USER_ACTIVATED
from factories.factory_experiments import ExperimentFactory
from factories.factory_users import UserFactory
from tests.base.case import BaseTest
@pytest.mark.activitylogs_mark
class ActivityLogsTest(BaseTest):
def setUp(self):
super().setUp()
self.experiment = ExperimentFactory()
self.admin = UserFactory(is_staff=True, is_superuser=True)
self.user = UserFactory()
def test_record_creates_activities(self):
assert ActivityLog.objects.count() == 0
activitylogs.record(ref_id=uuid.uuid4(),
event_type=USER_ACTIVATED,
instance=self.user,
actor_id=self.admin.id,
actor_name=self.admin.username)
assert ActivityLog.objects.count() == 1
activity = ActivityLog.objects.last()
assert activity.event_type == USER_ACTIVATED
assert activity.content_object == self.user
assert activity.actor == self.admin
activitylogs.record(ref_id=uuid.uuid4(),
event_type=EXPERIMENT_DELETED_TRIGGERED,
instance=self.experiment,
actor_id=self.admin.id,
actor_name=self.admin.username)
assert ActivityLog.objects.count() == 2
activity = ActivityLog.objects.last()
assert activity.event_type == EXPERIMENT_DELETED_TRIGGERED
assert activity.content_object == self.experiment
assert activity.actor == self.admin
| # pylint:disable=ungrouped-imports
import uuid
import pytest
import activitylogs
from db.models.activitylogs import ActivityLog
from events.registry.experiment import EXPERIMENT_DELETED_TRIGGERED
from events.registry.user import USER_ACTIVATED
from factories.factory_experiments import ExperimentFactory
from factories.factory_users import UserFactory
from tests.base.case import BaseTest
@pytest.mark.activitylogs_mark
class ActivityLogsTest(BaseTest):
def setUp(self):
super().setUp()
self.experiment = ExperimentFactory()
self.admin = UserFactory(is_staff=True, is_superuser=True)
self.user = UserFactory()
def test_record_creates_activities(self):
assert ActivityLog.objects.count() == 0
activitylogs.record(ref_id=uuid.uuid4(),
event_type=USER_ACTIVATED,
instance=self.user,
actor_id=self.admin.id,
actor_name=self.admin.username)
assert ActivityLog.objects.count() == 1
activity = ActivityLog.objects.last()
assert activity.event_type == USER_ACTIVATED
assert activity.content_object == self.user
assert activity.actor == self.admin
activitylogs.record(ref_id=uuid.uuid4(),
event_type=EXPERIMENT_DELETED_TRIGGERED,
instance=self.experiment,
actor_id=self.admin.id,
actor_name=self.admin.username)
assert ActivityLog.objects.count() == 2
activity = ActivityLog.objects.last()
assert activity.event_type == EXPERIMENT_DELETED_TRIGGERED
assert activity.content_object == self.experiment
assert activity.actor == self.admin | en | 0.255567 | # pylint:disable=ungrouped-imports | 1.981843 | 2 |
tests/framework/test_ingress.py | praus/shapy | 54 | 10065 | import unittest
from shapy.framework.tcelements import *
from shapy.framework.executor import run
from tests import TCTestCase
class TestIngress(TCTestCase):
def setUp(self):
self.interface = Interface('lo')
def test_ingress_filter(self):
q = IngressQdisc()
q.add(RedirectFilter('dst 127.0.0.3', 'eth0'))
self.interface.add_ingress(q)
self.interface.set_shaping()
| import unittest
from shapy.framework.tcelements import *
from shapy.framework.executor import run
from tests import TCTestCase
class TestIngress(TCTestCase):
def setUp(self):
self.interface = Interface('lo')
def test_ingress_filter(self):
q = IngressQdisc()
q.add(RedirectFilter('dst 127.0.0.3', 'eth0'))
self.interface.add_ingress(q)
self.interface.set_shaping()
| none | 1 | 2.187114 | 2 |
|
software/hippietrap/gradient.py | mayhem/led-chandelier | 2 | 10066 | <reponame>mayhem/led-chandelier<gh_stars>1-10
from colorsys import hsv_to_rgb
from math import fabs, fmod
import os
from hippietrap.color import Color
class Gradient(object):
def __init__(self, palette, num_leds = 1):
        # palettes are in format [ (.345, (128, 0, 128)) ]
self._validate_palette(palette)
self.palette = palette
self.num_leds = num_leds
self.led_scale = 1.0
self.led_offset = 0.0
def _validate_palette(self, palette):
if len(palette) < 2:
raise ValueError("Palette must have at least two points.")
if palette[0][0] > 0.0:
raise ValueError("First point in palette must be less than or equal to 0.0")
if palette[-1][0] < 1.0:
raise ValueError("Last point in palette must be greater than or equal to 1.0")
def set_scale(self, scale):
self.led_scale = scale
def set_offset(self, offset):
self.led_offset = offset
def get_color(self, offset):
if offset < 0.0 or offset > 1.0:
raise IndexError("Invalid offset.")
for index in range(len(self.palette)):
# skip the first item
if index == 0:
continue
if self.palette[index][0] >= offset:
section_begin_offset = self.palette[index-1][0]
section_end_offset = self.palette[index][0]
percent = (offset - section_begin_offset) / (section_end_offset - section_begin_offset)
new_color = []
for color in range(3):
new_color.append(int(self.palette[index-1][1][color] +
((self.palette[index][1][color] - self.palette[index-1][1][color]) * percent)))
return Color(min(new_color[0], 255), min(new_color[1], 255), min(new_color[2], 255))
assert False
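# Example (added): a minimal sketch of using Gradient; the palette and sample offsets
# below are illustrative assumptions and not part of the original module.
if __name__ == "__main__":
    demo_palette = [(0.0, (0, 0, 0)), (0.5, (255, 0, 0)), (1.0, (255, 255, 255))]
    gradient = Gradient(demo_palette)
    for offset in (0.0, 0.25, 0.5, 0.75, 1.0):
        print(offset, gradient.get_color(offset))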
| from colorsys import hsv_to_rgb
from math import fabs, fmod
import os
from hippietrap.color import Color
class Gradient(object):
def __init__(self, palette, num_leds = 1):
        # palettes are in format [ (.345, (128, 0, 128)) ]
self._validate_palette(palette)
self.palette = palette
self.num_leds = num_leds
self.led_scale = 1.0
self.led_offset = 0.0
def _validate_palette(self, palette):
if len(palette) < 2:
raise ValueError("Palette must have at least two points.")
if palette[0][0] > 0.0:
raise ValueError("First point in palette must be less than or equal to 0.0")
if palette[-1][0] < 1.0:
raise ValueError("Last point in palette must be greater than or equal to 1.0")
def set_scale(self, scale):
self.led_scale = scale
def set_offset(self, offset):
self.led_offset = offset
def get_color(self, offset):
if offset < 0.0 or offset > 1.0:
raise IndexError("Invalid offset.")
for index in range(len(self.palette)):
# skip the first item
if index == 0:
continue
if self.palette[index][0] >= offset:
section_begin_offset = self.palette[index-1][0]
section_end_offset = self.palette[index][0]
percent = (offset - section_begin_offset) / (section_end_offset - section_begin_offset)
new_color = []
for color in range(3):
new_color.append(int(self.palette[index-1][1][color] +
((self.palette[index][1][color] - self.palette[index-1][1][color]) * percent)))
return Color(min(new_color[0], 255), min(new_color[1], 255), min(new_color[2], 255))
assert False | en | 0.811047 | # palletes are in format [ (.345, (128, 0, 128)) ] # skip the first item | 3.091043 | 3 |
stix_shifter_modules/elastic/entry_point.py | 6un9-h0-Dan/stix-shifter | 1 | 10067 | from stix_shifter_utils.utils.entry_point_base import EntryPointBase
from stix_shifter_utils.modules.cim.stix_translation.cim_data_mapper import CimDataMapper
from stix_shifter_utils.modules.car.stix_translation.car_data_mapper import CarDataMapper
from .stix_translation.stix_to_elastic import StixToElastic
class EntryPoint(EntryPointBase):
def __init__(self, connection={}, configuration={}, options={}):
super().__init__(options)
self.add_dialect('default', query_translator=StixToElastic(), data_mapper=CarDataMapper(options), default=True)
self.add_dialect('cim', query_translator=StixToElastic(), data_mapper=CimDataMapper(options), default_include=False)
self.add_dialect('car', query_translator=StixToElastic(), data_mapper=CarDataMapper(options), default_include=False) | from stix_shifter_utils.utils.entry_point_base import EntryPointBase
from stix_shifter_utils.modules.cim.stix_translation.cim_data_mapper import CimDataMapper
from stix_shifter_utils.modules.car.stix_translation.car_data_mapper import CarDataMapper
from .stix_translation.stix_to_elastic import StixToElastic
class EntryPoint(EntryPointBase):
def __init__(self, connection={}, configuration={}, options={}):
super().__init__(options)
self.add_dialect('default', query_translator=StixToElastic(), data_mapper=CarDataMapper(options), default=True)
self.add_dialect('cim', query_translator=StixToElastic(), data_mapper=CimDataMapper(options), default_include=False)
self.add_dialect('car', query_translator=StixToElastic(), data_mapper=CarDataMapper(options), default_include=False) | none | 1 | 1.852056 | 2 |
|
src/http_pick/pickergui.py | thomaspcole/http-pick | 0 | 10068 | <filename>src/http_pick/pickergui.py
from PyQt5.QtWidgets import (QMainWindow, QToolButton, QWidget, QHBoxLayout)
from PyQt5.QtGui import QIcon
from PyQt5 import QtCore
from math import floor
import sys
class MainWindow(QMainWindow):
def __init__(self, browsers, iconsize=72, displayappname=False, x=0, y=0, callback=lambda v: print(v)):
super().__init__()
self.setFocus()
self.centralwidget = QWidget()
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.setCentralWidget(self.centralwidget)
self.lay = QHBoxLayout(self.centralwidget)
self.lay.setContentsMargins(0,0,0,0)
self.lay.setSpacing(0)
xOffset = floor((iconsize*len(browsers))/2)
yOffset = floor(iconsize*1.25)
self.move(x-xOffset,y-yOffset)
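        # (added note) xOffset centres the row of icons on the click position and yOffset
        # lifts the window one icon height (plus 25%) above it, so the picker appears just
        # above the cursor.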
for b in browsers:
self.btn = QToolButton(self)
if '/' in b: #'Normal' launch path
path = b
appname = path.split('/')
            elif '.' in b: #Flatpak ref
                path = b
                appname = path.split('.')
            else: # skip entries that are neither a filesystem path nor a Flatpak ref
                continue
self.btn.setIcon(QIcon.fromTheme(appname[-1]))
self.btn.setIconSize(QtCore.QSize(iconsize,iconsize))
self.btn.setStyleSheet("QToolButton {background-color: transparent; border: 0px; color: white;}")
if(displayappname):
self.btn.setToolButtonStyle(QtCore.Qt.ToolButtonStyle.ToolButtonTextUnderIcon)
self.btn.setText(appname[-1].capitalize())
self.btn.clicked.connect(lambda v, path=path : callback(path))
self.lay.addWidget(self.btn)
def on_focusChanged(self):
if(self.isActiveWindow() == False):
quit() | <filename>src/http_pick/pickergui.py
from PyQt5.QtWidgets import (QMainWindow, QToolButton, QWidget, QHBoxLayout)
from PyQt5.QtGui import QIcon
from PyQt5 import QtCore
from math import floor
import sys
class MainWindow(QMainWindow):
def __init__(self, browsers, iconsize=72, displayappname=False, x=0, y=0, callback=lambda v: print(v)):
super().__init__()
self.setFocus()
self.centralwidget = QWidget()
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.setCentralWidget(self.centralwidget)
self.lay = QHBoxLayout(self.centralwidget)
self.lay.setContentsMargins(0,0,0,0)
self.lay.setSpacing(0)
xOffset = floor((iconsize*len(browsers))/2)
yOffset = floor(iconsize*1.25)
self.move(x-xOffset,y-yOffset)
for b in browsers:
self.btn = QToolButton(self)
if '/' in b: #'Normal' launch path
path = b
appname = path.split('/')
            elif '.' in b: #Flatpak ref
                path = b
                appname = path.split('.')
            else: # skip entries that are neither a filesystem path nor a Flatpak ref
                continue
self.btn.setIcon(QIcon.fromTheme(appname[-1]))
self.btn.setIconSize(QtCore.QSize(iconsize,iconsize))
self.btn.setStyleSheet("QToolButton {background-color: transparent; border: 0px; color: white;}")
if(displayappname):
self.btn.setToolButtonStyle(QtCore.Qt.ToolButtonStyle.ToolButtonTextUnderIcon)
self.btn.setText(appname[-1].capitalize())
self.btn.clicked.connect(lambda v, path=path : callback(path))
self.lay.addWidget(self.btn)
def on_focusChanged(self):
if(self.isActiveWindow() == False):
quit() | en | 0.625489 | #'Normal' launch path #Flatpak ref | 2.520195 | 3 |
genetic_pwdcrack.py | robotenique/AI-programming | 3 | 10069 | """
Crack a password using a genetic algorithm!
"""
import random as rnd
def main():
"""
This file implements a genetic algorithm to solve the problem of
cracking a given password, by creating 'generations' of different
words, selecting the best, breeeding them, applying a simple crossover
(randomized) and a mutation chance.
"""
#variables dict: Define the problem constants
genetic_variables = {
'password' : "<PASSWORD>",
'size_population' : 100,
'best_sample' : 20,
'lucky_few' : 20,
'number_of_child' : 5,
'number_of_generations' : 10000, #Overkill >:D
'chance_of_mutation' : .5
}
prob = genetic_variables
#program
if (prob['best_sample'] + prob['lucky_few'])/2*prob['number_of_child'] != prob['size_population']:
print ("population size not stable")
return
last_gen, _ = genetic_algorithm(**genetic_variables)
print("Last generation: \n\n")
print(last_gen)
def genetic_algorithm(**kwargs):
"""
Execute the genetic algorithm.
This algorithm takes a dict as an argument.
It will iterate based on the variable 'number_of_generations', and return
the last_gen and the historic
"""
# Unpack the values from the dict
password = kwargs['password']
size_population = kwargs['size_population']
best_sample = kwargs['best_sample']
lucky_few = kwargs['lucky_few']
number_of_child = kwargs['number_of_child']
number_of_generations = kwargs['number_of_generations']
chance_of_mutation = kwargs['chance_of_mutation']
hist = []
# The genetic algorithm
curr_pop = initial_pop(size_population, password)
hist = curr_pop
last_found = -1
for _ in range (number_of_generations):
curr_pop = next_gen(curr_pop, password, best_sample, lucky_few, number_of_child, chance_of_mutation)
hist.append(curr_pop)
if check_solution(curr_pop, password):
last_found = _
break
if last_found != -1:
print(f"Found a solution in the {last_found} generation!!")
else:
print("No solution found! D':")
return curr_pop, hist
def next_gen(curr_pop, password, best_sample, lucky_few, number_of_child, chance_of_mutation):
"""
-> This is the main task of the Genetic Algorithm <-
Given the current population, apply the following steps:
- Compute the fitness of each individual in the population
- Select the best ones (and some lucky guys)
- Make them reproduce
- Mutate the children
- Return this new population
"""
pop_sorted = compute_perf_pop(curr_pop, password)
next_breeders = select_from_population(pop_sorted, best_sample, lucky_few)
next_pop = create_children(next_breeders, number_of_child)
next_gen = mutate_pop(next_pop, chance_of_mutation)
return next_gen
def initial_pop(size, password):
"""
Generate a population consisting of random words, each with the same
length as the password, and the population has the size specified.
"""
return [word_generate(len(password)) for _ in range(size)]
def fitness (password, test_word):
"""
The fitness function:
fitness(test_word): (# of correct chars) / (total number of chars)
fitness(test_word) = 0 if # of correct chars = 0
fitness(test_word) = 100 if # of correct chars = total number of chars
"""
if (len(test_word) != len(password)):
print("Incompatible password...")
return
else:
score = (1 if password[i] == test_word[i] else 0 for i in range(len(password)))
return sum(score)*100/len(password)
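# Example (added): fitness compares the strings position by position, so for instance
# fitness("abcd", "abzd") returns 75.0 (3 matching characters out of 4, scaled to 0-100).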
def compute_perf_pop(population, password):
"""
Return the population, sorted by the fitness from each individual
"""
populationPerf = {ind:fitness(password, ind) for ind in population}
# Sort by fitness, reversed (best ones in the beginning of the list)
return sorted(populationPerf.items(), key= lambda it: it[1], reverse=True)
def select_from_population(pop_sorted, best_sample, lucky_few):
"""
Create the next breeders, with 'best_sample' individuals which have the
top fitness value from the population, and 'lucky_few' individuals which
are randomly selected.
"""
next_gen = []
for i in range(best_sample):
next_gen.append(pop_sorted[i][0])
# Simple lucky few: randomly select some elements from the population
for i in range(lucky_few):
next_gen.append(rnd.choice(pop_sorted)[0])
rnd.shuffle(next_gen)
return next_gen
def create_children(breeders, nof_childs):
"""
Create the next population of individuals, by breeding two by two
"""
next_pop = []
mid_pos = len(breeders)//2 # len(breeders) must be an even number
for ind_1, ind_2 in zip(breeders[:mid_pos], breeders[mid_pos:]):
for _ in range(nof_childs):
next_pop.append(create_child(ind_1, ind_2))
return next_pop
def mutate_pop(population, chance):
"""
Given a chance for mutation, this apply the mutation layer
to the genetic algorithm, by generating a mutation with the chance
specified.
"""
for i in range(len(population)):
if rnd.random() < chance:
population[i] = mutate_word(population[i])
return population
def mutate_word(word):
"""
Mutate a letter(gene) from the word, then return it
"""
pos = int(rnd.random()*len(word))
word = word[:pos] + chr(97 + int(26*rnd.random())) + word[pos + 1:]
return word
def create_child(ind_1, ind_2):
"""
For each letter of the child, get a random gene from ind_1 or ind_2
in the i-th position.
"""
temp = [ind_1[i] if rnd.random() < 0.5 else ind_2[i] for i in range(len(ind_1))]
return "".join(temp)
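# (added note) This is a uniform crossover: every character position independently
# inherits the corresponding gene from either parent with probability 0.5.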
def word_generate(length):
"""
Generate a string with random lowercase letters, with length = length!
"""
# Generate a random letter from alphabet, lowercase, and add to result
    return "".join((chr(97 + rnd.randint(0, 25)) for _ in range(length)))  # 0-25 keeps the characters within a-z
def check_solution(population, password):
"""
Check if the population found a solution to the problem
"""
return any(ind == password for ind in population)
if __name__ == '__main__':
main()
| """
Crack a password using a genetic algorithm!
"""
import random as rnd
def main():
"""
This file implements a genetic algorithm to solve the problem of
cracking a given password, by creating 'generations' of different
words, selecting the best, breeeding them, applying a simple crossover
(randomized) and a mutation chance.
"""
#variables dict: Define the problem constants
genetic_variables = {
'password' : "<PASSWORD>",
'size_population' : 100,
'best_sample' : 20,
'lucky_few' : 20,
'number_of_child' : 5,
'number_of_generations' : 10000, #Overkill >:D
'chance_of_mutation' : .5
}
prob = genetic_variables
#program
if (prob['best_sample'] + prob['lucky_few'])/2*prob['number_of_child'] != prob['size_population']:
print ("population size not stable")
return
last_gen, _ = genetic_algorithm(**genetic_variables)
print("Last generation: \n\n")
print(last_gen)
def genetic_algorithm(**kwargs):
"""
Execute the genetic algorithm.
This algorithm takes a dict as an argument.
It will iterate based on the variable 'number_of_generations', and return
the last_gen and the historic
"""
# Unpack the values from the dict
password = kwargs['password']
size_population = kwargs['size_population']
best_sample = kwargs['best_sample']
lucky_few = kwargs['lucky_few']
number_of_child = kwargs['number_of_child']
number_of_generations = kwargs['number_of_generations']
chance_of_mutation = kwargs['chance_of_mutation']
hist = []
# The genetic algorithm
curr_pop = initial_pop(size_population, password)
hist = curr_pop
last_found = -1
for _ in range (number_of_generations):
curr_pop = next_gen(curr_pop, password, best_sample, lucky_few, number_of_child, chance_of_mutation)
hist.append(curr_pop)
if check_solution(curr_pop, password):
last_found = _
break
if last_found != -1:
print(f"Found a solution in the {last_found} generation!!")
else:
print("No solution found! D':")
return curr_pop, hist
def next_gen(curr_pop, password, best_sample, lucky_few, number_of_child, chance_of_mutation):
"""
-> This is the main task of the Genetic Algorithm <-
Given the current population, apply the following steps:
- Compute the fitness of each individual in the population
- Select the best ones (and some lucky guys)
- Make them reproduce
- Mutate the children
- Return this new population
"""
pop_sorted = compute_perf_pop(curr_pop, password)
next_breeders = select_from_population(pop_sorted, best_sample, lucky_few)
next_pop = create_children(next_breeders, number_of_child)
next_gen = mutate_pop(next_pop, chance_of_mutation)
return next_gen
def initial_pop(size, password):
"""
Generate a population consisting of random words, each with the same
length as the password, and the population has the size specified.
"""
return [word_generate(len(password)) for _ in range(size)]
def fitness (password, test_word):
"""
The fitness function:
fitness(test_word): (# of correct chars) / (total number of chars)
fitness(test_word) = 0 if # of correct chars = 0
fitness(test_word) = 100 if # of correct chars = total number of chars
"""
if (len(test_word) != len(password)):
print("Incompatible password...")
return
else:
score = (1 if password[i] == test_word[i] else 0 for i in range(len(password)))
return sum(score)*100/len(password)
def compute_perf_pop(population, password):
"""
Return the population, sorted by the fitness from each individual
"""
populationPerf = {ind:fitness(password, ind) for ind in population}
# Sort by fitness, reversed (best ones in the beginning of the list)
return sorted(populationPerf.items(), key= lambda it: it[1], reverse=True)
def select_from_population(pop_sorted, best_sample, lucky_few):
"""
Create the next breeders, with 'best_sample' individuals which have the
top fitness value from the population, and 'lucky_few' individuals which
are randomly selected.
"""
next_gen = []
for i in range(best_sample):
next_gen.append(pop_sorted[i][0])
# Simple lucky few: randomly select some elements from the population
for i in range(lucky_few):
next_gen.append(rnd.choice(pop_sorted)[0])
rnd.shuffle(next_gen)
return next_gen
def create_children(breeders, nof_childs):
"""
Create the next population of individuals, by breeding two by two
"""
next_pop = []
mid_pos = len(breeders)//2 # len(breeders) must be an even number
for ind_1, ind_2 in zip(breeders[:mid_pos], breeders[mid_pos:]):
for _ in range(nof_childs):
next_pop.append(create_child(ind_1, ind_2))
return next_pop
def mutate_pop(population, chance):
"""
Given a chance for mutation, this apply the mutation layer
to the genetic algorithm, by generating a mutation with the chance
specified.
"""
for i in range(len(population)):
if rnd.random() < chance:
population[i] = mutate_word(population[i])
return population
def mutate_word(word):
"""
Mutate a letter(gene) from the word, then return it
"""
pos = int(rnd.random()*len(word))
word = word[:pos] + chr(97 + int(26*rnd.random())) + word[pos + 1:]
return word
def create_child(ind_1, ind_2):
"""
For each letter of the child, get a random gene from ind_1 or ind_2
in the i-th position.
"""
temp = [ind_1[i] if rnd.random() < 0.5 else ind_2[i] for i in range(len(ind_1))]
return "".join(temp)
def word_generate(length):
"""
Generate a string with random lowercase letters, with length = length!
"""
# Generate a random letter from alphabet, lowercase, and add to result
    return "".join((chr(97 + rnd.randint(0, 25)) for _ in range(length)))  # 0-25 keeps the characters within a-z
def check_solution(population, password):
"""
Check if the population found a solution to the problem
"""
return any(ind == password for ind in population)
if __name__ == '__main__':
main()
| en | 0.830542 | Crack a password using a genetic algorithm! This file implements a genetic algorithm to solve the problem of cracking a given password, by creating 'generations' of different words, selecting the best, breeeding them, applying a simple crossover (randomized) and a mutation chance. #variables dict: Define the problem constants #Overkill >:D #program Execute the genetic algorithm. This algorithm takes a dict as an argument. It will iterate based on the variable 'number_of_generations', and return the last_gen and the historic # Unpack the values from the dict # The genetic algorithm -> This is the main task of the Genetic Algorithm <- Given the current population, apply the following steps: - Compute the fitness of each individual in the population - Select the best ones (and some lucky guys) - Make them reproduce - Mutate the children - Return this new population Generate a population consisting of random words, each with the same length as the password, and the population has the size specified. The fitness function: fitness(test_word): (# of correct chars) / (total number of chars) fitness(test_word) = 0 if # of correct chars = 0 fitness(test_word) = 100 if # of correct chars = total number of chars Return the population, sorted by the fitness from each individual # Sort by fitness, reversed (best ones in the beginning of the list) Create the next breeders, with 'best_sample' individuals which have the top fitness value from the population, and 'lucky_few' individuals which are randomly selected. # Simple lucky few: randomly select some elements from the population Create the next population of individuals, by breeding two by two # len(breeders) must be an even number Given a chance for mutation, this apply the mutation layer to the genetic algorithm, by generating a mutation with the chance specified. Mutate a letter(gene) from the word, then return it For each letter of the child, get a random gene from ind_1 or ind_2 in the i-th position. Generate a string with random lowercase letters, with length = length! # Generate a random letter from alphabet, lowercase, and add to result Check if the population found a solution to the problem | 3.763436 | 4 |
openverse_catalog/dags/providers/provider_api_scripts/science_museum.py | yavik-kapadia/openverse-catalog | 25 | 10070 | <reponame>yavik-kapadia/openverse-catalog<filename>openverse_catalog/dags/providers/provider_api_scripts/science_museum.py
import logging
from common.licenses import get_license_info
from common.loader import provider_details as prov
from common.requester import DelayedRequester
from common.storage.image import ImageStore
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s: %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
LIMIT = 100
DELAY = 5.0
RETRIES = 3
PROVIDER = prov.SCIENCE_DEFAULT_PROVIDER
ENDPOINT = "https://collection.sciencemuseumgroup.org.uk/search/"
delay_request = DelayedRequester(delay=DELAY)
image_store = ImageStore(provider=PROVIDER)
HEADERS = {"Accept": "application/json"}
DEFAULT_QUERY_PARAMS = {
"has_image": 1,
"image_license": "CC",
"page[size]": LIMIT,
"page[number]": 0,
"date[from]": 0,
"date[to]": 1500,
}
YEAR_RANGE = [
(0, 1500),
(1500, 1750),
(1750, 1825),
(1825, 1850),
(1850, 1875),
(1875, 1900),
(1900, 1915),
(1915, 1940),
(1940, 1965),
(1965, 1990),
(1990, 2020),
]
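# (added note) The collection is queried in these year slices rather than all at once,
# presumably to keep each paged search small enough for the API; main() iterates over
# the ranges and _page_records pages within each one.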
# global variable to keep track of records pulled
RECORD_IDS = []
def main():
logger.info("Begin: Science Museum script")
for year_range in YEAR_RANGE:
logger.info(f"Running for years {year_range}")
from_year, to_year = year_range
image_count = _page_records(from_year=from_year, to_year=to_year)
logger.info(f"Images pulled till now {image_count}")
image_count = image_store.commit()
logger.info(f"Total images pulled {image_count}")
def _page_records(from_year, to_year):
image_count = 0
page_number = 0
condition = True
while condition:
query_param = _get_query_param(
page_number=page_number, from_year=from_year, to_year=to_year
)
batch_data = _get_batch_objects(query_param=query_param)
if type(batch_data) == list:
if len(batch_data) > 0:
image_count = _handle_object_data(batch_data)
page_number += 1
else:
condition = False
else:
condition = False
return image_count
def _get_query_param(
page_number=0, from_year=0, to_year=1500, default_query_param=None
):
if default_query_param is None:
default_query_param = DEFAULT_QUERY_PARAMS
query_param = default_query_param.copy()
query_param["page[number]"] = page_number
query_param["date[from]"] = from_year
query_param["date[to]"] = to_year
return query_param
def _get_batch_objects(
endpoint=ENDPOINT, headers=None, retries=RETRIES, query_param=None
):
if headers is None:
headers = HEADERS.copy()
data = None
for retry in range(retries):
response = delay_request.get(endpoint, query_param, headers=headers)
try:
response_json = response.json()
if "data" in response_json.keys():
data = response_json.get("data")
break
except Exception as e:
logger.error(f"Failed to due to {e}")
return data
def _handle_object_data(batch_data):
image_count = 0
for obj_ in batch_data:
id_ = obj_.get("id")
if id_ in RECORD_IDS:
continue
RECORD_IDS.append(id_)
foreign_landing_url = obj_.get("links", {}).get("self")
if foreign_landing_url is None:
continue
obj_attributes = obj_.get("attributes")
if obj_attributes is None:
continue
title = obj_attributes.get("summary_title")
creator = _get_creator_info(obj_attributes)
metadata = _get_metadata(obj_attributes)
multimedia = obj_attributes.get("multimedia")
if multimedia is None:
continue
for image_data in multimedia:
foreign_id = image_data.get("admin", {}).get("uid")
if foreign_id is None:
continue
processed = image_data.get("processed")
source = image_data.get("source")
image_url, height, width = _get_image_info(processed)
if image_url is None:
continue
license_version = _get_license_version(source)
if license_version is None:
continue
license_, version = license_version.lower().split(" ")
license_ = license_.replace("cc-", "")
license_info = get_license_info(license_=license_, license_version=version)
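            # (added) Example of the transformation above: a rights string such as
            # "CC-BY-NC-SA 4.0" becomes license_="by-nc-sa" and version="4.0" before it
            # is passed to get_license_info().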
thumbnail_url = _get_thumbnail_url(processed)
image_count = image_store.add_item(
foreign_identifier=foreign_id,
foreign_landing_url=foreign_landing_url,
image_url=image_url,
height=height,
width=width,
license_info=license_info,
thumbnail_url=thumbnail_url,
creator=creator,
title=title,
meta_data=metadata,
)
return image_count
def _get_creator_info(obj_attr):
creator_info = None
life_cycle = obj_attr.get("lifecycle")
if life_cycle:
creation = life_cycle.get("creation")
if type(creation) == list:
maker = creation[0].get("maker")
if type(maker) == list:
creator_info = maker[0].get("summary_title")
return creator_info
def _get_image_info(processed):
if processed.get("large"):
image = processed.get("large").get("location")
measurements = processed.get("large").get("measurements")
elif processed.get("medium"):
image = processed.get("medium").get("location")
measurements = processed.get("medium").get("measurements")
else:
image = None
measurements = None
image = check_url(image)
height, width = _get_dimensions(measurements)
return image, height, width
def _get_thumbnail_url(processed):
if processed.get("large_thumbnail"):
image = processed.get("large_thumbnail").get("location")
elif processed.get("medium_thumbnail"):
image = processed.get("medium_thumbnail").get("location")
elif processed.get("small_thumbnail"):
image = processed.get("small_thumbnail").get("location")
else:
image = None
thumbnail_url = check_url(image)
return thumbnail_url
def check_url(image_url):
base_url = "https://coimages.sciencemuseumgroup.org.uk/images/"
if image_url:
if "http" in image_url:
checked_url = image_url
else:
checked_url = base_url + image_url
else:
checked_url = None
return checked_url
def _get_dimensions(measurements):
height_width = {}
if measurements:
dimensions = measurements.get("dimensions")
if dimensions:
for dim in dimensions:
height_width[dim.get("dimension")] = dim.get("value")
return height_width.get("height"), height_width.get("width")
def _get_license_version(source):
license_version = None
if source:
legal = source.get("legal")
if legal:
rights = legal.get("rights")
if type(rights) == list:
license_version = rights[0].get("usage_terms")
return license_version
def _get_metadata(obj_attr):
metadata = {}
identifier = obj_attr.get("identifier")
if type(identifier) == list:
metadata["accession number"] = identifier[0].get("value")
name = obj_attr.get("name")
if type(name) == list:
metadata["name"] = name[0].get("value")
category = obj_attr.get("categories")
if type(category) == list:
metadata["category"] = category[0].get("value")
creditline = obj_attr.get("legal")
if type(creditline) == dict:
metadata["creditline"] = creditline.get("credit_line")
description = obj_attr.get("description")
if type(description) == list:
metadata["description"] = description[0].get("value")
return metadata
if __name__ == "__main__":
main()
| import logging
from common.licenses import get_license_info
from common.loader import provider_details as prov
from common.requester import DelayedRequester
from common.storage.image import ImageStore
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s: %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
LIMIT = 100
DELAY = 5.0
RETRIES = 3
PROVIDER = prov.SCIENCE_DEFAULT_PROVIDER
ENDPOINT = "https://collection.sciencemuseumgroup.org.uk/search/"
delay_request = DelayedRequester(delay=DELAY)
image_store = ImageStore(provider=PROVIDER)
HEADERS = {"Accept": "application/json"}
DEFAULT_QUERY_PARAMS = {
"has_image": 1,
"image_license": "CC",
"page[size]": LIMIT,
"page[number]": 0,
"date[from]": 0,
"date[to]": 1500,
}
YEAR_RANGE = [
(0, 1500),
(1500, 1750),
(1750, 1825),
(1825, 1850),
(1850, 1875),
(1875, 1900),
(1900, 1915),
(1915, 1940),
(1940, 1965),
(1965, 1990),
(1990, 2020),
]
# global variable to keep track of records pulled
RECORD_IDS = []
def main():
logger.info("Begin: Science Museum script")
for year_range in YEAR_RANGE:
logger.info(f"Running for years {year_range}")
from_year, to_year = year_range
image_count = _page_records(from_year=from_year, to_year=to_year)
logger.info(f"Images pulled till now {image_count}")
image_count = image_store.commit()
logger.info(f"Total images pulled {image_count}")
def _page_records(from_year, to_year):
image_count = 0
page_number = 0
condition = True
while condition:
query_param = _get_query_param(
page_number=page_number, from_year=from_year, to_year=to_year
)
batch_data = _get_batch_objects(query_param=query_param)
if type(batch_data) == list:
if len(batch_data) > 0:
image_count = _handle_object_data(batch_data)
page_number += 1
else:
condition = False
else:
condition = False
return image_count
def _get_query_param(
page_number=0, from_year=0, to_year=1500, default_query_param=None
):
if default_query_param is None:
default_query_param = DEFAULT_QUERY_PARAMS
query_param = default_query_param.copy()
query_param["page[number]"] = page_number
query_param["date[from]"] = from_year
query_param["date[to]"] = to_year
return query_param
def _get_batch_objects(
endpoint=ENDPOINT, headers=None, retries=RETRIES, query_param=None
):
if headers is None:
headers = HEADERS.copy()
data = None
for retry in range(retries):
response = delay_request.get(endpoint, query_param, headers=headers)
try:
response_json = response.json()
if "data" in response_json.keys():
data = response_json.get("data")
break
except Exception as e:
logger.error(f"Failed to due to {e}")
return data
def _handle_object_data(batch_data):
image_count = 0
for obj_ in batch_data:
id_ = obj_.get("id")
if id_ in RECORD_IDS:
continue
RECORD_IDS.append(id_)
foreign_landing_url = obj_.get("links", {}).get("self")
if foreign_landing_url is None:
continue
obj_attributes = obj_.get("attributes")
if obj_attributes is None:
continue
title = obj_attributes.get("summary_title")
creator = _get_creator_info(obj_attributes)
metadata = _get_metadata(obj_attributes)
multimedia = obj_attributes.get("multimedia")
if multimedia is None:
continue
for image_data in multimedia:
foreign_id = image_data.get("admin", {}).get("uid")
if foreign_id is None:
continue
processed = image_data.get("processed")
source = image_data.get("source")
image_url, height, width = _get_image_info(processed)
if image_url is None:
continue
license_version = _get_license_version(source)
if license_version is None:
continue
license_, version = license_version.lower().split(" ")
license_ = license_.replace("cc-", "")
license_info = get_license_info(license_=license_, license_version=version)
thumbnail_url = _get_thumbnail_url(processed)
image_count = image_store.add_item(
foreign_identifier=foreign_id,
foreign_landing_url=foreign_landing_url,
image_url=image_url,
height=height,
width=width,
license_info=license_info,
thumbnail_url=thumbnail_url,
creator=creator,
title=title,
meta_data=metadata,
)
return image_count
def _get_creator_info(obj_attr):
creator_info = None
life_cycle = obj_attr.get("lifecycle")
if life_cycle:
creation = life_cycle.get("creation")
if type(creation) == list:
maker = creation[0].get("maker")
if type(maker) == list:
creator_info = maker[0].get("summary_title")
return creator_info
def _get_image_info(processed):
if processed.get("large"):
image = processed.get("large").get("location")
measurements = processed.get("large").get("measurements")
elif processed.get("medium"):
image = processed.get("medium").get("location")
measurements = processed.get("medium").get("measurements")
else:
image = None
measurements = None
image = check_url(image)
height, width = _get_dimensions(measurements)
return image, height, width
def _get_thumbnail_url(processed):
if processed.get("large_thumbnail"):
image = processed.get("large_thumbnail").get("location")
elif processed.get("medium_thumbnail"):
image = processed.get("medium_thumbnail").get("location")
elif processed.get("small_thumbnail"):
image = processed.get("small_thumbnail").get("location")
else:
image = None
thumbnail_url = check_url(image)
return thumbnail_url
def check_url(image_url):
base_url = "https://coimages.sciencemuseumgroup.org.uk/images/"
if image_url:
if "http" in image_url:
checked_url = image_url
else:
checked_url = base_url + image_url
else:
checked_url = None
return checked_url
def _get_dimensions(measurements):
height_width = {}
if measurements:
dimensions = measurements.get("dimensions")
if dimensions:
for dim in dimensions:
height_width[dim.get("dimension")] = dim.get("value")
return height_width.get("height"), height_width.get("width")
def _get_license_version(source):
license_version = None
if source:
legal = source.get("legal")
if legal:
rights = legal.get("rights")
if type(rights) == list:
license_version = rights[0].get("usage_terms")
return license_version
def _get_metadata(obj_attr):
metadata = {}
identifier = obj_attr.get("identifier")
if type(identifier) == list:
metadata["accession number"] = identifier[0].get("value")
name = obj_attr.get("name")
if type(name) == list:
metadata["name"] = name[0].get("value")
category = obj_attr.get("categories")
if type(category) == list:
metadata["category"] = category[0].get("value")
creditline = obj_attr.get("legal")
if type(creditline) == dict:
metadata["creditline"] = creditline.get("credit_line")
description = obj_attr.get("description")
if type(description) == list:
metadata["description"] = description[0].get("value")
return metadata
if __name__ == "__main__":
main() | en | 0.881368 | # global variable to keep track of records pulled | 2.37825 | 2 |
digitaltape.py | heerdyes/tapegame | 0 | 10071 | <filename>digitaltape.py
# tape variables
TS_MAX=1000
# the digital tape model
class DTape:
def __init__(self,size,alphabet,noopidx=0):
if size>TS_MAX:
self.size=TS_MAX
else:
self.size=size
if len(alphabet)==0:
raise Exception('alphabet has zero symbols')
self.alphabet=alphabet
self.data=[self.alphabet[noopidx] for x in range(self.size)]
class DTapeMC:
def __init__(self,dtape,cmdmap,noopsym):
self.tape=dtape
self.thead=0
self.cmdmap=cmdmap
self.noopsym=noopsym
self.jmpctr=1
def process_cell(self):
if self.thead>=len(self.tape.data) or self.thead<0:
print('[TAPEBOUND_EXCEEDED] machine head @[%d] is beyond tape'%self.thead)
return
datum=self.tape.data[self.thead]
print('evaluating: %s'%datum)
if datum==self.noopsym:
print('noop')
else:
            # evaluate the command string mapped to this symbol via the instance's cmdmap
            eval(self.cmdmap[datum])
self.thead+=self.jmpctr
class DTapeComputer:
def __init__(self,dtapemc,casetteimg):
self.tapemc=dtapemc
| <filename>digitaltape.py
# tape variables
TS_MAX=1000
# the digital tape model
class DTape:
def __init__(self,size,alphabet,noopidx=0):
if size>TS_MAX:
self.size=TS_MAX
else:
self.size=size
if len(alphabet)==0:
raise Exception('alphabet has zero symbols')
self.alphabet=alphabet
self.data=[self.alphabet[noopidx] for x in range(self.size)]
class DTapeMC:
def __init__(self,dtape,cmdmap,noopsym):
self.tape=dtape
self.thead=0
self.cmdmap=cmdmap
self.noopsym=noopsym
self.jmpctr=1
def process_cell(self):
if self.thead>=len(self.tape.data) or self.thead<0:
print('[TAPEBOUND_EXCEEDED] machine head @[%d] is beyond tape'%self.thead)
return
datum=self.tape.data[self.thead]
print('evaluating: %s'%datum)
if datum==self.noopsym:
print('noop')
else:
            # evaluate the command string mapped to this symbol via the instance's cmdmap
            eval(self.cmdmap[datum])
self.thead+=self.jmpctr
class DTapeComputer:
def __init__(self,dtapemc,casetteimg):
self.tapemc=dtapemc
| en | 0.581374 | # tape variables # the digital tape model | 3.096112 | 3 |
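The digitaltape.py record above defines the tape classes but no driver code; a hypothetical usage sketch (assuming the classes above are in scope, with an invented two-symbol alphabet and command map) might look like:
# --- illustrative sketch, not part of the dataset record above ---
tape = DTape(8, ['.', 'p'])                 # '.' (index 0) acts as the no-op symbol
tape.data[3] = 'p'                          # place a single command on the tape
machine = DTapeMC(tape, {'p': "print('hello from cell 3')"}, '.')
for _ in range(tape.size):                  # sweep the head across every cell once
    machine.process_cell()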
webhook-cdk/lambda/vars.py | ncalteen/github-webhook-lambda-example | 0 | 10072 | <reponame>ncalteen/github-webhook-lambda-example
import json
# Output must be returned in the format mentioned below:
# https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format
lambda_response = {
"isBase64Encoded": False,
"statusCode": 200,
"headers": {
"Content-Type": "application/json",
},
"body": json.dumps({
"Status": "OK"
})
}
| import json
# Output must be returned in the format mentioned below:
# https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format
lambda_response = {
"isBase64Encoded": False,
"statusCode": 200,
"headers": {
"Content-Type": "application/json",
},
"body": json.dumps({
"Status": "OK"
})
} | en | 0.769117 | # Output must be returned in the format mentioned below: # https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format | 2.03945 | 2 |
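vars.py above only defines the response constant; a minimal handler returning it in the API Gateway Lambda-proxy shape referenced in the comment could look like this (hypothetical sketch, not code from that repository; the import assumes the handler module sits next to vars.py):
# --- illustrative sketch, not part of the dataset record above ---
from vars import lambda_response

def lambda_handler(event, context):
    # The proxy integration expects isBase64Encoded, statusCode, headers,
    # and a string body, which is exactly what lambda_response provides.
    return lambda_response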
gym_unblockme/envs/unblockme_render.py | fedingo/gym-unblockme | 3 | 10073 | import pygame
import time
import numpy as np
import sys
gray = (150, 150, 150)
white = (255, 255, 255)
black = (0, 0, 0, )
red_block = (255, 0, 0)
red_border = (76, 0, 19)
block_color = (255, 128, 0)
border_color = (165,42,42)
screen = None
SIDE = 50
BORDER = 5
MARGIN = 5
LINE = 1
h_switch = True
def __draw_horizontal_block(x,y):
global screen, h_switch
pygame.draw.rect(screen, border_color, pygame.Rect(MARGIN + y*SIDE,MARGIN + x*SIDE, SIDE, SIDE))
pygame.draw.rect(screen, block_color, pygame.Rect(MARGIN + y*SIDE + h_switch*BORDER, MARGIN + x*SIDE + BORDER,
SIDE - BORDER, SIDE - 2*BORDER))
h_switch = not h_switch
def __draw_red_block(x,y):
global screen, h_switch
pygame.draw.rect(screen, red_border, pygame.Rect(MARGIN + y*SIDE,MARGIN + x*SIDE, SIDE, SIDE))
pygame.draw.rect(screen, red_block, pygame.Rect(MARGIN + y*SIDE + h_switch*BORDER, MARGIN + x*SIDE + BORDER,
SIDE - BORDER, SIDE - 2*BORDER))
h_switch = not h_switch
def __draw_vertical_block(x,y):
global screen
pygame.draw.rect(screen, border_color, pygame.Rect(MARGIN + y*SIDE, MARGIN + x*SIDE, SIDE, 2*SIDE))
pygame.draw.rect(screen, block_color, pygame.Rect(MARGIN + y*SIDE + BORDER, MARGIN + x*SIDE + BORDER,
SIDE - 2*BORDER, 2*SIDE - 2*BORDER))
## Render function for the unblockme_class
def render_unblockme(game_object):
matrix = game_object.internal_state
k, h, _ = game_object.shape
global screen
if screen is None:
pygame.init()
screen = pygame.display.set_mode((2*MARGIN+k*SIDE, 2*MARGIN+h*SIDE))
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.display.quit()
pygame.quit()
sys.exit(0)
screen.fill(black)
# first we draw the background
for x in range(0,k):
for y in range(0,h):
cell = matrix[x,y,:]
selected_block = np.where(cell == 1)[0]
if len(selected_block) != 0:
#draw the exit on the outer border
if selected_block[0] == 0:
if y == 0:
pygame.draw.rect(screen, white, pygame.Rect(y*SIDE,x*SIDE+MARGIN, SIDE+MARGIN, SIDE))
else:
pygame.draw.rect(screen, white, pygame.Rect(y*SIDE+MARGIN,x*SIDE+MARGIN, SIDE+MARGIN, SIDE))
# Draw the background with the grid pattern
pygame.draw.rect(screen, gray , pygame.Rect(MARGIN + y*SIDE,MARGIN + x*SIDE, SIDE, SIDE))
pygame.draw.rect(screen, white, pygame.Rect(MARGIN + y*SIDE + LINE,MARGIN + x*SIDE + LINE,
SIDE - 2*LINE, SIDE - 2*LINE))
# then we draw the blocks in the grid
for x in range(0,k):
for y in range(0,h):
cell = matrix[x,y,1:]
selected_block = np.where(cell == 1)[0]
if len(selected_block) != 0:
if selected_block[-1] == 1:
__draw_horizontal_block(x,y)
elif selected_block[-1] == 2:
if (x == 0 or not (matrix[x-1,y,1:] == cell).all() ) and \
(x != k-1 and (matrix[x+1,y,1:] == cell).all() ):
__draw_vertical_block(x,y)
elif selected_block[-1] == 0:
__draw_red_block(x,y)
pygame.display.update()
time.sleep(0.1)
if __name__ == "__main__":
from unblockme_class import *
matrix, goal = get_example()
game = unblock_me(matrix, goal)
render_unblockme(game) | import pygame
import time
import numpy as np
import sys
gray = (150, 150, 150)
white = (255, 255, 255)
black = (0, 0, 0, )
red_block = (255, 0, 0)
red_border = (76, 0, 19)
block_color = (255, 128, 0)
border_color = (165,42,42)
screen = None
SIDE = 50
BORDER = 5
MARGIN = 5
LINE = 1
h_switch = True
def __draw_horizontal_block(x,y):
global screen, h_switch
pygame.draw.rect(screen, border_color, pygame.Rect(MARGIN + y*SIDE,MARGIN + x*SIDE, SIDE, SIDE))
pygame.draw.rect(screen, block_color, pygame.Rect(MARGIN + y*SIDE + h_switch*BORDER, MARGIN + x*SIDE + BORDER,
SIDE - BORDER, SIDE - 2*BORDER))
h_switch = not h_switch
def __draw_red_block(x,y):
global screen, h_switch
pygame.draw.rect(screen, red_border, pygame.Rect(MARGIN + y*SIDE,MARGIN + x*SIDE, SIDE, SIDE))
pygame.draw.rect(screen, red_block, pygame.Rect(MARGIN + y*SIDE + h_switch*BORDER, MARGIN + x*SIDE + BORDER,
SIDE - BORDER, SIDE - 2*BORDER))
h_switch = not h_switch
def __draw_vertical_block(x,y):
global screen
pygame.draw.rect(screen, border_color, pygame.Rect(MARGIN + y*SIDE, MARGIN + x*SIDE, SIDE, 2*SIDE))
pygame.draw.rect(screen, block_color, pygame.Rect(MARGIN + y*SIDE + BORDER, MARGIN + x*SIDE + BORDER,
SIDE - 2*BORDER, 2*SIDE - 2*BORDER))
## Render function for the unblockme_class
def render_unblockme(game_object):
matrix = game_object.internal_state
k, h, _ = game_object.shape
global screen
if screen is None:
pygame.init()
screen = pygame.display.set_mode((2*MARGIN+k*SIDE, 2*MARGIN+h*SIDE))
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.display.quit()
pygame.quit()
sys.exit(0)
screen.fill(black)
# first we draw the background
for x in range(0,k):
for y in range(0,h):
cell = matrix[x,y,:]
selected_block = np.where(cell == 1)[0]
if len(selected_block) != 0:
#draw the exit on the outer border
if selected_block[0] == 0:
if y == 0:
pygame.draw.rect(screen, white, pygame.Rect(y*SIDE,x*SIDE+MARGIN, SIDE+MARGIN, SIDE))
else:
pygame.draw.rect(screen, white, pygame.Rect(y*SIDE+MARGIN,x*SIDE+MARGIN, SIDE+MARGIN, SIDE))
# Draw the background with the grid pattern
pygame.draw.rect(screen, gray , pygame.Rect(MARGIN + y*SIDE,MARGIN + x*SIDE, SIDE, SIDE))
pygame.draw.rect(screen, white, pygame.Rect(MARGIN + y*SIDE + LINE,MARGIN + x*SIDE + LINE,
SIDE - 2*LINE, SIDE - 2*LINE))
# then we draw the blocks in the grid
for x in range(0,k):
for y in range(0,h):
cell = matrix[x,y,1:]
selected_block = np.where(cell == 1)[0]
if len(selected_block) != 0:
if selected_block[-1] == 1:
__draw_horizontal_block(x,y)
elif selected_block[-1] == 2:
if (x == 0 or not (matrix[x-1,y,1:] == cell).all() ) and \
(x != k-1 and (matrix[x+1,y,1:] == cell).all() ):
__draw_vertical_block(x,y)
elif selected_block[-1] == 0:
__draw_red_block(x,y)
pygame.display.update()
time.sleep(0.1)
if __name__ == "__main__":
from unblockme_class import *
matrix, goal = get_example()
game = unblock_me(matrix, goal)
render_unblockme(game) | en | 0.751741 | ## Render function for the unblockme_class # first we draw the background #draw the exit on the outer border # Draw the background with the grid pattern # then we draw the blocks in the grid | 3.054274 | 3 |
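render_unblockme above reads the game state as a one-hot tensor; the channel layout sketched below is inferred from the drawing code (channel 0 ~ exit marker, 1 ~ the red target block, 2 ~ horizontal blocks, 3 ~ vertical blocks) and the board contents are invented for illustration:
# --- illustrative sketch, not part of the dataset record above ---
import numpy as np
board = np.zeros((6, 6, 4))                  # rows, columns, one-hot channels
board[2, 0, 0] = 1                           # exit marker on the left edge of row 2
board[2, 2, 1] = board[2, 3, 1] = 1          # the red block that must reach the exit
board[0, 4, 2] = board[0, 5, 2] = 1          # a horizontal obstacle
board[3, 1, 3] = board[4, 1, 3] = 1          # a vertical obstacle spanning two rows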
scaffolds/__init__.py | chhsiao1981/frontend_template | 0 | 10074 | # API
from pyramid.scaffolds import PyramidTemplate
import os
import re
import logging
def _camelcase_to_upper_camel_case(the_str):
if not the_str:
return ''
return the_str[0].upper() + the_str[1:]
def _upper_camelcase_to_camelcase(the_str):
if not the_str:
return ''
return the_str[0].lower() + the_str[1:]
def _camelcase_to_constant(the_str):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', the_str)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).upper()
class MyTemplate(PyramidTemplate):
def pre(self, command, output_dir, vars):
the_args = command.args
module_name = '' if not isinstance(the_args, list) or len(the_args) < 2 else the_args[1]
logging.warning('command: %s output_dir: %s vars: %s args: %s module_name: %s', command, output_dir, vars, command.args, module_name)
self._setup_module(vars, module_name)
return PyramidTemplate.pre(self, command, output_dir, vars)
def _setup_module(self, vars, full_module_name):
full_module_path = full_module_name.replace('.', os.path.sep)
module_name = os.path.basename(full_module_path)
class_name = _camelcase_to_upper_camel_case(module_name)
constant_name = _camelcase_to_constant(module_name)
sub_pkg_dir = os.path.dirname(full_module_path)
sub_pkg_name = sub_pkg_dir.replace(os.path.sep, '.')
test_name = '' if not module_name else 'test' + class_name
sub_pkg_dir_list = [] if not sub_pkg_dir else sub_pkg_dir.split(os.path.sep)
test_dir_list = ['test_' + each_pkg for each_pkg in sub_pkg_dir_list]
test_dir = os.path.sep.join(test_dir_list)
pkg_name = vars['package']
if sub_pkg_name:
pkg_name += '.' + sub_pkg_name
project_name = vars['project']
vars['module_name'] = module_name
vars['class_name'] = class_name
vars['sub_pkg_name'] = sub_pkg_name
vars['sub_pkg_dir'] = sub_pkg_dir
vars['constant_name'] = constant_name
vars['test_name'] = test_name
vars['test_dir'] = test_dir
vars['pkg_name'] = pkg_name
vars['project_name'] = project_name
class ComponentProjectTemplate(MyTemplate):
_template_dir = 'component'
summary = 'component'
class ContainerProjectTemplate(MyTemplate):
_template_dir = 'container'
summary = 'container'
class SubContainerProjectTemplate(MyTemplate):
_template_dir = 'subcontainer'
summary = 'subcontainer'
class ModuleProjectTemplate(MyTemplate):
_template_dir = 'module'
summary = 'module'
class InitStarterProjectTemplate(MyTemplate):
_template_dir = 'init_starter'
summary = 'including store / middleware / utils'
class InitDevProjectTemplate(MyTemplate):
_template_dir = 'init_dev'
summary = 'starting project'
| # API
from pyramid.scaffolds import PyramidTemplate
import os
import re
import logging
def _camelcase_to_upper_camel_case(the_str):
if not the_str:
return ''
return the_str[0].upper() + the_str[1:]
def _upper_camelcase_to_camelcase(the_str):
if not the_str:
return ''
return the_str[0].lower() + the_str[1:]
def _camelcase_to_constant(the_str):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', the_str)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).upper()
class MyTemplate(PyramidTemplate):
def pre(self, command, output_dir, vars):
the_args = command.args
module_name = '' if not isinstance(the_args, list) or len(the_args) < 2 else the_args[1]
logging.warning('command: %s output_dir: %s vars: %s args: %s module_name: %s', command, output_dir, vars, command.args, module_name)
self._setup_module(vars, module_name)
return PyramidTemplate.pre(self, command, output_dir, vars)
def _setup_module(self, vars, full_module_name):
full_module_path = full_module_name.replace('.', os.path.sep)
module_name = os.path.basename(full_module_path)
class_name = _camelcase_to_upper_camel_case(module_name)
constant_name = _camelcase_to_constant(module_name)
sub_pkg_dir = os.path.dirname(full_module_path)
sub_pkg_name = sub_pkg_dir.replace(os.path.sep, '.')
test_name = '' if not module_name else 'test' + class_name
sub_pkg_dir_list = [] if not sub_pkg_dir else sub_pkg_dir.split(os.path.sep)
test_dir_list = ['test_' + each_pkg for each_pkg in sub_pkg_dir_list]
test_dir = os.path.sep.join(test_dir_list)
pkg_name = vars['package']
if sub_pkg_name:
pkg_name += '.' + sub_pkg_name
project_name = vars['project']
vars['module_name'] = module_name
vars['class_name'] = class_name
vars['sub_pkg_name'] = sub_pkg_name
vars['sub_pkg_dir'] = sub_pkg_dir
vars['constant_name'] = constant_name
vars['test_name'] = test_name
vars['test_dir'] = test_dir
vars['pkg_name'] = pkg_name
vars['project_name'] = project_name
class ComponentProjectTemplate(MyTemplate):
_template_dir = 'component'
summary = 'component'
class ContainerProjectTemplate(MyTemplate):
_template_dir = 'container'
summary = 'container'
class SubContainerProjectTemplate(MyTemplate):
_template_dir = 'subcontainer'
summary = 'subcontainer'
class ModuleProjectTemplate(MyTemplate):
_template_dir = 'module'
summary = 'module'
class InitStarterProjectTemplate(MyTemplate):
_template_dir = 'init_starter'
summary = 'including store / middleware / utils'
class InitDevProjectTemplate(MyTemplate):
_template_dir = 'init_dev'
summary = 'starting project'
| none | 1 | 2.615994 | 3 |
|
phy/cluster/tests/test_supervisor.py | mikailweston/phy | 0 | 10075 | # -*- coding: utf-8 -*-
"""Test GUI component."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
#from contextlib import contextmanager
from pytest import yield_fixture, fixture, raises
import numpy as np
from numpy.testing import assert_array_equal as ae
from .. import supervisor as _supervisor
from ..supervisor import (Supervisor,
TaskLogger,
ClusterView,
SimilarityView,
ActionCreator,
)
from phy.gui import GUI
from phy.gui.widgets import Barrier
from phy.gui.qt import qInstallMessageHandler
from phy.gui.tests.test_widgets import _assert, _wait_until_table_ready
from phy.utils.context import Context
from phylib.utils import connect, Bunch, emit
def handler(msg_type, msg_log_context, msg_string):
pass
qInstallMessageHandler(handler)
#------------------------------------------------------------------------------
# Fixtures
#------------------------------------------------------------------------------
@yield_fixture
def gui(tempdir, qtbot):
# NOTE: mock patch show box exec_
_supervisor._show_box = lambda _: _
gui = GUI(position=(200, 100), size=(500, 500), config_dir=tempdir)
gui.set_default_actions()
gui.show()
qtbot.waitForWindowShown(gui)
yield gui
qtbot.wait(5)
gui.close()
del gui
qtbot.wait(5)
@fixture
def supervisor(qtbot, gui, cluster_ids, cluster_groups, cluster_labels,
similarity, tempdir):
spike_clusters = np.repeat(cluster_ids, 2)
s = Supervisor(
spike_clusters,
cluster_groups=cluster_groups,
cluster_labels=cluster_labels,
similarity=similarity,
context=Context(tempdir),
sort=('id', 'desc'),
)
s.attach(gui)
b = Barrier()
connect(b('cluster_view'), event='ready', sender=s.cluster_view)
connect(b('similarity_view'), event='ready', sender=s.similarity_view)
b.wait()
return s
#------------------------------------------------------------------------------
# Test tasks
#------------------------------------------------------------------------------
@fixture
def tl():
class MockClusterView(object):
_selected = [0]
def select(self, cl, callback=None, **kwargs):
self._selected = cl
callback({'selected': cl, 'next': cl[-1] + 1})
def next(self, callback=None):
callback({'selected': [self._selected[-1] + 1], 'next': self._selected[-1] + 2})
def previous(self, callback=None): # pragma: no cover
callback({'selected': [self._selected[-1] - 1], 'next': self._selected[-1]})
class MockSimilarityView(MockClusterView):
pass
class MockSupervisor(object):
def merge(self, cluster_ids, to, callback=None):
callback(Bunch(deleted=cluster_ids, added=[to]))
def split(self, old_cluster_ids, new_cluster_ids, callback=None):
callback(Bunch(deleted=old_cluster_ids, added=new_cluster_ids))
def move(self, which, group, callback=None):
callback(Bunch(metadata_changed=which, metadata_value=group))
def undo(self, callback=None):
callback(Bunch())
def redo(self, callback=None):
callback(Bunch())
out = TaskLogger(MockClusterView(), MockSimilarityView(), MockSupervisor())
return out
def test_task_1(tl):
assert tl.last_state(None) is None
def test_task_2(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.process()
assert tl.last_state() == ([0], 1, None, None)
def test_task_3(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.process()
assert tl.last_state() == ([0], 1, [100], 101)
def test_task_merge(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'merge', [0, 100], 1000)
tl.process()
assert tl.last_state() == ([1000], 1001, None, None)
tl.enqueue(tl.supervisor, 'undo')
tl.process()
assert tl.last_state() == ([0], 1, [100], 101)
tl.enqueue(tl.supervisor, 'redo')
tl.process()
assert tl.last_state() == ([1000], 1001, None, None)
def test_task_split(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'split', [0, 100], [1000, 1001])
tl.process()
assert tl.last_state() == ([1000, 1001], 1002, None, None)
def test_task_move_1(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.supervisor, 'move', [0], 'good')
tl.process()
assert tl.last_state() == ([1], 2, None, None)
def test_task_move_best(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'move', 'best', 'good')
tl.process()
assert tl.last_state() == ([1], 2, None, None)
def test_task_move_similar(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'move', 'similar', 'good')
tl.process()
assert tl.last_state() == ([0], 1, [101], 102)
def test_task_move_all(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'move', 'all', 'good')
tl.process()
assert tl.last_state() == ([1], 2, [101], 102)
#------------------------------------------------------------------------------
# Test cluster and similarity views
#------------------------------------------------------------------------------
@fixture
def data():
_data = [{"id": i,
"n_spikes": 100 - 10 * i,
"group": {2: 'noise', 3: 'noise', 5: 'mua', 8: 'good'}.get(i, None),
"is_masked": i in (2, 3, 5),
} for i in range(10)]
return _data
def test_cluster_view_1(qtbot, gui, data):
cv = ClusterView(gui, data=data)
_wait_until_table_ready(qtbot, cv)
cv.sort_by('n_spikes', 'asc')
cv.select([1])
qtbot.wait(10)
assert cv.state == {'current_sort': ('n_spikes', 'asc'), 'selected': [1]}
cv.set_state({'current_sort': ('id', 'desc'), 'selected': [2]})
assert cv.state == {'current_sort': ('id', 'desc'), 'selected': [2]}
def test_similarity_view_1(qtbot, gui, data):
sv = SimilarityView(gui, data=data)
_wait_until_table_ready(qtbot, sv)
@connect(sender=sv)
def on_request_similar_clusters(sender, cluster_id):
return [{'id': id} for id in (100 + cluster_id, 110 + cluster_id, 102 + cluster_id)]
sv.reset([5])
_assert(sv.get_ids, [105, 115, 107])
def test_cluster_view_extra_columns(qtbot, gui, data):
for cl in data:
cl['my_metrics'] = cl['id'] * 1000
cv = ClusterView(gui, data=data, columns=['id', 'n_spikes', 'my_metrics'])
_wait_until_table_ready(qtbot, cv)
#------------------------------------------------------------------------------
# Test ActionCreator
#------------------------------------------------------------------------------
def test_action_creator_1(qtbot, gui):
ac = ActionCreator()
ac.attach(gui)
gui.show()
#------------------------------------------------------------------------------
# Test GUI component
#------------------------------------------------------------------------------
def _select(supervisor, cluster_ids, similar=None):
supervisor.task_logger.enqueue(supervisor.cluster_view, 'select', cluster_ids)
if similar is not None:
supervisor.task_logger.enqueue(supervisor.similarity_view, 'select', similar)
supervisor.task_logger.process()
supervisor.block()
supervisor.task_logger.show_history()
assert supervisor.task_logger.last_state()[0] == cluster_ids
assert supervisor.task_logger.last_state()[2] == similar
def _assert_selected(supervisor, sel):
assert supervisor.selected == sel
def test_select(qtbot, supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
def test_supervisor_busy(qtbot, supervisor):
_select(supervisor, [30], [20])
o = object()
emit('is_busy', o, True)
assert supervisor._is_busy
# The action fails while the supervisor is busy.
with raises(RuntimeError):
emit('action', supervisor.action_creator, 'merge')
emit('is_busy', o, False)
assert not supervisor._is_busy
# The action succeeds because the supervisor is no longer busy.
emit('action', supervisor.action_creator, 'merge')
supervisor.block()
assert not supervisor._is_busy
def test_supervisor_cluster_metrics(
qtbot, gui, cluster_ids, cluster_groups, similarity, tempdir):
spike_clusters = np.repeat(cluster_ids, 2)
def my_metrics(cluster_id):
return cluster_id ** 2
cluster_metrics = {'my_metrics': my_metrics}
mc = Supervisor(spike_clusters,
cluster_groups=cluster_groups,
cluster_metrics=cluster_metrics,
similarity=similarity,
context=Context(tempdir),
)
mc.attach(gui)
b = Barrier()
connect(b('cluster_view'), event='ready', sender=mc.cluster_view)
connect(b('similarity_view'), event='ready', sender=mc.similarity_view)
b.wait()
assert 'my_metrics' in mc.columns
def test_supervisor_select_1(qtbot, supervisor):
# WARNING: always use actions in tests, because this doesn't call
# the supervisor method directly, but raises an event, enqueue the task,
# and call TaskLogger.process() which handles the cascade of callbacks.
supervisor.select_actions.select([0])
supervisor.block()
_assert_selected(supervisor, [0])
supervisor.task_logger.show_history()
def test_supervisor_color(qtbot, supervisor):
supervisor.view_actions.colormap_linear()
supervisor.view_actions.color_field_n_spikes()
supervisor.view_actions.toggle_categorical_colormap(False)
supervisor.view_actions.toggle_logarithmic_colormap(True)
def test_supervisor_select_2(qtbot, supervisor):
supervisor.select_actions.next_best()
supervisor.block()
_assert_selected(supervisor, [30])
def test_supervisor_select_order(qtbot, supervisor):
_select(supervisor, [1, 0])
_assert_selected(supervisor, [1, 0])
_select(supervisor, [0, 1])
_assert_selected(supervisor, [0, 1])
def test_supervisor_edge_cases(supervisor):
# Empty selection at first.
ae(supervisor.clustering.cluster_ids, [0, 1, 2, 10, 11, 20, 30])
_select(supervisor, [0])
supervisor.undo()
supervisor.block()
supervisor.redo()
supervisor.block()
# Merge.
supervisor.merge()
supervisor.block()
_assert_selected(supervisor, [0])
supervisor.merge([])
supervisor.block()
_assert_selected(supervisor, [0])
supervisor.merge([10])
supervisor.block()
_assert_selected(supervisor, [0])
# Split.
supervisor.split([])
supervisor.block()
_assert_selected(supervisor, [0])
# Move.
supervisor.move('ignored', [])
supervisor.block()
supervisor.save()
def test_supervisor_save(qtbot, gui, supervisor):
emit('request_save', gui)
def test_supervisor_skip(qtbot, gui, supervisor):
# yield [0, 1, 2, 10, 11, 20, 30]
# # i, g, N, i, g, N, N
expected = [30, 20, 11, 2, 1]
for clu in expected:
supervisor.select_actions.next_best()
supervisor.block()
_assert_selected(supervisor, [clu])
def test_supervisor_sort(qtbot, supervisor):
supervisor.sort('id', 'desc')
qtbot.wait(50)
assert supervisor.state.cluster_view.current_sort == ('id', 'desc')
supervisor.select_actions.sort_by_n_spikes()
qtbot.wait(50)
assert supervisor.state.cluster_view.current_sort == ('n_spikes', 'desc')
def test_supervisor_filter(qtbot, supervisor):
supervisor.filter('5 <= id && id <= 20')
qtbot.wait(50)
_cl = []
supervisor.cluster_view.get_ids(lambda cluster_ids: _cl.extend(cluster_ids))
qtbot.wait(50)
assert _cl == [20, 11, 10]
def test_supervisor_merge_1(qtbot, supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
supervisor.actions.merge()
supervisor.block()
_assert_selected(supervisor, [31])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [30, 20])
supervisor.actions.redo()
supervisor.block()
supervisor.task_logger.show_history()
_assert_selected(supervisor, [31])
assert supervisor.is_dirty()
def test_supervisor_merge_event(qtbot, supervisor):
_select(supervisor, [30], [20])
_l = []
@connect(sender=supervisor)
def on_select(sender, cluster_ids):
_l.append(cluster_ids)
supervisor.actions.merge()
supervisor.block()
# After a merge, there should be only one select event.
assert len(_l) == 1
def test_supervisor_merge_move(qtbot, supervisor):
"""Check that merge then move selects the next cluster in the original
cluster view, not the updated cluster view."""
_select(supervisor, [20, 11], [])
_assert_selected(supervisor, [20, 11])
supervisor.actions.merge()
supervisor.block()
_assert_selected(supervisor, [31])
supervisor.actions.move('good', 'all')
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.actions.move('good', 'all')
supervisor.block()
_assert_selected(supervisor, [2])
def test_supervisor_split_0(qtbot, supervisor):
_select(supervisor, [1, 2])
_assert_selected(supervisor, [1, 2])
supervisor.actions.split([1, 2])
supervisor.block()
_assert_selected(supervisor, [31, 32, 33])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [1, 2])
supervisor.actions.redo()
supervisor.block()
_assert_selected(supervisor, [31, 32, 33])
def test_supervisor_split_1(supervisor):
supervisor.select_actions.select([1, 2])
supervisor.block()
@connect(sender=supervisor)
def on_request_split(sender):
return [1, 2]
supervisor.actions.split()
supervisor.block()
_assert_selected(supervisor, [31, 32, 33])
def test_supervisor_split_2(gui, similarity):
spike_clusters = np.array([0, 0, 1])
supervisor = Supervisor(spike_clusters,
similarity=similarity,
)
supervisor.attach(gui)
b = Barrier()
connect(b('cluster_view'), event='ready', sender=supervisor.cluster_view)
connect(b('similarity_view'), event='ready', sender=supervisor.similarity_view)
b.wait()
supervisor.actions.split([0])
supervisor.block()
_assert_selected(supervisor, [2, 3])
def test_supervisor_state(tempdir, qtbot, gui, supervisor):
supervisor.select(1)
cv = supervisor.cluster_view
assert supervisor.state.cluster_view.current_sort == ('id', 'desc')
assert supervisor.state.cluster_view.selected == [1]
cv.sort_by('id')
assert supervisor.state.cluster_view.current_sort == ('id', 'asc')
cv.set_state({'current_sort': ('n_spikes', 'desc')})
assert supervisor.state.cluster_view.current_sort == ('n_spikes', 'desc')
cv.sort_by('id', 'desc')
assert supervisor.all_cluster_ids == [30, 20, 11, 10, 2, 1, 0]
def test_supervisor_label(supervisor):
_select(supervisor, [20])
supervisor.label("my_field", 3.14)
supervisor.block()
supervisor.label("my_field", 1.23, cluster_ids=30)
supervisor.block()
assert 'my_field' in supervisor.fields
assert supervisor.get_labels('my_field')[20] == 3.14
assert supervisor.get_labels('my_field')[30] == 1.23
def test_supervisor_label_cluster_1(supervisor):
_select(supervisor, [20, 30])
supervisor.label("my_field", 3.14)
supervisor.block()
# Same value for the old clusters.
l = supervisor.get_labels('my_field')
assert l[20] == l[30] == 3.14
up = supervisor.merge()
supervisor.block()
assert supervisor.get_labels('my_field')[up.added[0]] == 3.14
def test_supervisor_label_cluster_2(supervisor):
_select(supervisor, [20])
supervisor.label("my_field", 3.14)
supervisor.block()
# One of the parents.
l = supervisor.get_labels('my_field')
assert l[20] == 3.14
assert l[30] is None
up = supervisor.merge([20, 30])
supervisor.block()
assert supervisor.get_labels('my_field')[up.added[0]] == 3.14
def test_supervisor_label_cluster_3(supervisor):
# Conflict: largest cluster wins.
_select(supervisor, [20, 30])
supervisor.label("my_field", 3.14)
supervisor.block()
# Create merged cluster from 20 and 30.
up = supervisor.merge()
new = up.added[0]
supervisor.block()
    # It got the label of its parents.
assert supervisor.get_labels('my_field')[new] == 3.14
# Now, we label a smaller cluster.
supervisor.label("my_field", 2.718, cluster_ids=[10])
# We merge the large and small cluster together.
up = supervisor.merge(up.added + [10])
supervisor.block()
# The new cluster should have the value of the first, merged big cluster, i.e. 3.14.
assert supervisor.get_labels('my_field')[up.added[0]] == 3.14
def test_supervisor_move_1(supervisor):
_select(supervisor, [20])
_assert_selected(supervisor, [20])
assert not supervisor.move('', '')
supervisor.actions.move('noise', 'all')
supervisor.block()
_assert_selected(supervisor, [11])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [20])
supervisor.actions.redo()
supervisor.block()
_assert_selected(supervisor, [11])
def test_supervisor_move_2(supervisor):
_select(supervisor, [20], [10])
_assert_selected(supervisor, [20, 10])
supervisor.actions.move('noise', 10)
supervisor.block()
_assert_selected(supervisor, [20, 2])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [20, 10])
supervisor.actions.redo()
supervisor.block()
_assert_selected(supervisor, [20, 2])
def test_supervisor_move_3(qtbot, supervisor):
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.actions.move_best_to_noise()
supervisor.block()
_assert_selected(supervisor, [20])
supervisor.actions.move_best_to_mua()
supervisor.block()
_assert_selected(supervisor, [11])
supervisor.actions.move_best_to_good()
supervisor.block()
_assert_selected(supervisor, [2])
supervisor.cluster_meta.get('group', 30) == 'noise'
supervisor.cluster_meta.get('group', 20) == 'mua'
supervisor.cluster_meta.get('group', 11) == 'good'
def test_supervisor_move_4(supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
supervisor.actions.move_similar_to_noise()
supervisor.block()
_assert_selected(supervisor, [30, 11])
supervisor.actions.move_similar_to_mua()
supervisor.block()
_assert_selected(supervisor, [30, 2])
supervisor.actions.move_similar_to_good()
supervisor.block()
_assert_selected(supervisor, [30, 1])
supervisor.cluster_meta.get('group', 20) == 'noise'
supervisor.cluster_meta.get('group', 11) == 'mua'
supervisor.cluster_meta.get('group', 2) == 'good'
def test_supervisor_move_5(supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
supervisor.actions.move_all_to_noise()
supervisor.block()
_assert_selected(supervisor, [11, 2])
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [11, 1])
supervisor.actions.move_all_to_mua()
supervisor.block()
_assert_selected(supervisor, [2])
supervisor.actions.move_all_to_good()
supervisor.block()
_assert_selected(supervisor, [])
supervisor.cluster_meta.get('group', 30) == 'noise'
supervisor.cluster_meta.get('group', 20) == 'noise'
supervisor.cluster_meta.get('group', 11) == 'mua'
supervisor.cluster_meta.get('group', 10) == 'mua'
supervisor.cluster_meta.get('group', 2) == 'good'
supervisor.cluster_meta.get('group', 1) == 'good'
def test_supervisor_reset(qtbot, supervisor):
supervisor.select_actions.select([10, 11])
supervisor.select_actions.reset_wizard()
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [30, 20])
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [30, 11])
supervisor.select_actions.previous()
supervisor.block()
_assert_selected(supervisor, [30, 20])
def test_supervisor_nav(qtbot, supervisor):
supervisor.select_actions.reset_wizard()
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.select_actions.next_best()
supervisor.block()
_assert_selected(supervisor, [20])
supervisor.select_actions.previous_best()
supervisor.block()
_assert_selected(supervisor, [30])
| # -*- coding: utf-8 -*-
"""Test GUI component."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
#from contextlib import contextmanager
from pytest import yield_fixture, fixture, raises
import numpy as np
from numpy.testing import assert_array_equal as ae
from .. import supervisor as _supervisor
from ..supervisor import (Supervisor,
TaskLogger,
ClusterView,
SimilarityView,
ActionCreator,
)
from phy.gui import GUI
from phy.gui.widgets import Barrier
from phy.gui.qt import qInstallMessageHandler
from phy.gui.tests.test_widgets import _assert, _wait_until_table_ready
from phy.utils.context import Context
from phylib.utils import connect, Bunch, emit
def handler(msg_type, msg_log_context, msg_string):
pass
qInstallMessageHandler(handler)
#------------------------------------------------------------------------------
# Fixtures
#------------------------------------------------------------------------------
@yield_fixture
def gui(tempdir, qtbot):
# NOTE: mock patch show box exec_
_supervisor._show_box = lambda _: _
gui = GUI(position=(200, 100), size=(500, 500), config_dir=tempdir)
gui.set_default_actions()
gui.show()
qtbot.waitForWindowShown(gui)
yield gui
qtbot.wait(5)
gui.close()
del gui
qtbot.wait(5)
@fixture
def supervisor(qtbot, gui, cluster_ids, cluster_groups, cluster_labels,
similarity, tempdir):
spike_clusters = np.repeat(cluster_ids, 2)
s = Supervisor(
spike_clusters,
cluster_groups=cluster_groups,
cluster_labels=cluster_labels,
similarity=similarity,
context=Context(tempdir),
sort=('id', 'desc'),
)
s.attach(gui)
b = Barrier()
connect(b('cluster_view'), event='ready', sender=s.cluster_view)
connect(b('similarity_view'), event='ready', sender=s.similarity_view)
b.wait()
return s
#------------------------------------------------------------------------------
# Test tasks
#------------------------------------------------------------------------------
@fixture
def tl():
class MockClusterView(object):
_selected = [0]
def select(self, cl, callback=None, **kwargs):
self._selected = cl
callback({'selected': cl, 'next': cl[-1] + 1})
def next(self, callback=None):
callback({'selected': [self._selected[-1] + 1], 'next': self._selected[-1] + 2})
def previous(self, callback=None): # pragma: no cover
callback({'selected': [self._selected[-1] - 1], 'next': self._selected[-1]})
class MockSimilarityView(MockClusterView):
pass
class MockSupervisor(object):
def merge(self, cluster_ids, to, callback=None):
callback(Bunch(deleted=cluster_ids, added=[to]))
def split(self, old_cluster_ids, new_cluster_ids, callback=None):
callback(Bunch(deleted=old_cluster_ids, added=new_cluster_ids))
def move(self, which, group, callback=None):
callback(Bunch(metadata_changed=which, metadata_value=group))
def undo(self, callback=None):
callback(Bunch())
def redo(self, callback=None):
callback(Bunch())
out = TaskLogger(MockClusterView(), MockSimilarityView(), MockSupervisor())
return out
def test_task_1(tl):
assert tl.last_state(None) is None
def test_task_2(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.process()
assert tl.last_state() == ([0], 1, None, None)
def test_task_3(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.process()
assert tl.last_state() == ([0], 1, [100], 101)
def test_task_merge(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'merge', [0, 100], 1000)
tl.process()
assert tl.last_state() == ([1000], 1001, None, None)
tl.enqueue(tl.supervisor, 'undo')
tl.process()
assert tl.last_state() == ([0], 1, [100], 101)
tl.enqueue(tl.supervisor, 'redo')
tl.process()
assert tl.last_state() == ([1000], 1001, None, None)
def test_task_split(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'split', [0, 100], [1000, 1001])
tl.process()
assert tl.last_state() == ([1000, 1001], 1002, None, None)
def test_task_move_1(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.supervisor, 'move', [0], 'good')
tl.process()
assert tl.last_state() == ([1], 2, None, None)
def test_task_move_best(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'move', 'best', 'good')
tl.process()
assert tl.last_state() == ([1], 2, None, None)
def test_task_move_similar(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'move', 'similar', 'good')
tl.process()
assert tl.last_state() == ([0], 1, [101], 102)
def test_task_move_all(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'move', 'all', 'good')
tl.process()
assert tl.last_state() == ([1], 2, [101], 102)
#------------------------------------------------------------------------------
# Test cluster and similarity views
#------------------------------------------------------------------------------
@fixture
def data():
_data = [{"id": i,
"n_spikes": 100 - 10 * i,
"group": {2: 'noise', 3: 'noise', 5: 'mua', 8: 'good'}.get(i, None),
"is_masked": i in (2, 3, 5),
} for i in range(10)]
return _data
def test_cluster_view_1(qtbot, gui, data):
cv = ClusterView(gui, data=data)
_wait_until_table_ready(qtbot, cv)
cv.sort_by('n_spikes', 'asc')
cv.select([1])
qtbot.wait(10)
assert cv.state == {'current_sort': ('n_spikes', 'asc'), 'selected': [1]}
cv.set_state({'current_sort': ('id', 'desc'), 'selected': [2]})
assert cv.state == {'current_sort': ('id', 'desc'), 'selected': [2]}
def test_similarity_view_1(qtbot, gui, data):
sv = SimilarityView(gui, data=data)
_wait_until_table_ready(qtbot, sv)
@connect(sender=sv)
def on_request_similar_clusters(sender, cluster_id):
return [{'id': id} for id in (100 + cluster_id, 110 + cluster_id, 102 + cluster_id)]
sv.reset([5])
_assert(sv.get_ids, [105, 115, 107])
def test_cluster_view_extra_columns(qtbot, gui, data):
for cl in data:
cl['my_metrics'] = cl['id'] * 1000
cv = ClusterView(gui, data=data, columns=['id', 'n_spikes', 'my_metrics'])
_wait_until_table_ready(qtbot, cv)
#------------------------------------------------------------------------------
# Test ActionCreator
#------------------------------------------------------------------------------
def test_action_creator_1(qtbot, gui):
ac = ActionCreator()
ac.attach(gui)
gui.show()
#------------------------------------------------------------------------------
# Test GUI component
#------------------------------------------------------------------------------
def _select(supervisor, cluster_ids, similar=None):
supervisor.task_logger.enqueue(supervisor.cluster_view, 'select', cluster_ids)
if similar is not None:
supervisor.task_logger.enqueue(supervisor.similarity_view, 'select', similar)
supervisor.task_logger.process()
supervisor.block()
supervisor.task_logger.show_history()
assert supervisor.task_logger.last_state()[0] == cluster_ids
assert supervisor.task_logger.last_state()[2] == similar
def _assert_selected(supervisor, sel):
assert supervisor.selected == sel
def test_select(qtbot, supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
def test_supervisor_busy(qtbot, supervisor):
_select(supervisor, [30], [20])
o = object()
emit('is_busy', o, True)
assert supervisor._is_busy
# The action fails while the supervisor is busy.
with raises(RuntimeError):
emit('action', supervisor.action_creator, 'merge')
emit('is_busy', o, False)
assert not supervisor._is_busy
# The action succeeds because the supervisor is no longer busy.
emit('action', supervisor.action_creator, 'merge')
supervisor.block()
assert not supervisor._is_busy
def test_supervisor_cluster_metrics(
qtbot, gui, cluster_ids, cluster_groups, similarity, tempdir):
spike_clusters = np.repeat(cluster_ids, 2)
def my_metrics(cluster_id):
return cluster_id ** 2
cluster_metrics = {'my_metrics': my_metrics}
mc = Supervisor(spike_clusters,
cluster_groups=cluster_groups,
cluster_metrics=cluster_metrics,
similarity=similarity,
context=Context(tempdir),
)
mc.attach(gui)
b = Barrier()
connect(b('cluster_view'), event='ready', sender=mc.cluster_view)
connect(b('similarity_view'), event='ready', sender=mc.similarity_view)
b.wait()
assert 'my_metrics' in mc.columns
def test_supervisor_select_1(qtbot, supervisor):
# WARNING: always use actions in tests, because this doesn't call
# the supervisor method directly, but raises an event, enqueue the task,
# and call TaskLogger.process() which handles the cascade of callbacks.
supervisor.select_actions.select([0])
supervisor.block()
_assert_selected(supervisor, [0])
supervisor.task_logger.show_history()
def test_supervisor_color(qtbot, supervisor):
supervisor.view_actions.colormap_linear()
supervisor.view_actions.color_field_n_spikes()
supervisor.view_actions.toggle_categorical_colormap(False)
supervisor.view_actions.toggle_logarithmic_colormap(True)
def test_supervisor_select_2(qtbot, supervisor):
supervisor.select_actions.next_best()
supervisor.block()
_assert_selected(supervisor, [30])
def test_supervisor_select_order(qtbot, supervisor):
_select(supervisor, [1, 0])
_assert_selected(supervisor, [1, 0])
_select(supervisor, [0, 1])
_assert_selected(supervisor, [0, 1])
def test_supervisor_edge_cases(supervisor):
# Empty selection at first.
ae(supervisor.clustering.cluster_ids, [0, 1, 2, 10, 11, 20, 30])
_select(supervisor, [0])
supervisor.undo()
supervisor.block()
supervisor.redo()
supervisor.block()
# Merge.
supervisor.merge()
supervisor.block()
_assert_selected(supervisor, [0])
supervisor.merge([])
supervisor.block()
_assert_selected(supervisor, [0])
supervisor.merge([10])
supervisor.block()
_assert_selected(supervisor, [0])
# Split.
supervisor.split([])
supervisor.block()
_assert_selected(supervisor, [0])
# Move.
supervisor.move('ignored', [])
supervisor.block()
supervisor.save()
def test_supervisor_save(qtbot, gui, supervisor):
emit('request_save', gui)
def test_supervisor_skip(qtbot, gui, supervisor):
# yield [0, 1, 2, 10, 11, 20, 30]
# # i, g, N, i, g, N, N
expected = [30, 20, 11, 2, 1]
for clu in expected:
supervisor.select_actions.next_best()
supervisor.block()
_assert_selected(supervisor, [clu])
def test_supervisor_sort(qtbot, supervisor):
supervisor.sort('id', 'desc')
qtbot.wait(50)
assert supervisor.state.cluster_view.current_sort == ('id', 'desc')
supervisor.select_actions.sort_by_n_spikes()
qtbot.wait(50)
assert supervisor.state.cluster_view.current_sort == ('n_spikes', 'desc')
def test_supervisor_filter(qtbot, supervisor):
supervisor.filter('5 <= id && id <= 20')
qtbot.wait(50)
_cl = []
supervisor.cluster_view.get_ids(lambda cluster_ids: _cl.extend(cluster_ids))
qtbot.wait(50)
assert _cl == [20, 11, 10]
def test_supervisor_merge_1(qtbot, supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
supervisor.actions.merge()
supervisor.block()
_assert_selected(supervisor, [31])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [30, 20])
supervisor.actions.redo()
supervisor.block()
supervisor.task_logger.show_history()
_assert_selected(supervisor, [31])
assert supervisor.is_dirty()
def test_supervisor_merge_event(qtbot, supervisor):
_select(supervisor, [30], [20])
_l = []
@connect(sender=supervisor)
def on_select(sender, cluster_ids):
_l.append(cluster_ids)
supervisor.actions.merge()
supervisor.block()
# After a merge, there should be only one select event.
assert len(_l) == 1
def test_supervisor_merge_move(qtbot, supervisor):
"""Check that merge then move selects the next cluster in the original
cluster view, not the updated cluster view."""
_select(supervisor, [20, 11], [])
_assert_selected(supervisor, [20, 11])
supervisor.actions.merge()
supervisor.block()
_assert_selected(supervisor, [31])
supervisor.actions.move('good', 'all')
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.actions.move('good', 'all')
supervisor.block()
_assert_selected(supervisor, [2])
def test_supervisor_split_0(qtbot, supervisor):
_select(supervisor, [1, 2])
_assert_selected(supervisor, [1, 2])
supervisor.actions.split([1, 2])
supervisor.block()
_assert_selected(supervisor, [31, 32, 33])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [1, 2])
supervisor.actions.redo()
supervisor.block()
_assert_selected(supervisor, [31, 32, 33])
def test_supervisor_split_1(supervisor):
supervisor.select_actions.select([1, 2])
supervisor.block()
@connect(sender=supervisor)
def on_request_split(sender):
return [1, 2]
supervisor.actions.split()
supervisor.block()
_assert_selected(supervisor, [31, 32, 33])
def test_supervisor_split_2(gui, similarity):
spike_clusters = np.array([0, 0, 1])
supervisor = Supervisor(spike_clusters,
similarity=similarity,
)
supervisor.attach(gui)
b = Barrier()
connect(b('cluster_view'), event='ready', sender=supervisor.cluster_view)
connect(b('similarity_view'), event='ready', sender=supervisor.similarity_view)
b.wait()
supervisor.actions.split([0])
supervisor.block()
_assert_selected(supervisor, [2, 3])
def test_supervisor_state(tempdir, qtbot, gui, supervisor):
supervisor.select(1)
cv = supervisor.cluster_view
assert supervisor.state.cluster_view.current_sort == ('id', 'desc')
assert supervisor.state.cluster_view.selected == [1]
cv.sort_by('id')
assert supervisor.state.cluster_view.current_sort == ('id', 'asc')
cv.set_state({'current_sort': ('n_spikes', 'desc')})
assert supervisor.state.cluster_view.current_sort == ('n_spikes', 'desc')
cv.sort_by('id', 'desc')
assert supervisor.all_cluster_ids == [30, 20, 11, 10, 2, 1, 0]
def test_supervisor_label(supervisor):
_select(supervisor, [20])
supervisor.label("my_field", 3.14)
supervisor.block()
supervisor.label("my_field", 1.23, cluster_ids=30)
supervisor.block()
assert 'my_field' in supervisor.fields
assert supervisor.get_labels('my_field')[20] == 3.14
assert supervisor.get_labels('my_field')[30] == 1.23
def test_supervisor_label_cluster_1(supervisor):
_select(supervisor, [20, 30])
supervisor.label("my_field", 3.14)
supervisor.block()
# Same value for the old clusters.
l = supervisor.get_labels('my_field')
assert l[20] == l[30] == 3.14
up = supervisor.merge()
supervisor.block()
assert supervisor.get_labels('my_field')[up.added[0]] == 3.14
def test_supervisor_label_cluster_2(supervisor):
_select(supervisor, [20])
supervisor.label("my_field", 3.14)
supervisor.block()
# One of the parents.
l = supervisor.get_labels('my_field')
assert l[20] == 3.14
assert l[30] is None
up = supervisor.merge([20, 30])
supervisor.block()
assert supervisor.get_labels('my_field')[up.added[0]] == 3.14
def test_supervisor_label_cluster_3(supervisor):
# Conflict: largest cluster wins.
_select(supervisor, [20, 30])
supervisor.label("my_field", 3.14)
supervisor.block()
# Create merged cluster from 20 and 30.
up = supervisor.merge()
new = up.added[0]
supervisor.block()
    # It got the label of its parents.
assert supervisor.get_labels('my_field')[new] == 3.14
# Now, we label a smaller cluster.
supervisor.label("my_field", 2.718, cluster_ids=[10])
# We merge the large and small cluster together.
up = supervisor.merge(up.added + [10])
supervisor.block()
# The new cluster should have the value of the first, merged big cluster, i.e. 3.14.
assert supervisor.get_labels('my_field')[up.added[0]] == 3.14
def test_supervisor_move_1(supervisor):
_select(supervisor, [20])
_assert_selected(supervisor, [20])
assert not supervisor.move('', '')
supervisor.actions.move('noise', 'all')
supervisor.block()
_assert_selected(supervisor, [11])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [20])
supervisor.actions.redo()
supervisor.block()
_assert_selected(supervisor, [11])
def test_supervisor_move_2(supervisor):
_select(supervisor, [20], [10])
_assert_selected(supervisor, [20, 10])
supervisor.actions.move('noise', 10)
supervisor.block()
_assert_selected(supervisor, [20, 2])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [20, 10])
supervisor.actions.redo()
supervisor.block()
_assert_selected(supervisor, [20, 2])
def test_supervisor_move_3(qtbot, supervisor):
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.actions.move_best_to_noise()
supervisor.block()
_assert_selected(supervisor, [20])
supervisor.actions.move_best_to_mua()
supervisor.block()
_assert_selected(supervisor, [11])
supervisor.actions.move_best_to_good()
supervisor.block()
_assert_selected(supervisor, [2])
supervisor.cluster_meta.get('group', 30) == 'noise'
supervisor.cluster_meta.get('group', 20) == 'mua'
supervisor.cluster_meta.get('group', 11) == 'good'
def test_supervisor_move_4(supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
supervisor.actions.move_similar_to_noise()
supervisor.block()
_assert_selected(supervisor, [30, 11])
supervisor.actions.move_similar_to_mua()
supervisor.block()
_assert_selected(supervisor, [30, 2])
supervisor.actions.move_similar_to_good()
supervisor.block()
_assert_selected(supervisor, [30, 1])
supervisor.cluster_meta.get('group', 20) == 'noise'
supervisor.cluster_meta.get('group', 11) == 'mua'
supervisor.cluster_meta.get('group', 2) == 'good'
def test_supervisor_move_5(supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
supervisor.actions.move_all_to_noise()
supervisor.block()
_assert_selected(supervisor, [11, 2])
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [11, 1])
supervisor.actions.move_all_to_mua()
supervisor.block()
_assert_selected(supervisor, [2])
supervisor.actions.move_all_to_good()
supervisor.block()
_assert_selected(supervisor, [])
supervisor.cluster_meta.get('group', 30) == 'noise'
supervisor.cluster_meta.get('group', 20) == 'noise'
supervisor.cluster_meta.get('group', 11) == 'mua'
supervisor.cluster_meta.get('group', 10) == 'mua'
supervisor.cluster_meta.get('group', 2) == 'good'
supervisor.cluster_meta.get('group', 1) == 'good'
def test_supervisor_reset(qtbot, supervisor):
supervisor.select_actions.select([10, 11])
supervisor.select_actions.reset_wizard()
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [30, 20])
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [30, 11])
supervisor.select_actions.previous()
supervisor.block()
_assert_selected(supervisor, [30, 20])
def test_supervisor_nav(qtbot, supervisor):
supervisor.select_actions.reset_wizard()
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.select_actions.next_best()
supervisor.block()
_assert_selected(supervisor, [20])
supervisor.select_actions.previous_best()
supervisor.block()
_assert_selected(supervisor, [30])
| en | 0.443504 | # -*- coding: utf-8 -*- Test GUI component. #------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ #from contextlib import contextmanager #------------------------------------------------------------------------------ # Fixtures #------------------------------------------------------------------------------ # NOTE: mock patch show box exec_ #------------------------------------------------------------------------------ # Test tasks #------------------------------------------------------------------------------ # pragma: no cover #------------------------------------------------------------------------------ # Test cluster and similarity views #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ # Test ActionCreator #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ # Test GUI component #------------------------------------------------------------------------------ # The action fails while the supervisor is busy. # The action succeeds because the supervisor is no longer busy. # WARNING: always use actions in tests, because this doesn't call # the supervisor method directly, but raises an event, enqueue the task, # and call TaskLogger.process() which handles the cascade of callbacks. # Empty selection at first. # Merge. # Split. # Move. # yield [0, 1, 2, 10, 11, 20, 30] # # i, g, N, i, g, N, N # After a merge, there should be only one select event. Check that merge then move selects the next cluster in the original cluster view, not the updated cluster view. # Same value for the old clusters. # One of the parents. # Conflict: largest cluster wins. # Create merged cluster from 20 and 30. # It fot the label of its parents. # Now, we label a smaller cluster. # We merge the large and small cluster together. # The new cluster should have the value of the first, merged big cluster, i.e. 3.14. | 1.894011 | 2 |
Source/CommandManager.py | SOBotics/Botpy | 5 | 10076 | <filename>Source/CommandManager.py
#
# CommandManager.py
# Botpy
#
# Created by <NAME> on 4th September 2017.
#
#
import threading
import chatexchange as ce
class CommandManager:
def __init__(self, commands):
self.commands = commands
self.running_commands = []
def run_command(self, command):
if command.privileges() == 0:
command_thread = threading.Thread(target=command.run)
self.running_commands.append([command, command_thread])
command_thread.start()
return
if command.message.room.is_user_privileged(command.message.user.id, command.privileges()):
command_thread = threading.Thread(target=command.run)
self.running_commands.append([command, command_thread])
command_thread.start()
return
command.reply("You do not have sufficient privileges to run this command.")
def handle_command(self, message):
try:
message_content = message.content.split()
del message_content[0]
except AttributeError:
return
for command in self.commands:
command_usage = command.usage()
usage_index = -1
for usage in command_usage:
usage_index += 1
usage_components = usage.split()
args = []
match = True
last_index = min(len(usage_components), len(message_content))
for i in range(last_index):
content_component = message_content[i]
usage_component = usage_components[i]
if usage_component == '*':
args.append(content_component)
elif usage_component == '...':
#Everything else is arguments
temp_index = i
while temp_index < len(message_content):
args.append(message_content[temp_index])
temp_index += 1
elif content_component != usage_component:
match = False
min_count = len(usage_components) - 1 \
if usage_components[-1] == '...' else len(usage_components)
if len(message_content) < min_count:
match = False
if match:
self.run_command(command(self, message, args, usage_index))
return
def cleanup_finished_commands(self):
for command, command_thread in self.running_commands:
            if not command_thread.is_alive():
self.running_commands.remove([command, command_thread])
| <filename>Source/CommandManager.py
#
# CommandManager.py
# Botpy
#
# Created by <NAME> on 4th September 2017.
#
#
import threading
import chatexchange as ce
class CommandManager:
def __init__(self, commands):
self.commands = commands
self.running_commands = []
def run_command(self, command):
if command.privileges() == 0:
command_thread = threading.Thread(target=command.run)
self.running_commands.append([command, command_thread])
command_thread.start()
return
if command.message.room.is_user_privileged(command.message.user.id, command.privileges()):
command_thread = threading.Thread(target=command.run)
self.running_commands.append([command, command_thread])
command_thread.start()
return
command.reply("You do not have sufficient privileges to run this command.")
def handle_command(self, message):
try:
message_content = message.content.split()
del message_content[0]
except AttributeError:
return
for command in self.commands:
command_usage = command.usage()
usage_index = -1
for usage in command_usage:
usage_index += 1
usage_components = usage.split()
args = []
match = True
last_index = min(len(usage_components), len(message_content))
for i in range(last_index):
content_component = message_content[i]
usage_component = usage_components[i]
if usage_component == '*':
args.append(content_component)
elif usage_component == '...':
#Everything else is arguments
temp_index = i
while temp_index < len(message_content):
args.append(message_content[temp_index])
temp_index += 1
elif content_component != usage_component:
match = False
min_count = len(usage_components) - 1 \
if usage_components[-1] == '...' else len(usage_components)
if len(message_content) < min_count:
match = False
if match:
self.run_command(command(self, message, args, usage_index))
return
def cleanup_finished_commands(self):
for command, command_thread in self.running_commands:
            if not command_thread.is_alive():
self.running_commands.remove([command, command_thread])
| en | 0.88065 | # # CommandManager.py # Botpy # # Created by <NAME> on 4th September 2017. # # #Everything else is arguments | 2.298014 | 2 |
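A minimal usage sketch of the CommandManager above. EchoCommand and FakeMessage are illustrative stand-ins, not part of Botpy; only behaviour visible in the file (usage(), privileges(), run(), handle_command(), cleanup_finished_commands()) is relied on, and the import path is an assumption.

from CommandManager import CommandManager   # assumed module path for the file above

class EchoCommand:
    def __init__(self, manager, message, args, usage_index):
        self.message = message
        self.args = args

    @staticmethod
    def usage():
        return ["echo ..."]          # '...' collects the remaining words as arguments

    @staticmethod
    def privileges():
        return 0                     # 0 means no room privileges are required

    def run(self):
        print("echo:", " ".join(self.args))

class FakeMessage:
    content = "@bot echo hello world"    # handle_command drops the first token ('@bot')

manager = CommandManager([EchoCommand])
manager.handle_command(FakeMessage())     # matches "echo ..." and runs EchoCommand in a thread
manager.cleanup_finished_commands()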
tests/periodicities/gen_makefile.py | jmabry/pyaf | 377 | 10077 | <reponame>jmabry/pyaf<filename>tests/periodicities/gen_makefile.py
import os
import glob
subdirs = glob.glob("tests/periodicities/*");
subdirs = ['tests/periodicities/Month',
'tests/periodicities/Minute',
'tests/periodicities/Week',
'tests/periodicities/Business_Hour',
'tests/periodicities/Business_Day',
'tests/periodicities/Second',
'tests/periodicities/Semi_Month',
'tests/periodicities/Hour',
'tests/periodicities/Day']
#print(subdirs)
print("PYTHON=python3\n\n");
lAllTarget = "";
for subdir1 in sorted(subdirs):
lBase = os.path.basename(subdir1);
test_target = "";
for filename in sorted(glob.glob(subdir1 + "/*.py")):
bn = os.path.basename(filename);
logfile = bn.replace("/" , "_");
logfile = "logs/periodicities_" + logfile.replace(".py" , ".log");
print("#PROCESSING FILE : " , filename, bn , logfile);
print(bn , " : " , "\n\t", "-$(PYTHON) " , filename , " > " , logfile , " 2>&1");
test_target = bn + " " + test_target;
lAllTarget = lAllTarget + " " + lBase;
print("\n\n", lBase , ": ", test_target, "\n" , "\n");
print("\n# ********************************************** \n");
print("all: " , lAllTarget , "\n\t\n");
| import os
import glob
subdirs = glob.glob("tests/periodicities/*");
subdirs = ['tests/periodicities/Month',
'tests/periodicities/Minute',
'tests/periodicities/Week',
'tests/periodicities/Business_Hour',
'tests/periodicities/Business_Day',
'tests/periodicities/Second',
'tests/periodicities/Semi_Month',
'tests/periodicities/Hour',
'tests/periodicities/Day']
#print(subdirs)
print("PYTHON=python3\n\n");
lAllTarget = "";
for subdir1 in sorted(subdirs):
lBase = os.path.basename(subdir1);
test_target = "";
for filename in sorted(glob.glob(subdir1 + "/*.py")):
bn = os.path.basename(filename);
logfile = bn.replace("/" , "_");
logfile = "logs/periodicities_" + logfile.replace(".py" , ".log");
print("#PROCESSING FILE : " , filename, bn , logfile);
print(bn , " : " , "\n\t", "-$(PYTHON) " , filename , " > " , logfile , " 2>&1");
test_target = bn + " " + test_target;
lAllTarget = lAllTarget + " " + lBase;
print("\n\n", lBase , ": ", test_target, "\n" , "\n");
print("\n# ********************************************** \n");
print("all: " , lAllTarget , "\n\t\n"); | el | 0.263146 | #print(subdirs) # ********************************************** \n"); | 2.734335 | 3 |
test/test_util_registry.py | SimulatedANeal/carpedm | 2 | 10078 | #
# Copyright (C) 2018 <NAME>.
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
#
#
# Portions of this module are copied or lightly modified from the
# Tensor2Tensor registry_test module, so here is their license:
#
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils.registry
References:
Slight modification of `Tensor2Tensor registry_test`_.
.. _Tensor2Tensor registry_test: https://github.com/tensorflow/
tensor2tensor/blob/master/tensor2tensor/utils/registry_test.py
"""
import unittest
from carpedm.util import registry
from carpedm.models.generic import Model
from carpedm.models.baseline import SingleCharBaseline
class ModelRegistryTest(unittest.TestCase):
def setUp(self):
registry._reset()
def test_model_registration(self):
@registry.register_model
class MyModel1(Model):
pass
model = registry.model("my_model1")
self.assertTrue(model is MyModel1)
def test_named_registration(self):
@registry.register_model("model2")
class MyModel1(Model):
pass
model = registry.model("model2")
self.assertTrue(model is MyModel1)
def test_request_unprovided_model(self):
with self.assertRaisesRegex(LookupError, "never registered"):
_ = registry.model("not_provided")
def test_duplicate_registration(self):
@registry.register_model
def m1():
pass
with self.assertRaisesRegex(LookupError, "already registered"):
@registry.register_model("m1")
def m2():
pass
def test_list_models(self):
@registry.register_model
def m1():
pass
@registry.register_model
def m2():
pass
self.assertSetEqual({"m1", "m2"}, set(registry.list_models()))
def test_snake_case(self):
convert = registry._convert_camel_to_snake
self.assertEqual("typical_camel_case", convert("TypicalCamelCase"))
self.assertEqual("numbers_fuse2gether", convert("NumbersFuse2gether"))
self.assertEqual("numbers_fuse2_gether", convert("NumbersFuse2Gether"))
self.assertEqual("lstm_seq2_seq", convert("LSTMSeq2Seq"))
self.assertEqual("starts_lower", convert("startsLower"))
self.assertEqual("starts_lower_caps", convert("startsLowerCAPS"))
self.assertEqual("caps_fuse_together", convert("CapsFUSETogether"))
self.assertEqual("startscap", convert("Startscap"))
self.assertEqual("s_tartscap", convert("STartscap"))
class ModelProvidedTest(unittest.TestCase):
def setUp(self):
from carpedm import models
def test_access_provided_model(self):
model = registry.model("single_char_baseline")
self.assertTrue(model is SingleCharBaseline)
if __name__ == '__main__':
unittest.main()
| #
# Copyright (C) 2018 <NAME>.
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
#
#
# Portions of this module are copied or lightly modified from the
# Tensor2Tensor registry_test module, so here is their license:
#
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils.registry
References:
Slight modification of `Tensor2Tensor registry_test`_.
.. _Tensor2Tensor registry_test: https://github.com/tensorflow/
tensor2tensor/blob/master/tensor2tensor/utils/registry_test.py
"""
import unittest
from carpedm.util import registry
from carpedm.models.generic import Model
from carpedm.models.baseline import SingleCharBaseline
class ModelRegistryTest(unittest.TestCase):
def setUp(self):
registry._reset()
def test_model_registration(self):
@registry.register_model
class MyModel1(Model):
pass
model = registry.model("my_model1")
self.assertTrue(model is MyModel1)
def test_named_registration(self):
@registry.register_model("model2")
class MyModel1(Model):
pass
model = registry.model("model2")
self.assertTrue(model is MyModel1)
def test_request_unprovided_model(self):
with self.assertRaisesRegex(LookupError, "never registered"):
_ = registry.model("not_provided")
def test_duplicate_registration(self):
@registry.register_model
def m1():
pass
with self.assertRaisesRegex(LookupError, "already registered"):
@registry.register_model("m1")
def m2():
pass
def test_list_models(self):
@registry.register_model
def m1():
pass
@registry.register_model
def m2():
pass
self.assertSetEqual({"m1", "m2"}, set(registry.list_models()))
def test_snake_case(self):
convert = registry._convert_camel_to_snake
self.assertEqual("typical_camel_case", convert("TypicalCamelCase"))
self.assertEqual("numbers_fuse2gether", convert("NumbersFuse2gether"))
self.assertEqual("numbers_fuse2_gether", convert("NumbersFuse2Gether"))
self.assertEqual("lstm_seq2_seq", convert("LSTMSeq2Seq"))
self.assertEqual("starts_lower", convert("startsLower"))
self.assertEqual("starts_lower_caps", convert("startsLowerCAPS"))
self.assertEqual("caps_fuse_together", convert("CapsFUSETogether"))
self.assertEqual("startscap", convert("Startscap"))
self.assertEqual("s_tartscap", convert("STartscap"))
class ModelProvidedTest(unittest.TestCase):
def setUp(self):
from carpedm import models
def test_access_provided_model(self):
model = registry.model("single_char_baseline")
self.assertTrue(model is SingleCharBaseline)
if __name__ == '__main__':
unittest.main()
| en | 0.802799 | # # Copyright (C) 2018 <NAME>. # # This software may be modified and distributed under the terms # of the MIT license. See the LICENSE file for details. # # # Portions of this module are copied or lightly modified from the # Tensor2Tensor registry_test module, so here is their license: # # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for utils.registry References: Slight modification of `Tensor2Tensor registry_test`_. .. _Tensor2Tensor registry_test: https://github.com/tensorflow/ tensor2tensor/blob/master/tensor2tensor/utils/registry_test.py | 2.11834 | 2 |
pay-api/migrations/versions/8f7565cf50c1_.py | stevenc987/sbc-pay | 0 | 10079 | <reponame>stevenc987/sbc-pay
"""empty message
Revision ID: 8f7565cf50c1
Revises: 872760122cc9, <KEY>
Create Date: 2020-10-02 11:11:49.823678
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = ('872760122cc9', '<KEY>')
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass
| """empty message
Revision ID: 8f7565cf50c1
Revises: 872760122cc9, <KEY>
Create Date: 2020-10-02 11:11:49.823678
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = ('872760122cc9', '<KEY>')
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass | en | 0.467755 | empty message Revision ID: 8f7565cf50c1 Revises: 872760122cc9, <KEY> Create Date: 2020-10-02 11:11:49.823678 # revision identifiers, used by Alembic. | 1.028404 | 1 |
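For contrast with the empty revision above, a non-empty Alembic migration body usually follows the sketch below; the table and column names are purely illustrative, not taken from sbc-pay.

from alembic import op
import sqlalchemy as sa

def upgrade():
    op.add_column('payments', sa.Column('notes', sa.String(length=200), nullable=True))

def downgrade():
    op.drop_column('payments', 'notes')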
iotronic/wamp/agent.py | smartmeio/stack4things-openstack-iotronic | 1 | 10080 | <reponame>smartmeio/stack4things-openstack-iotronic<filename>iotronic/wamp/agent.py<gh_stars>1-10
# Copyright 2017 MDSLAB - University of Messina
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import asyncio
import json
import subprocess
import time
import txaio
from iotronic.common import exception
from iotronic.common.i18n import _
from iotronic.common.i18n import _LI
from iotronic.common.i18n import _LW
from iotronic.db import api as dbapi
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_messaging.rpc import dispatcher
import importlib
from threading import Thread
import ssl
import os
import signal
from autobahn.asyncio.component import Component
LOG = logging.getLogger(__name__)
service_opts = [
cfg.StrOpt('notification_level',
choices=[('debug', _('"debug" level')),
('info', _('"info" level')),
('warning', _('"warning" level')),
('error', _('"error" level')),
('critical', _('"critical" level'))],
help=_('Specifies the minimum level for which to send '
'notifications. If not set, no notifications will '
'be sent. The default is for this option to be unset.')),
]
wamp_opts = [
cfg.StrOpt('wamp_transport_url',
default='ws://localhost:8181/',
help=('URL of wamp broker')),
cfg.StrOpt('wamp_realm',
default='s4t',
help=('realm broker')),
cfg.BoolOpt('register_agent',
default=False,
help=('Flag for marking this agent as a registration agent')),
cfg.BoolOpt('skip_cert_verify',
default=False,
help=(
'Flag for skipping the verification of the server cert '
'(for the auto-signed ones)')),
cfg.IntOpt('autoPingInterval',
default=2,
help=('autoPingInterval parameter for wamp')),
cfg.IntOpt('autoPingTimeout',
default=2,
               help=('autoPingTimeout parameter for wamp')),
cfg.BoolOpt('service_allow_list',
default=False,
help='Enable service allow list checks.'),
cfg.StrOpt('service_allow_list_path',
default="(/var/lib/wstun/allowlist)",
help='Path of allowlist.json file.'),
]
proxy_opts = [
cfg.StrOpt('proxy',
choices=[('nginx', _('nginx proxy')), ],
help=_('Proxy for webservices')),
]
CONF = cfg.CONF
cfg.CONF.register_opts(service_opts)
cfg.CONF.register_opts(proxy_opts)
CONF.register_opts(wamp_opts, 'wamp')
txaio.start_logging(level="info")
wamp_session_caller = None
AGENT_HOST = None
LOOP = None
connected = False
async def wamp_request(kwarg):
# for previous LR version (to be removed asap)
if 'req' in kwarg:
LOG.debug("calling: " + kwarg['wamp_rpc_call'] +
" with request id: " + kwarg['req']['uuid'])
d = await wamp_session_caller.call(kwarg['wamp_rpc_call'],
kwarg['req'],
*kwarg['data'])
else:
LOG.debug("calling: " + kwarg['wamp_rpc_call'])
d = await wamp_session_caller.call(kwarg['wamp_rpc_call'],
*kwarg['data'])
return d
# OSLO ENDPOINT
class WampEndpoint(object):
def s4t_invoke_wamp(self, ctx, **kwarg):
LOG.debug("CONDUCTOR sent me: " + kwarg['wamp_rpc_call'])
r = asyncio.run_coroutine_threadsafe(wamp_request(kwarg), LOOP)
return r.result()
def read_allowlist():
try:
with open(CONF.wamp.service_allow_list_path, "r") as allow_file:
allow_list_str = allow_file.read()
allow_list = json.loads(allow_list_str)
#LOG.debug(allow_list)
return allow_list
except Exception as err:
LOG.error(err)
class AgentEndpoint(object):
# used for testing
def echo(self, ctx, text):
LOG.debug("ECHO of " + text)
return text
def create_tap_interface(self, ctx, port_uuid, tcp_port):
time.sleep(12)
LOG.debug('Creating tap interface on the wamp agent host')
p = subprocess.Popen('socat -d -d TCP:localhost:' + tcp_port +
',reuseaddr,forever,interval=10 TUN,tun-type=tap,'
'tun-name=tap' + port_uuid[0:14] +
',up ', shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
return 1
def addin_allowlist(self, ctx, device, port):
try:
allow_list = read_allowlist()
new_node={}
new_node['client']=device
new_node['port']=str(port)
if new_node in allow_list:
LOG.warning("This device already exposes this port!")
else:
allow_list.append(new_node)
with open(CONF.wamp.service_allow_list_path, "r+") as allow_file:
allow_file.seek(0)
allow_file.write("%s" % json.dumps(allow_list))
allow_file.truncate()
read_allowlist()
LOG.debug("Added device/service port in allow list.")
except Exception as err:
print(err)
def remove_from_allowlist(self, ctx, device, port):
try:
allow_list = read_allowlist()
new_node={}
new_node['client']=device
new_node['port']=str(port)
if new_node in allow_list:
allow_list.remove(new_node)
with open(CONF.wamp.service_allow_list_path, "r+") as allow_file:
allow_file.seek(0)
allow_file.write("%s" % json.dumps(allow_list))
allow_file.truncate()
LOG.debug("Removed device/service port from allow list.")
except Exception as err:
print(err)
class RPCServer(Thread):
def __init__(self):
# AMQP CONFIG
proxy = importlib.import_module("iotronic.wamp.proxies." + CONF.proxy)
endpoints = [
WampEndpoint(),
AgentEndpoint(),
proxy.ProxyManager()
]
Thread.__init__(self)
transport = oslo_messaging.get_transport(CONF)
target = oslo_messaging.Target(topic='s4t',
server=AGENT_HOST)
access_policy = dispatcher.DefaultRPCAccessPolicy
self.server = oslo_messaging.get_rpc_server(
transport, target,
endpoints, executor='threading',
access_policy=access_policy)
def run(self):
LOG.info("Starting AMQP server... ")
self.server.start()
def stop(self):
LOG.info("Stopping AMQP server... ")
self.server.stop()
LOG.info("AMQP server stopped. ")
class WampManager(object):
def __init__(self):
LOG.debug("wamp url: %s wamp realm: %s",
CONF.wamp.wamp_transport_url, CONF.wamp.wamp_realm)
self.loop = asyncio.get_event_loop()
global LOOP
LOOP = self.loop
wamp_transport = CONF.wamp.wamp_transport_url
wurl_list = wamp_transport.split(':')
is_wss = False
if wurl_list[0] == "wss":
is_wss = True
whost = wurl_list[1].replace('/', '')
wport = int(wurl_list[2].replace('/', ''))
if is_wss and CONF.wamp.skip_cert_verify:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
wamp_transport = [
{
"url": CONF.wamp.wamp_transport_url,
"serializers": ["json"],
"endpoint": {
"type": "tcp",
"host": whost,
"port": wport,
"tls": ctx
},
},
]
comp = Component(
transports=wamp_transport,
realm=CONF.wamp.wamp_realm
)
self.comp = comp
@comp.on_join
async def onJoin(session, details):
global connected
connected = True
global wamp_session_caller, AGENT_HOST
wamp_session_caller = session
import iotronic.wamp.functions as fun
session.subscribe(fun.board_on_leave,
'wamp.session.on_leave')
session.subscribe(fun.board_on_join,
'wamp.session.on_join')
try:
if CONF.wamp.register_agent:
session.register(fun.registration,
u'stack4things.register')
LOG.info("I have been set as registration agent")
session.register(fun.connection,
AGENT_HOST + u'.stack4things.connection')
session.register(fun.echo,
AGENT_HOST + u'.stack4things.echo')
session.register(fun.alive,
AGENT_HOST + u'.stack4things.alive')
session.register(fun.wamp_alive,
AGENT_HOST + u'.stack4things.wamp_alive')
session.register(fun.notify_result,
AGENT_HOST + u'.stack4things.notify_result')
LOG.debug("procedure registered")
except Exception as e:
LOG.error("could not register procedure: {0}".format(e))
LOG.info("WAMP session ready.")
session_l = await session.call(u'wamp.session.list')
session_l.remove(details.session)
fun.update_sessions(session_l, AGENT_HOST)
@comp.on_leave
async def onLeave(session, details):
LOG.warning('WAMP Session Left: ' + str(details))
@comp.on_disconnect
async def onDisconnect(session, was_clean):
LOG.warning('WAMP Transport Left: ' + str(was_clean))
global connected
connected = False
if not connected:
comp.start(self.loop)
def start(self):
LOG.info("Starting WAMP server...")
self.comp.start(self.loop)
self.loop.run_forever()
def stop(self):
LOG.info("Stopping WAMP server...")
# Canceling pending tasks and stopping the loop
asyncio.gather(*asyncio.Task.all_tasks()).cancel()
# Stopping the loop
self.loop.stop()
LOG.info("WAMP server stopped.")
class WampAgent(object):
def __init__(self, host):
signal.signal(signal.SIGINT, self.stop_handler)
logging.register_options(CONF)
CONF(project='iotronic')
logging.setup(CONF, "iotronic-wamp-agent")
if CONF.debug:
txaio.start_logging(level="debug")
# to be removed asap
self.host = host
self.dbapi = dbapi.get_instance()
try:
wpa = self.dbapi.register_wampagent(
{'hostname': self.host, 'wsurl': CONF.wamp.wamp_transport_url})
except exception.WampAgentAlreadyRegistered:
LOG.warn(_LW("A wampagent with hostname %(hostname)s "
"was previously registered. Updating registration"),
{'hostname': self.host})
wpa = self.dbapi.register_wampagent(
{'hostname': self.host, 'wsurl': CONF.wamp.wamp_transport_url},
update_existing=True)
self.wampagent = wpa
self.wampagent.ragent = CONF.wamp.register_agent
self.wampagent.save()
global AGENT_HOST
AGENT_HOST = self.host
self.r = RPCServer()
self.w = WampManager()
self.r.start()
self.w.start()
def del_host(self, deregister=True):
if deregister:
try:
self.dbapi.unregister_wampagent(self.host)
LOG.info(_LI('Successfully stopped wampagent with hostname '
'%(hostname)s.'),
{'hostname': self.host})
except exception.WampAgentNotFound:
pass
else:
LOG.info(_LI('Not deregistering wampagent with hostname '
'%(hostname)s.'),
{'hostname': self.host})
def stop_handler(self, signum, frame):
self.w.stop()
self.r.stop()
self.del_host()
os._exit(0)
| # Copyright 2017 MDSLAB - University of Messina
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import asyncio
import json
import subprocess
import time
import txaio
from iotronic.common import exception
from iotronic.common.i18n import _
from iotronic.common.i18n import _LI
from iotronic.common.i18n import _LW
from iotronic.db import api as dbapi
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_messaging.rpc import dispatcher
import importlib
from threading import Thread
import ssl
import os
import signal
from autobahn.asyncio.component import Component
LOG = logging.getLogger(__name__)
service_opts = [
cfg.StrOpt('notification_level',
choices=[('debug', _('"debug" level')),
('info', _('"info" level')),
('warning', _('"warning" level')),
('error', _('"error" level')),
('critical', _('"critical" level'))],
help=_('Specifies the minimum level for which to send '
'notifications. If not set, no notifications will '
'be sent. The default is for this option to be unset.')),
]
wamp_opts = [
cfg.StrOpt('wamp_transport_url',
default='ws://localhost:8181/',
help=('URL of wamp broker')),
cfg.StrOpt('wamp_realm',
default='s4t',
help=('realm broker')),
cfg.BoolOpt('register_agent',
default=False,
help=('Flag for marking this agent as a registration agent')),
cfg.BoolOpt('skip_cert_verify',
default=False,
help=(
'Flag for skipping the verification of the server cert '
'(for the auto-signed ones)')),
cfg.IntOpt('autoPingInterval',
default=2,
help=('autoPingInterval parameter for wamp')),
cfg.IntOpt('autoPingTimeout',
default=2,
               help=('autoPingTimeout parameter for wamp')),
cfg.BoolOpt('service_allow_list',
default=False,
help='Enable service allow list checks.'),
cfg.StrOpt('service_allow_list_path',
default="(/var/lib/wstun/allowlist)",
help='Path of allowlist.json file.'),
]
proxy_opts = [
cfg.StrOpt('proxy',
choices=[('nginx', _('nginx proxy')), ],
help=_('Proxy for webservices')),
]
CONF = cfg.CONF
cfg.CONF.register_opts(service_opts)
cfg.CONF.register_opts(proxy_opts)
CONF.register_opts(wamp_opts, 'wamp')
txaio.start_logging(level="info")
wamp_session_caller = None
AGENT_HOST = None
LOOP = None
connected = False
async def wamp_request(kwarg):
# for previous LR version (to be removed asap)
if 'req' in kwarg:
LOG.debug("calling: " + kwarg['wamp_rpc_call'] +
" with request id: " + kwarg['req']['uuid'])
d = await wamp_session_caller.call(kwarg['wamp_rpc_call'],
kwarg['req'],
*kwarg['data'])
else:
LOG.debug("calling: " + kwarg['wamp_rpc_call'])
d = await wamp_session_caller.call(kwarg['wamp_rpc_call'],
*kwarg['data'])
return d
# OSLO ENDPOINT
class WampEndpoint(object):
def s4t_invoke_wamp(self, ctx, **kwarg):
LOG.debug("CONDUCTOR sent me: " + kwarg['wamp_rpc_call'])
r = asyncio.run_coroutine_threadsafe(wamp_request(kwarg), LOOP)
return r.result()
def read_allowlist():
try:
with open(CONF.wamp.service_allow_list_path, "r") as allow_file:
allow_list_str = allow_file.read()
allow_list = json.loads(allow_list_str)
#LOG.debug(allow_list)
return allow_list
except Exception as err:
LOG.error(err)
class AgentEndpoint(object):
# used for testing
def echo(self, ctx, text):
LOG.debug("ECHO of " + text)
return text
def create_tap_interface(self, ctx, port_uuid, tcp_port):
time.sleep(12)
LOG.debug('Creating tap interface on the wamp agent host')
p = subprocess.Popen('socat -d -d TCP:localhost:' + tcp_port +
',reuseaddr,forever,interval=10 TUN,tun-type=tap,'
'tun-name=tap' + port_uuid[0:14] +
',up ', shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
return 1
def addin_allowlist(self, ctx, device, port):
try:
allow_list = read_allowlist()
new_node={}
new_node['client']=device
new_node['port']=str(port)
if new_node in allow_list:
LOG.warning("This device already exposes this port!")
else:
allow_list.append(new_node)
with open(CONF.wamp.service_allow_list_path, "r+") as allow_file:
allow_file.seek(0)
allow_file.write("%s" % json.dumps(allow_list))
allow_file.truncate()
read_allowlist()
LOG.debug("Added device/service port in allow list.")
except Exception as err:
print(err)
def remove_from_allowlist(self, ctx, device, port):
try:
allow_list = read_allowlist()
new_node={}
new_node['client']=device
new_node['port']=str(port)
if new_node in allow_list:
allow_list.remove(new_node)
with open(CONF.wamp.service_allow_list_path, "r+") as allow_file:
allow_file.seek(0)
allow_file.write("%s" % json.dumps(allow_list))
allow_file.truncate()
LOG.debug("Removed device/service port from allow list.")
except Exception as err:
print(err)
class RPCServer(Thread):
def __init__(self):
# AMQP CONFIG
proxy = importlib.import_module("iotronic.wamp.proxies." + CONF.proxy)
endpoints = [
WampEndpoint(),
AgentEndpoint(),
proxy.ProxyManager()
]
Thread.__init__(self)
transport = oslo_messaging.get_transport(CONF)
target = oslo_messaging.Target(topic='s4t',
server=AGENT_HOST)
access_policy = dispatcher.DefaultRPCAccessPolicy
self.server = oslo_messaging.get_rpc_server(
transport, target,
endpoints, executor='threading',
access_policy=access_policy)
def run(self):
LOG.info("Starting AMQP server... ")
self.server.start()
def stop(self):
LOG.info("Stopping AMQP server... ")
self.server.stop()
LOG.info("AMQP server stopped. ")
class WampManager(object):
def __init__(self):
LOG.debug("wamp url: %s wamp realm: %s",
CONF.wamp.wamp_transport_url, CONF.wamp.wamp_realm)
self.loop = asyncio.get_event_loop()
global LOOP
LOOP = self.loop
wamp_transport = CONF.wamp.wamp_transport_url
wurl_list = wamp_transport.split(':')
is_wss = False
if wurl_list[0] == "wss":
is_wss = True
whost = wurl_list[1].replace('/', '')
wport = int(wurl_list[2].replace('/', ''))
if is_wss and CONF.wamp.skip_cert_verify:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
wamp_transport = [
{
"url": CONF.wamp.wamp_transport_url,
"serializers": ["json"],
"endpoint": {
"type": "tcp",
"host": whost,
"port": wport,
"tls": ctx
},
},
]
comp = Component(
transports=wamp_transport,
realm=CONF.wamp.wamp_realm
)
self.comp = comp
@comp.on_join
async def onJoin(session, details):
global connected
connected = True
global wamp_session_caller, AGENT_HOST
wamp_session_caller = session
import iotronic.wamp.functions as fun
session.subscribe(fun.board_on_leave,
'wamp.session.on_leave')
session.subscribe(fun.board_on_join,
'wamp.session.on_join')
try:
if CONF.wamp.register_agent:
session.register(fun.registration,
u'stack4things.register')
LOG.info("I have been set as registration agent")
session.register(fun.connection,
AGENT_HOST + u'.stack4things.connection')
session.register(fun.echo,
AGENT_HOST + u'.stack4things.echo')
session.register(fun.alive,
AGENT_HOST + u'.stack4things.alive')
session.register(fun.wamp_alive,
AGENT_HOST + u'.stack4things.wamp_alive')
session.register(fun.notify_result,
AGENT_HOST + u'.stack4things.notify_result')
LOG.debug("procedure registered")
except Exception as e:
LOG.error("could not register procedure: {0}".format(e))
LOG.info("WAMP session ready.")
session_l = await session.call(u'wamp.session.list')
session_l.remove(details.session)
fun.update_sessions(session_l, AGENT_HOST)
@comp.on_leave
async def onLeave(session, details):
LOG.warning('WAMP Session Left: ' + str(details))
@comp.on_disconnect
async def onDisconnect(session, was_clean):
LOG.warning('WAMP Transport Left: ' + str(was_clean))
global connected
connected = False
if not connected:
comp.start(self.loop)
def start(self):
LOG.info("Starting WAMP server...")
self.comp.start(self.loop)
self.loop.run_forever()
def stop(self):
LOG.info("Stopping WAMP server...")
# Canceling pending tasks and stopping the loop
asyncio.gather(*asyncio.Task.all_tasks()).cancel()
# Stopping the loop
self.loop.stop()
LOG.info("WAMP server stopped.")
class WampAgent(object):
def __init__(self, host):
signal.signal(signal.SIGINT, self.stop_handler)
logging.register_options(CONF)
CONF(project='iotronic')
logging.setup(CONF, "iotronic-wamp-agent")
if CONF.debug:
txaio.start_logging(level="debug")
# to be removed asap
self.host = host
self.dbapi = dbapi.get_instance()
try:
wpa = self.dbapi.register_wampagent(
{'hostname': self.host, 'wsurl': CONF.wamp.wamp_transport_url})
except exception.WampAgentAlreadyRegistered:
LOG.warn(_LW("A wampagent with hostname %(hostname)s "
"was previously registered. Updating registration"),
{'hostname': self.host})
wpa = self.dbapi.register_wampagent(
{'hostname': self.host, 'wsurl': CONF.wamp.wamp_transport_url},
update_existing=True)
self.wampagent = wpa
self.wampagent.ragent = CONF.wamp.register_agent
self.wampagent.save()
global AGENT_HOST
AGENT_HOST = self.host
self.r = RPCServer()
self.w = WampManager()
self.r.start()
self.w.start()
def del_host(self, deregister=True):
if deregister:
try:
self.dbapi.unregister_wampagent(self.host)
LOG.info(_LI('Successfully stopped wampagent with hostname '
'%(hostname)s.'),
{'hostname': self.host})
except exception.WampAgentNotFound:
pass
else:
LOG.info(_LI('Not deregistering wampagent with hostname '
'%(hostname)s.'),
{'hostname': self.host})
def stop_handler(self, signum, frame):
self.w.stop()
self.r.stop()
self.del_host()
os._exit(0) | en | 0.836393 | # Copyright 2017 MDSLAB - University of Messina # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # for previous LR version (to be removed asap) # OSLO ENDPOINT #LOG.debug(allow_list) # used for testing # AMQP CONFIG # Canceling pending tasks and stopping the loop # Stopping the loop # to be removed asap | 1.842086 | 2 |
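A hedged sketch of how a conductor-side caller could reach the WampEndpoint above over oslo.messaging; the agent hostname and the WAMP procedure name are assumptions, while the 's4t' topic and the wamp_rpc_call/data keyword shape come from the agent code itself.

import oslo_messaging
from oslo_config import cfg

transport = oslo_messaging.get_transport(cfg.CONF)
target = oslo_messaging.Target(topic='s4t', server='wagent-hostname')
client = oslo_messaging.RPCClient(transport, target)

result = client.call({}, 's4t_invoke_wamp',
                     wamp_rpc_call='iotronic.<board-uuid>.plugin_echo',
                     data=['hello'])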
src/export_to_poseviz.py | anibali/metro-pose3d | 52 | 10081 | <gh_stars>10-100
#!/usr/bin/env python3
import argparse
import logging
import sys
import numpy as np
import util
def main():
flags = initialize()
logging.debug(f'Loading from {flags.in_path}')
a = np.load(flags.in_path, allow_pickle=True)
all_results_3d = {}
for image_path, coords3d_pred in zip(a['image_path'], a['coords3d_pred_world']):
image_path = image_path.decode('utf8')
all_results_3d.setdefault(
image_path, []).append(coords3d_pred.tolist())
logging.info(f'Writing to file {flags.out_path}')
util.dump_json(all_results_3d, flags.out_path)
def initialize():
parser = argparse.ArgumentParser()
parser.add_argument('--in-path', type=str, required=True)
parser.add_argument('--out-path', type=str, default=None)
parser.add_argument('--loglevel', type=str, default='info')
flags = parser.parse_args()
if flags.out_path is None:
flags.out_path = flags.in_path.replace('.npz', '.json')
loglevel = dict(error=40, warning=30, info=20, debug=10)[flags.loglevel]
simple_formatter = logging.Formatter('{asctime}-{levelname:^1.1} -- {message}', style='{')
print_handler = logging.StreamHandler(sys.stdout)
print_handler.setLevel(loglevel)
print_handler.setFormatter(simple_formatter)
logging.basicConfig(level=loglevel, handlers=[print_handler])
return flags
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
import argparse
import logging
import sys
import numpy as np
import util
def main():
flags = initialize()
logging.debug(f'Loading from {flags.in_path}')
a = np.load(flags.in_path, allow_pickle=True)
all_results_3d = {}
for image_path, coords3d_pred in zip(a['image_path'], a['coords3d_pred_world']):
image_path = image_path.decode('utf8')
all_results_3d.setdefault(
image_path, []).append(coords3d_pred.tolist())
logging.info(f'Writing to file {flags.out_path}')
util.dump_json(all_results_3d, flags.out_path)
def initialize():
parser = argparse.ArgumentParser()
parser.add_argument('--in-path', type=str, required=True)
parser.add_argument('--out-path', type=str, default=None)
parser.add_argument('--loglevel', type=str, default='info')
flags = parser.parse_args()
if flags.out_path is None:
flags.out_path = flags.in_path.replace('.npz', '.json')
loglevel = dict(error=40, warning=30, info=20, debug=10)[flags.loglevel]
simple_formatter = logging.Formatter('{asctime}-{levelname:^1.1} -- {message}', style='{')
print_handler = logging.StreamHandler(sys.stdout)
print_handler.setLevel(loglevel)
print_handler.setFormatter(simple_formatter)
logging.basicConfig(level=loglevel, handlers=[print_handler])
return flags
if __name__ == '__main__':
main() | fr | 0.221828 | #!/usr/bin/env python3 | 2.291753 | 2 |
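A small sketch that builds a toy input file and runs the exporter above; the joint count and file names are illustrative assumptions.

import numpy as np
import subprocess

np.savez('toy_preds.npz',
         image_path=np.array([b'frames/img_0001.jpg']),
         coords3d_pred_world=np.zeros((1, 17, 3), dtype=np.float32))
subprocess.run(['python3', 'src/export_to_poseviz.py',
                '--in-path', 'toy_preds.npz', '--out-path', 'toy_preds.json'],
               check=True)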
xcube/core/gen2/local/helpers.py | bcdev/xcube | 97 | 10082 | # The MIT License (MIT)
# Copyright (c) 2021 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import xarray as xr
def is_empty_cube(cube: xr.Dataset) -> bool:
return len(cube.data_vars) == 0
def strip_cube(cube: xr.Dataset) -> xr.Dataset:
drop_vars = [k for k, v in cube.data_vars.items()
if len(v.shape) < 3
or np.product(v.shape) == 0
or v.shape[-2] < 2
or v.shape[-1] < 2]
if drop_vars:
return cube.drop_vars(drop_vars)
return cube
| # The MIT License (MIT)
# Copyright (c) 2021 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import xarray as xr
def is_empty_cube(cube: xr.Dataset) -> bool:
return len(cube.data_vars) == 0
def strip_cube(cube: xr.Dataset) -> xr.Dataset:
drop_vars = [k for k, v in cube.data_vars.items()
if len(v.shape) < 3
or np.product(v.shape) == 0
or v.shape[-2] < 2
or v.shape[-1] < 2]
if drop_vars:
return cube.drop_vars(drop_vars)
return cube
| en | 0.783621 | # The MIT License (MIT) # Copyright (c) 2021 by the xcube development team and contributors # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies # of the Software, and to permit persons to whom the Software is furnished to do # so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. | 2.258219 | 2 |
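A minimal sketch of the two helpers above on a toy cube; the variable names are illustrative and the import path is inferred from the file location.

import numpy as np
import xarray as xr
from xcube.core.gen2.local.helpers import is_empty_cube, strip_cube

cube = xr.Dataset({
    'chl': (('time', 'lat', 'lon'), np.zeros((2, 4, 4))),   # kept: 3-D with both spatial sizes >= 2
    'flag': (('time',), np.zeros(2)),                        # dropped: fewer than 3 dimensions
})
print(list(strip_cube(cube).data_vars))   # ['chl']
print(is_empty_cube(xr.Dataset()))        # True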
core/log.py | dl-stuff/dl9 | 0 | 10083 | """Simulation logs"""
from __future__ import annotations # default once 3.10
import sys
from enum import Enum
from typing import Type, TYPE_CHECKING
if TYPE_CHECKING:
from core.timeline import Timeline
class LogKind(Enum):
def __str__(self) -> str:
return self.name
DEBUG = 0
SIM = 1
class LogData:
pass
class Logger:
__slots__ = ["_timeline", "_entries", "_data"]
PRINT_ASAP = True
def __init__(self, timeline: Timeline):
self._timeline = timeline
self.reset()
def reset(self):
self._entries = []
self._data = LogData()
def __call__(self, fmt: str, kind: LogKind, *args, **kwargs) -> None:
entry = LogEntry(self._timeline.now, fmt, kind, *args, **kwargs)
if self.PRINT_ASAP:
print(entry.fmt(), flush=True)
entry.process(self._data)
self._entries.append(entry)
def write(self, output=sys.stdout):
        for entry in self._entries:
output.write(entry.fmt())
output.write("\n")
class LogEntry:
"""1 row in the log"""
__slots__ = ["_timestamp", "_kind", "_fmt", "_args", "_kwargs"]
def __init__(self, timestamp: float, fmt: str, kind: LogKind, *args, **kwargs) -> None:
self._timestamp = timestamp
self._fmt = "{ts:>8.3f}{kind:>6}| " + fmt
self._kind = kind
self._args = args
self._kwargs = kwargs
def fmt(self) -> str:
"""Format this line of log"""
return self._fmt.format(ts=self._timestamp, kind=self._kind, *self._args, **self._kwargs)
def process(self, data: LogData) -> None:
"""Does any kind of updates to log data"""
pass
| """Simulation logs"""
from __future__ import annotations # default once 3.10
import sys
from enum import Enum
from typing import Type, TYPE_CHECKING
if TYPE_CHECKING:
from core.timeline import Timeline
class LogKind(Enum):
def __str__(self) -> str:
return self.name
DEBUG = 0
SIM = 1
class LogData:
pass
class Logger:
__slots__ = ["_timeline", "_entries", "_data"]
PRINT_ASAP = True
def __init__(self, timeline: Timeline):
self._timeline = timeline
self.reset()
def reset(self):
self._entries = []
self._data = LogData()
def __call__(self, fmt: str, kind: LogKind, *args, **kwargs) -> None:
entry = LogEntry(self._timeline.now, fmt, kind, *args, **kwargs)
if self.PRINT_ASAP:
print(entry.fmt(), flush=True)
entry.process(self._data)
self._entries.append(entry)
def write(self, output=sys.stdout):
        for entry in self._entries:
output.write(entry.fmt())
output.write("\n")
class LogEntry:
"""1 row in the log"""
__slots__ = ["_timestamp", "_kind", "_fmt", "_args", "_kwargs"]
def __init__(self, timestamp: float, fmt: str, kind: LogKind, *args, **kwargs) -> None:
self._timestamp = timestamp
self._fmt = "{ts:>8.3f}{kind:>6}| " + fmt
self._kind = kind
self._args = args
self._kwargs = kwargs
def fmt(self) -> str:
"""Format this line of log"""
return self._fmt.format(ts=self._timestamp, kind=self._kind, *self._args, **self._kwargs)
def process(self, data: LogData) -> None:
"""Does any kind of updates to log data"""
pass
| en | 0.677814 | Simulation logs # default once 3.10 1 row in the log Format this line of log Does any kind of updates to log data | 2.653929 | 3 |
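A hedged usage sketch of the Logger above; _StubTimeline stands in for core.timeline.Timeline (only the .now attribute that Logger reads is modelled), and the import path mirrors the file location.

from core.log import Logger, LogKind

class _StubTimeline:
    now = 1.250

log = Logger(_StubTimeline())
log("fs charge {amount}", LogKind.SIM, amount=590)   # printed immediately because PRINT_ASAP is True
log.write()                                          # re-emits all recorded entries to stdout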
zipline/__init__.py | chalant/pluto | 0 | 10084 | # #
# # Copyright 2015 Quantopian, Inc.
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# from distutils.version import StrictVersion
# import os
# import numpy as np
#
# # This is *not* a place to dump arbitrary classes/modules for convenience,
# # it is a place to expose the public interfaces.
# from trading_calendars import get_calendar
#
# from . import data
# from . import finance
# from . import gens
# from . import utils
# from .utils.numpy_utils import numpy_version
# from .utils.pandas_utils import new_pandas
# from .utils.run_algo import run_algorithm
# from ._version import get_versions
#
# # These need to happen after the other imports.
# from . algorithm import TradingAlgorithm
# from . import api
# from zipline import extensions as ext
# from zipline.finance.blotter import Blotter
#
# # PERF: Fire a warning if calendars were instantiated during zipline import.
# # Having calendars doesn't break anything per-se, but it makes zipline imports
# # noticeably slower, which becomes particularly noticeable in the Zipline CLI.
# from trading_calendars.calendar_utils import global_calendar_dispatcher
# if global_calendar_dispatcher._calendars:
# import warnings
# warnings.warn(
# "Found TradingCalendar instances after zipline import.\n"
# "Zipline startup will be much slower until this is fixed!",
# )
# del warnings
# del global_calendar_dispatcher
#
#
# __version__ = get_versions()['version']
# del get_versions
#
# extension_args = ext.Namespace()
#
#
# def load_ipython_extension(ipython):
# from .__main__ import zipline_magic
# ipython.register_magic_function(zipline_magic, 'line_cell', 'zipline')
#
#
# if os.name == 'nt':
# # we need to be able to write to our temp directoy on windows so we
# # create a subdir in %TMP% that has write access and use that as %TMP%
# def _():
# import atexit
# import tempfile
#
# tempfile.tempdir = tempdir = tempfile.mkdtemp()
#
# @atexit.register
# def cleanup_tempdir():
# import shutil
# shutil.rmtree(tempdir)
# _()
# del _
#
# __all__ = [
# 'Blotter',
# 'TradingAlgorithm',
# 'api',
# 'data',
# 'finance',
# 'get_calendar',
# 'gens',
# 'run_algorithm',
# 'utils',
# 'extension_args'
# ]
#
#
# def setup(self,
# np=np,
# numpy_version=numpy_version,
# StrictVersion=StrictVersion,
# new_pandas=new_pandas):
# """Lives in zipline.__init__ for doctests."""
#
# if numpy_version >= StrictVersion('1.14'):
# self.old_opts = np.get_printoptions()
# np.set_printoptions(legacy='1.13')
# else:
# self.old_opts = None
#
# if new_pandas:
# self.old_err = np.geterr()
# # old pandas has numpy compat that sets this
# np.seterr(all='ignore')
# else:
# self.old_err = None
#
#
# def teardown(self, np=np):
# """Lives in zipline.__init__ for doctests."""
#
# if self.old_err is not None:
# np.seterr(**self.old_err)
#
# if self.old_opts is not None:
# np.set_printoptions(**self.old_opts)
#
#
# del os
# del np
# del numpy_version
# del StrictVersion
# del new_pandas
| # #
# # Copyright 2015 Quantopian, Inc.
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# from distutils.version import StrictVersion
# import os
# import numpy as np
#
# # This is *not* a place to dump arbitrary classes/modules for convenience,
# # it is a place to expose the public interfaces.
# from trading_calendars import get_calendar
#
# from . import data
# from . import finance
# from . import gens
# from . import utils
# from .utils.numpy_utils import numpy_version
# from .utils.pandas_utils import new_pandas
# from .utils.run_algo import run_algorithm
# from ._version import get_versions
#
# # These need to happen after the other imports.
# from . algorithm import TradingAlgorithm
# from . import api
# from zipline import extensions as ext
# from zipline.finance.blotter import Blotter
#
# # PERF: Fire a warning if calendars were instantiated during zipline import.
# # Having calendars doesn't break anything per-se, but it makes zipline imports
# # noticeably slower, which becomes particularly noticeable in the Zipline CLI.
# from trading_calendars.calendar_utils import global_calendar_dispatcher
# if global_calendar_dispatcher._calendars:
# import warnings
# warnings.warn(
# "Found TradingCalendar instances after zipline import.\n"
# "Zipline startup will be much slower until this is fixed!",
# )
# del warnings
# del global_calendar_dispatcher
#
#
# __version__ = get_versions()['version']
# del get_versions
#
# extension_args = ext.Namespace()
#
#
# def load_ipython_extension(ipython):
# from .__main__ import zipline_magic
# ipython.register_magic_function(zipline_magic, 'line_cell', 'zipline')
#
#
# if os.name == 'nt':
# # we need to be able to write to our temp directoy on windows so we
# # create a subdir in %TMP% that has write access and use that as %TMP%
# def _():
# import atexit
# import tempfile
#
# tempfile.tempdir = tempdir = tempfile.mkdtemp()
#
# @atexit.register
# def cleanup_tempdir():
# import shutil
# shutil.rmtree(tempdir)
# _()
# del _
#
# __all__ = [
# 'Blotter',
# 'TradingAlgorithm',
# 'api',
# 'data',
# 'finance',
# 'get_calendar',
# 'gens',
# 'run_algorithm',
# 'utils',
# 'extension_args'
# ]
#
#
# def setup(self,
# np=np,
# numpy_version=numpy_version,
# StrictVersion=StrictVersion,
# new_pandas=new_pandas):
# """Lives in zipline.__init__ for doctests."""
#
# if numpy_version >= StrictVersion('1.14'):
# self.old_opts = np.get_printoptions()
# np.set_printoptions(legacy='1.13')
# else:
# self.old_opts = None
#
# if new_pandas:
# self.old_err = np.geterr()
# # old pandas has numpy compat that sets this
# np.seterr(all='ignore')
# else:
# self.old_err = None
#
#
# def teardown(self, np=np):
# """Lives in zipline.__init__ for doctests."""
#
# if self.old_err is not None:
# np.seterr(**self.old_err)
#
# if self.old_opts is not None:
# np.set_printoptions(**self.old_opts)
#
#
# del os
# del np
# del numpy_version
# del StrictVersion
# del new_pandas
| en | 0.598939 | # # # # Copyright 2015 Quantopian, Inc. # # # # Licensed under the Apache License, Version 2.0 (the "License"); # # you may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # # limitations under the License. # from distutils.version import StrictVersion # import os # import numpy as np # # # This is *not* a place to dump arbitrary classes/modules for convenience, # # it is a place to expose the public interfaces. # from trading_calendars import get_calendar # # from . import data # from . import finance # from . import gens # from . import utils # from .utils.numpy_utils import numpy_version # from .utils.pandas_utils import new_pandas # from .utils.run_algo import run_algorithm # from ._version import get_versions # # # These need to happen after the other imports. # from . algorithm import TradingAlgorithm # from . import api # from zipline import extensions as ext # from zipline.finance.blotter import Blotter # # # PERF: Fire a warning if calendars were instantiated during zipline import. # # Having calendars doesn't break anything per-se, but it makes zipline imports # # noticeably slower, which becomes particularly noticeable in the Zipline CLI. # from trading_calendars.calendar_utils import global_calendar_dispatcher # if global_calendar_dispatcher._calendars: # import warnings # warnings.warn( # "Found TradingCalendar instances after zipline import.\n" # "Zipline startup will be much slower until this is fixed!", # ) # del warnings # del global_calendar_dispatcher # # # __version__ = get_versions()['version'] # del get_versions # # extension_args = ext.Namespace() # # # def load_ipython_extension(ipython): # from .__main__ import zipline_magic # ipython.register_magic_function(zipline_magic, 'line_cell', 'zipline') # # # if os.name == 'nt': # # we need to be able to write to our temp directoy on windows so we # # create a subdir in %TMP% that has write access and use that as %TMP% # def _(): # import atexit # import tempfile # # tempfile.tempdir = tempdir = tempfile.mkdtemp() # # @atexit.register # def cleanup_tempdir(): # import shutil # shutil.rmtree(tempdir) # _() # del _ # # __all__ = [ # 'Blotter', # 'TradingAlgorithm', # 'api', # 'data', # 'finance', # 'get_calendar', # 'gens', # 'run_algorithm', # 'utils', # 'extension_args' # ] # # # def setup(self, # np=np, # numpy_version=numpy_version, # StrictVersion=StrictVersion, # new_pandas=new_pandas): # """Lives in zipline.__init__ for doctests.""" # # if numpy_version >= StrictVersion('1.14'): # self.old_opts = np.get_printoptions() # np.set_printoptions(legacy='1.13') # else: # self.old_opts = None # # if new_pandas: # self.old_err = np.geterr() # # old pandas has numpy compat that sets this # np.seterr(all='ignore') # else: # self.old_err = None # # # def teardown(self, np=np): # """Lives in zipline.__init__ for doctests.""" # # if self.old_err is not None: # np.seterr(**self.old_err) # # if self.old_opts is not None: # np.set_printoptions(**self.old_opts) # # # del os # del np # del numpy_version # del StrictVersion # del new_pandas | 1.564181 | 2 |
smarts/core/utils/traffic_history_service.py | c-h-a-r-l-i-e/SMARTS | 0 | 10085 | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import os
import pickle
from dataclasses import dataclass
from multiprocessing import Pipe, Process, Queue
import ijson
import smarts.core.scenario as scenario
@dataclass
class RequestHistoryRange:
start_index: int
batch_count: int
class Traffic_history_service:
"""responsible for dynamically fetching traffic history json to reduce
memory use of traffic history data
"""
class QueueDone:
pass
def __init__(self, history_file_path):
self._history_file_path = history_file_path
self._all_timesteps = set()
self._current_traffic_history = {}
self._prev_batch_history = {}
# return if traffic history is not used
if history_file_path is None:
return
self._log = logging.getLogger(self.__class__.__name__)
send_data_conn, receive_data_conn = Pipe()
self._receive_data_conn = receive_data_conn
self._request_queue = Queue()
self._fetch_history_proc = Process(
target=self._fetch_history,
args=(
send_data_conn,
self._request_queue,
self._history_file_path,
),
)
self._fetch_history_proc.daemon = True
self._fetch_history_proc.start()
self._range_start = 0
self._batch_size = 300
# initialize
with open(self._history_file_path, "rb") as f:
for index, (t, vehicles_state) in enumerate(
ijson.kvitems(f, "", use_float=True)
):
self._all_timesteps.add(t)
if (
self._range_start <= index
and index < self._range_start + self._batch_size
):
self._current_traffic_history[t] = vehicles_state
self._range_start += self._batch_size
# prepares the next batch
self._prepare_next_batch()
self._receive_data_conn.recv()
def teardown(self):
if self.is_in_use:
self._request_queue.put(Traffic_history_service.QueueDone())
self._request_queue.close()
self._request_queue = None
self._fetch_history_proc.join(timeout=3)
if self._fetch_history_proc.is_alive():
self._log.warning("fetch history process still alive after teardown")
self._fetch_history_proc = None
self._history_file_path = None
def __del__(self):
self.teardown()
@property
def is_in_use(self):
return self._history_file_path is not None
def _fetch_history(self, send_data_conn, request_queue, history_file_path):
"""prepare 1 batch ahead, when received request, immediately return the previously
prepared batch and prepares the next batch.
"""
return_batch = {}
while True:
historyRange = request_queue.get()
if type(historyRange) is Traffic_history_service.QueueDone:
break
assert isinstance(historyRange, RequestHistoryRange)
send_data_conn.send(return_batch)
return_batch = {}
with open(history_file_path, "rb") as f:
for index, (t, vehicles_state) in enumerate(
ijson.kvitems(f, "", use_float=True)
):
if (
historyRange.start_index <= index
and index < historyRange.start_index + historyRange.batch_count
):
return_batch[t] = vehicles_state
send_data_conn.close()
@property
def all_timesteps(self):
return self._all_timesteps
@property
def history_file_path(self):
return self._history_file_path
@property
def traffic_history(self):
return {**self._current_traffic_history, **self._prev_batch_history}
def _prepare_next_batch(self):
self._request_queue.put(
RequestHistoryRange(
start_index=self._range_start,
batch_count=self._batch_size,
)
)
self._range_start += self._batch_size
def fetch_history_at_timestep(self, timestep: str):
if timestep not in self._all_timesteps:
return {}
elif timestep in self.traffic_history:
return self.traffic_history[timestep]
# ask child process to prepare the next batch:
self._prepare_next_batch()
self._prev_batch_history = self._current_traffic_history
# receives the previous batch child process prepared
self._current_traffic_history = self._receive_data_conn.recv()
if timestep in self._current_traffic_history:
return self._current_traffic_history[timestep]
# no history exists at requested timestamp
return {}
@staticmethod
def apply_map_location_offset(position, map_offset):
return [pos + map_offset[i] for i, pos in enumerate(position[:2])]
@staticmethod
def fetch_agent_missions(
history_file_path: str, scenario_root_path: str, mapLocationOffset
):
assert os.path.isdir(scenario_root_path)
history_mission_filepath = os.path.join(
scenario_root_path, "history_mission.pkl"
)
if not os.path.exists(history_mission_filepath):
history_mission = {}
else:
with open(history_mission_filepath, "rb") as f:
history_mission = pickle.load(f)
if history_file_path in history_mission:
return history_mission[history_file_path]
vehicle_missions = {}
with open(history_file_path, "rb") as f:
for t, vehicles_state in ijson.kvitems(f, "", use_float=True):
for vehicle_id in vehicles_state:
if vehicle_id in vehicle_missions:
continue
vehicle_missions[vehicle_id] = scenario.Mission(
start=scenario.Start(
Traffic_history_service.apply_map_location_offset(
vehicles_state[vehicle_id]["position"],
mapLocationOffset,
),
scenario.Heading(vehicles_state[vehicle_id]["heading"]),
),
goal=scenario.EndlessGoal(),
start_time=float(t),
)
history_mission[history_file_path] = vehicle_missions
# update cached history_mission_file
with open(history_mission_filepath, "wb") as f:
pickle.dump(history_mission, f)
return vehicle_missions
| # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import os
import pickle
from dataclasses import dataclass
from multiprocessing import Pipe, Process, Queue
import ijson
import smarts.core.scenario as scenario
@dataclass
class RequestHistoryRange:
start_index: int
batch_count: int
class Traffic_history_service:
"""responsible for dynamically fetching traffic history json to reduce
memory use of traffic history data
"""
class QueueDone:
pass
def __init__(self, history_file_path):
self._history_file_path = history_file_path
self._all_timesteps = set()
self._current_traffic_history = {}
self._prev_batch_history = {}
# return if traffic history is not used
if history_file_path is None:
return
self._log = logging.getLogger(self.__class__.__name__)
send_data_conn, receive_data_conn = Pipe()
self._receive_data_conn = receive_data_conn
self._request_queue = Queue()
self._fetch_history_proc = Process(
target=self._fetch_history,
args=(
send_data_conn,
self._request_queue,
self._history_file_path,
),
)
self._fetch_history_proc.daemon = True
self._fetch_history_proc.start()
self._range_start = 0
self._batch_size = 300
# initialize
with open(self._history_file_path, "rb") as f:
for index, (t, vehicles_state) in enumerate(
ijson.kvitems(f, "", use_float=True)
):
self._all_timesteps.add(t)
if (
self._range_start <= index
and index < self._range_start + self._batch_size
):
self._current_traffic_history[t] = vehicles_state
self._range_start += self._batch_size
# prepares the next batch
self._prepare_next_batch()
self._receive_data_conn.recv()
def teardown(self):
if self.is_in_use:
self._request_queue.put(Traffic_history_service.QueueDone())
self._request_queue.close()
self._request_queue = None
self._fetch_history_proc.join(timeout=3)
if self._fetch_history_proc.is_alive():
self._log.warning("fetch history process still alive after teardown")
self._fetch_history_proc = None
self._history_file_path = None
def __del__(self):
self.teardown()
@property
def is_in_use(self):
return self._history_file_path is not None
def _fetch_history(self, send_data_conn, request_queue, history_file_path):
"""prepare 1 batch ahead, when received request, immediately return the previously
prepared batch and prepares the next batch.
"""
return_batch = {}
while True:
historyRange = request_queue.get()
if type(historyRange) is Traffic_history_service.QueueDone:
break
assert isinstance(historyRange, RequestHistoryRange)
send_data_conn.send(return_batch)
return_batch = {}
with open(history_file_path, "rb") as f:
for index, (t, vehicles_state) in enumerate(
ijson.kvitems(f, "", use_float=True)
):
if (
historyRange.start_index <= index
and index < historyRange.start_index + historyRange.batch_count
):
return_batch[t] = vehicles_state
send_data_conn.close()
@property
def all_timesteps(self):
return self._all_timesteps
@property
def history_file_path(self):
return self._history_file_path
@property
def traffic_history(self):
return {**self._current_traffic_history, **self._prev_batch_history}
def _prepare_next_batch(self):
self._request_queue.put(
RequestHistoryRange(
start_index=self._range_start,
batch_count=self._batch_size,
)
)
self._range_start += self._batch_size
def fetch_history_at_timestep(self, timestep: str):
if timestep not in self._all_timesteps:
return {}
elif timestep in self.traffic_history:
return self.traffic_history[timestep]
# ask child process to prepare the next batch:
self._prepare_next_batch()
self._prev_batch_history = self._current_traffic_history
# receives the previous batch child process prepared
self._current_traffic_history = self._receive_data_conn.recv()
if timestep in self._current_traffic_history:
return self._current_traffic_history[timestep]
# no history exists at requested timestamp
return {}
@staticmethod
def apply_map_location_offset(position, map_offset):
return [pos + map_offset[i] for i, pos in enumerate(position[:2])]
@staticmethod
def fetch_agent_missions(
history_file_path: str, scenario_root_path: str, mapLocationOffset
):
assert os.path.isdir(scenario_root_path)
history_mission_filepath = os.path.join(
scenario_root_path, "history_mission.pkl"
)
if not os.path.exists(history_mission_filepath):
history_mission = {}
else:
with open(history_mission_filepath, "rb") as f:
history_mission = pickle.load(f)
if history_file_path in history_mission:
return history_mission[history_file_path]
vehicle_missions = {}
with open(history_file_path, "rb") as f:
for t, vehicles_state in ijson.kvitems(f, "", use_float=True):
for vehicle_id in vehicles_state:
if vehicle_id in vehicle_missions:
continue
vehicle_missions[vehicle_id] = scenario.Mission(
start=scenario.Start(
Traffic_history_service.apply_map_location_offset(
vehicles_state[vehicle_id]["position"],
mapLocationOffset,
),
scenario.Heading(vehicles_state[vehicle_id]["heading"]),
),
goal=scenario.EndlessGoal(),
start_time=float(t),
)
history_mission[history_file_path] = vehicle_missions
# update cached history_mission_file
with open(history_mission_filepath, "wb") as f:
pickle.dump(history_mission, f)
return vehicle_missions
| en | 0.808112 | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. responsible for dynamically fetching traffic history json to reduce memory use of traffic history data # return if traffic history is not used # initialize # prepares the next batch prepare 1 batch ahead, when received request, immediately return the previously prepared batch and prepares the next batch. # ask child process to prepare the next batch: # receives the previous batch child process prepared # no history exists at requested timestamp # update cached history_mission_file | 1.801374 | 2 |
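A brief usage sketch for the Traffic_history_service defined in the record above. The history file path is hypothetical; the sketch relies only on the constructor, all_timesteps, fetch_history_at_timestep and teardown shown in the code, and assumes the JSON maps timestep strings to per-vehicle state dicts with "position" and "heading" keys (as fetch_agent_missions does).

# Usage sketch only: assumes the Traffic_history_service class above is importable.
service = Traffic_history_service("scenarios/example/traffic_history.json")  # hypothetical path
try:
    some_t = next(iter(service.all_timesteps), None)  # timestep keys are strings
    if some_t is not None:
        vehicles_state = service.fetch_history_at_timestep(some_t)
        for vehicle_id, state in vehicles_state.items():
            print(vehicle_id, state.get("position"), state.get("heading"))
finally:
    service.teardown()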
src/basics/sql_lite/update_data.py | FoxNeo/MyPythonProjects | 0 | 10086 | <gh_stars>0
import sqlite3
connect = sqlite3.connect("production.db")
cursor = connect.cursor()
cursor.execute("UPDATE PERSON SET edad = 19 WHERE nombre = 'Conker'")
connect.commit()
connect.close()
| import sqlite3
connect = sqlite3.connect("production.db")
cursor = connect.cursor()
cursor.execute("UPDATE PERSON SET edad = 19 WHERE nombre = 'Conker'")
connect.commit()
connect.close() | none | 1 | 2.573106 | 3 |
|
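The update_data.py record above interpolates its values straight into the SQL string. A safer variant of the same update (sketch; table, columns and values are taken from the snippet) binds the values as parameters:

import sqlite3

connect = sqlite3.connect("production.db")
cursor = connect.cursor()
# Parameter binding avoids manual string formatting and SQL injection.
cursor.execute("UPDATE PERSON SET edad = ? WHERE nombre = ?", (19, "Conker"))
connect.commit()
connect.close()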
calvin/runtime/south/plugins/media/defaultimpl/image.py | josrolgil/exjobbCalvin | 1 | 10087 | <filename>calvin/runtime/south/plugins/media/defaultimpl/image.py
import pygame
from StringIO import StringIO
import cv2
import os
import numpy
class Image(object):
"""
Image object
"""
def __init__(self):
self.display = None
def show_image(self, image, width, height):
"""
Show image
"""
size = (width, height)
self.display = pygame.display.set_mode(size, 0)
self.snapshot = pygame.surface.Surface(size, 0, self.display)
img = pygame.image.load(StringIO(image))
self.display.blit(img, (0, 0))
pygame.display.flip()
def detect_face(self, image):
linux_prefix = "/usr/share/opencv"
mac_prefix = "/usr/local/share/OpenCV"
suffix = "/haarcascades/haarcascade_frontalface_default.xml"
linux_path = linux_prefix + suffix
mac_path = mac_prefix + suffix
if os.path.exists(linux_path) :
cpath = linux_path
elif os.path.exists(mac_path) :
cpath = mac_path
else :
raise Exception("No Haarcascade found")
classifier = cv2.CascadeClassifier(cpath)
jpg = numpy.fromstring(image, numpy.int8)
image = cv2.imdecode(jpg, 1)
faces = classifier.detectMultiScale(image)
if len(faces) > 0 :
for (x,y,w,h) in faces :
if w < 120 :
# Too small to be a nearby face
continue
return True
return False
def close(self):
"""
Close display
"""
if not self.display is None:
pygame.display.quit()
| <filename>calvin/runtime/south/plugins/media/defaultimpl/image.py
import pygame
from StringIO import StringIO
import cv2
import os
import numpy
class Image(object):
"""
Image object
"""
def __init__(self):
self.display = None
def show_image(self, image, width, height):
"""
Show image
"""
size = (width, height)
self.display = pygame.display.set_mode(size, 0)
self.snapshot = pygame.surface.Surface(size, 0, self.display)
img = pygame.image.load(StringIO(image))
self.display.blit(img, (0, 0))
pygame.display.flip()
def detect_face(self, image):
linux_prefix = "/usr/share/opencv"
mac_prefix = "/usr/local/share/OpenCV"
suffix = "/haarcascades/haarcascade_frontalface_default.xml"
linux_path = linux_prefix + suffix
mac_path = mac_prefix + suffix
if os.path.exists(linux_path) :
cpath = linux_path
elif os.path.exists(mac_path) :
cpath = mac_path
else :
raise Exception("No Haarcascade found")
classifier = cv2.CascadeClassifier(cpath)
jpg = numpy.fromstring(image, numpy.int8)
image = cv2.imdecode(jpg, 1)
faces = classifier.detectMultiScale(image)
if len(faces) > 0 :
for (x,y,w,h) in faces :
if w < 120 :
# Too small to be a nearby face
continue
return True
return False
def close(self):
"""
Close display
"""
if not self.display is None:
pygame.display.quit()
| en | 0.746649 | Image object Show image # Too small to be a nearby face Close display | 2.935783 | 3 |
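A minimal driver for the Image helper above (note the file targets Python 2, given the StringIO import). The JPEG path is hypothetical; the sketch only uses the detect_face, show_image and close methods shown.

# Sketch only: assumes the Image class above is available and snapshot.jpg exists.
image_helper = Image()
with open("snapshot.jpg", "rb") as f:  # hypothetical image file
    jpeg_bytes = f.read()
if image_helper.detect_face(jpeg_bytes):
    image_helper.show_image(jpeg_bytes, 640, 480)  # display surface width/height
image_helper.close()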
durin/models.py | mlodic/django-rest-durin | 0 | 10088 | <filename>durin/models.py<gh_stars>0
import binascii
from os import urandom
import humanize
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from durin.settings import durin_settings
from durin.signals import token_renewed
User = settings.AUTH_USER_MODEL
def _create_token_string() -> str:
return binascii.hexlify(
urandom(int(durin_settings.TOKEN_CHARACTER_LENGTH / 2))
).decode()
class Client(models.Model):
name = models.CharField(
max_length=64,
null=False,
blank=False,
db_index=True,
unique=True,
help_text=_("A unique identification name for the client."),
)
token_ttl = models.DurationField(
null=False,
default=durin_settings.DEFAULT_TOKEN_TTL,
help_text=_(
"""
Token Time To Live (TTL) in timedelta. Format: <em>DAYS HH:MM:SS</em>.
"""
),
)
def __str__(self):
td = humanize.naturaldelta(self.token_ttl)
return "({0}, {1})".format(self.name, td)
class AuthTokenManager(models.Manager):
def create(self, user, client, delta_ttl=None):
token = _create_token_string()
if delta_ttl is not None:
expiry = timezone.now() + delta_ttl
else:
expiry = timezone.now() + client.token_ttl
instance = super(AuthTokenManager, self).create(
token=token, user=user, client=client, expiry=expiry
)
return instance
class AuthToken(models.Model):
class Meta:
constraints = [
models.UniqueConstraint(
fields=["user", "client"], name="unique token for user per client"
)
]
objects = AuthTokenManager()
token = models.CharField(
max_length=durin_settings.TOKEN_CHARACTER_LENGTH,
null=False,
blank=False,
db_index=True,
unique=True,
help_text=_("Token is auto-generated on save."),
)
user = models.ForeignKey(
User,
null=False,
blank=False,
related_name="auth_token_set",
on_delete=models.CASCADE,
)
client = models.ForeignKey(
Client,
null=False,
blank=False,
related_name="auth_token_set",
on_delete=models.CASCADE,
)
created = models.DateTimeField(auto_now_add=True)
expiry = models.DateTimeField(null=False)
def renew_token(self, renewed_by):
new_expiry = timezone.now() + self.client.token_ttl
self.expiry = new_expiry
self.save(update_fields=("expiry",))
token_renewed.send(
sender=renewed_by,
username=self.user.get_username(),
token_id=self.pk,
expiry=new_expiry,
)
return new_expiry
@property
def expires_in(self) -> str:
if self.expiry:
td = self.expiry - self.created
return humanize.naturaldelta(td)
else:
return "N/A"
@property
def has_expired(self) -> bool:
return timezone.now() > self.expiry
def __repr__(self) -> str:
return "({0}, {1}/{2})".format(
self.token, self.user.get_username(), self.client.name
)
def __str__(self) -> str:
return self.token
| <filename>durin/models.py<gh_stars>0
import binascii
from os import urandom
import humanize
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from durin.settings import durin_settings
from durin.signals import token_renewed
User = settings.AUTH_USER_MODEL
def _create_token_string() -> str:
return binascii.hexlify(
urandom(int(durin_settings.TOKEN_CHARACTER_LENGTH / 2))
).decode()
class Client(models.Model):
name = models.CharField(
max_length=64,
null=False,
blank=False,
db_index=True,
unique=True,
help_text=_("A unique identification name for the client."),
)
token_ttl = models.DurationField(
null=False,
default=durin_settings.DEFAULT_TOKEN_TTL,
help_text=_(
"""
Token Time To Live (TTL) in timedelta. Format: <em>DAYS HH:MM:SS</em>.
"""
),
)
def __str__(self):
td = humanize.naturaldelta(self.token_ttl)
return "({0}, {1})".format(self.name, td)
class AuthTokenManager(models.Manager):
def create(self, user, client, delta_ttl=None):
token = _create_token_string()
if delta_ttl is not None:
expiry = timezone.now() + delta_ttl
else:
expiry = timezone.now() + client.token_ttl
instance = super(AuthTokenManager, self).create(
token=token, user=user, client=client, expiry=expiry
)
return instance
class AuthToken(models.Model):
class Meta:
constraints = [
models.UniqueConstraint(
fields=["user", "client"], name="unique token for user per client"
)
]
objects = AuthTokenManager()
token = models.CharField(
max_length=durin_settings.TOKEN_CHARACTER_LENGTH,
null=False,
blank=False,
db_index=True,
unique=True,
help_text=_("Token is auto-generated on save."),
)
user = models.ForeignKey(
User,
null=False,
blank=False,
related_name="auth_token_set",
on_delete=models.CASCADE,
)
client = models.ForeignKey(
Client,
null=False,
blank=False,
related_name="auth_token_set",
on_delete=models.CASCADE,
)
created = models.DateTimeField(auto_now_add=True)
expiry = models.DateTimeField(null=False)
def renew_token(self, renewed_by):
new_expiry = timezone.now() + self.client.token_ttl
self.expiry = new_expiry
self.save(update_fields=("expiry",))
token_renewed.send(
sender=renewed_by,
username=self.user.get_username(),
token_id=self.pk,
expiry=new_expiry,
)
return new_expiry
@property
def expires_in(self) -> str:
if self.expiry:
td = self.expiry - self.created
return humanize.naturaldelta(td)
else:
return "N/A"
@property
def has_expired(self) -> bool:
return timezone.now() > self.expiry
def __repr__(self) -> str:
return "({0}, {1}/{2})".format(
self.token, self.user.get_username(), self.client.name
)
def __str__(self) -> str:
return self.token
| en | 0.170369 | Token Time To Live (TTL) in timedelta. Format: <em>DAYS HH:MM:SS</em>. | 2.099509 | 2 |
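A hedged sketch of how the durin models above are typically exercised, e.g. from a view or a test. The client name, TTL and user lookup are hypothetical, and the import path assumes the app is installed as durin.

# Sketch only: assumes Django is configured and the durin models above are migrated.
from datetime import timedelta
from django.contrib.auth import get_user_model
from durin.models import AuthToken, Client

user = get_user_model().objects.first()  # hypothetical existing user
client, _ = Client.objects.get_or_create(
    name="web-dashboard",  # hypothetical client name
    defaults={"token_ttl": timedelta(days=7)},
)
# The custom manager derives `expiry` from the client's TTL unless delta_ttl is passed.
token = AuthToken.objects.create(user=user, client=client)
print(token.token, token.expiry, token.expires_in)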
linter.py | CudaText-addons/cuda_lint_htmltidy | 0 | 10089 | <filename>linter.py
# Copyright (c) 2013 <NAME>
# Change for CudaLint: <NAME>.
# License: MIT
import os
from cuda_lint import Linter, util
if os.name=='nt':
_exe = os.path.join(os.path.dirname(__file__), 'tidy_win32', 'tidy')
else:
_exe = 'tidy'
class HtmlTidy(Linter):
syntax = ('HTML', 'HTML_')
cmd = (_exe, '-errors', '-quiet', '-utf8')
regex = r'^line (?P<line>\d+) column (?P<col>\d+) - (?:(?P<error>Error)|(?P<warning>Warning)): (?P<message>.+)'
error_stream = util.STREAM_STDERR
| <filename>linter.py
# Copyright (c) 2013 <NAME>
# Change for CudaLint: <NAME>.
# License: MIT
import os
from cuda_lint import Linter, util
if os.name=='nt':
_exe = os.path.join(os.path.dirname(__file__), 'tidy_win32', 'tidy')
else:
_exe = 'tidy'
class HtmlTidy(Linter):
syntax = ('HTML', 'HTML_')
cmd = (_exe, '-errors', '-quiet', '-utf8')
regex = r'^line (?P<line>\d+) column (?P<col>\d+) - (?:(?P<error>Error)|(?P<warning>Warning)): (?P<message>.+)'
error_stream = util.STREAM_STDERR
| en | 0.556904 | # Copyright (c) 2013 <NAME> # Change for CudaLint: <NAME>. # License: MIT | 2.256847 | 2 |
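To see what the linter record above extracts from HTML Tidy output, the regex can be exercised on its own; the sample line below is hypothetical but follows the "line N column M - Warning: ..." shape the pattern expects.

import re

regex = r'^line (?P<line>\d+) column (?P<col>\d+) - (?:(?P<error>Error)|(?P<warning>Warning)): (?P<message>.+)'
sample = "line 12 column 3 - Warning: missing </p> before <div>"  # hypothetical tidy output

m = re.match(regex, sample)
if m:
    print(m.group("line"), m.group("col"), m.group("error") or m.group("warning"), m.group("message"))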
PP4E-Examples-1.4/Examples/PP4E/Dstruct/Basic/inter2.py | AngelLiang/PP4E | 0 | 10090 | "set operations for multiple sequences"
def intersect(*args):
res = []
for x in args[0]: # scan the first list
for other in args[1:]: # for all other arguments
if x not in other: break # this item in each one?
else:
res.append(x) # add common items to the end
return res
def union(*args):
res = []
for seq in args: # for all sequence-arguments
for x in seq: # for all nodes in argument
if not x in res:
res.append(x) # add new items to result
return res
| "set operations for multiple sequences"
def intersect(*args):
res = []
for x in args[0]: # scan the first list
for other in args[1:]: # for all other arguments
if x not in other: break # this item in each one?
else:
res.append(x) # add common items to the end
return res
def union(*args):
res = []
for seq in args: # for all sequence-arguments
for x in seq: # for all nodes in argument
if not x in res:
res.append(x) # add new items to result
return res
| en | 0.601896 | # scan the first list # for all other arguments # this item in each one? # add common items to the end # for all sequence-arguments # for all nodes in argument # add new items to result | 3.582972 | 4 |
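A quick check of the two set operations above; expected output is shown in the comments (result order follows the scanning order of the loops).

# Example usage of intersect/union from inter2.py above.
s1, s2, s3 = "SPAM", "SCAM", "SLAM"
print(intersect(s1, s2))      # ['S', 'A', 'M']
print(union(s1, s2))          # ['S', 'P', 'A', 'M', 'C']
print(intersect(s1, s2, s3))  # ['S', 'A', 'M']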
airtech_api/utils/error_messages/serialization_errors.py | chidioguejiofor/airtech-api | 1 | 10091 | <reponame>chidioguejiofor/airtech-api<gh_stars>1-10
msg_dict = {
'resource_not_found':
'The resource you specified was not found',
'invalid_gender':
"The gender you specified is invalid!!",
'many_invalid_fields':
'Some errors occurred while validating some fields. Please check and try again',
'unique':
'The {} you inputted already exists',
'user_not_found':
'The user with that username/email and password combination was not found',
'email_not_found':
'A user with email `{}` does not exist',
'user_already_verified':
'The user with that email has already been verified',
'invalid_flight_type':
'Flight type must be either international or local',
'invalid_flight_schedule':
'Flight schedule must be at least 12 hours before it is created',
'resource_id_not_found':
'The {} with that id was not found',
'user_book_flight_twice':
'You had previously booked for this Flight and thus cannot do it again',
'flight_booking_expired':
'You cannot book for a flight less than 24 hours before the flight',
'flight_schedule_expired':
'The schedule of this flight has already passed and thus you cannot book it',
'missing_field':
'You forgot to include this field',
'value_not_a_file':
'The value you inputted is not a file',
'not_an_image':
'The file you uploaded is not a valid image',
'image_too_large':
'Image must not be more than 2MB',
'payment_link_error':
'An error occurred while creating payment link',
'booking_already_paid':
'You have already paid for this flight',
'booking_expired':
'Your booking has expired, thus you cannot pay for this ticket',
'invalid_url':
'The `{}` field must be a valid URL with protocols `http` or `https`',
"invalid_url_field":
'This field must be a valid URL with protocols `http` or `https`',
'paystack_threw_error':
"There was an unexpected error while processing request. "
"Please raise this as an issue in at "
"https://github.com/chidioguejiofor/airtech-api/issues",
'empty_request':
'You did not specify any `{}` data in your request',
'paid_booking_cannot_be_deleted':
'You cannot delete this Booking because you have already paid for it',
'cannot_delete_expired_booking':
'You cannot delete an expired booking',
'cannot_delete_flight_with_bookings':
'You cannot delete this flight because users have started booking it',
'cannot_delete_flight_that_has_flown':
'You cannot delete this flight because the schedule date has been passed',
'cannot_update_flight_field_with_bookings':
'You cannot update the `{}` of this flight because it has already been booked',
'cannot_update_field':
'You cannot update a {} {}',
'regular_user_only':
'This endpoint is for only regular users',
'profile_not_updated':
'You need to update your profile picture before you can do this',
'only_alpha_and_numbers':
'This field can contain only alphabets and numbers'
}
| msg_dict = {
'resource_not_found':
'The resource you specified was not found',
'invalid_gender':
"The gender you specified is invalid!!",
'many_invalid_fields':
'Some errors occurred while validating some fields. Please check and try again',
'unique':
'The {} you inputted already exists',
'user_not_found':
'The user with that username/email and password combination was not found',
'email_not_found':
'A user with email `{}` does not exist',
'user_already_verified':
'The user with that email has already been verified',
'invalid_flight_type':
'Flight type must be either international or local',
'invalid_flight_schedule':
'Flight schedule must be at least 12 hours before it is created',
'resource_id_not_found':
'The {} with that id was not found',
'user_book_flight_twice':
'You had previously booked for this Flight and thus cannot do it again',
'flight_booking_expired':
'You cannot book for a flight less than 24 hours before the flight',
'flight_schedule_expired':
'The schedule of this flight has already passed and thus you cannot book it',
'missing_field':
'You forgot to include this field',
'value_not_a_file':
'The value you inputted is not a file',
'not_an_image':
'The file you uploaded is not a valid image',
'image_too_large':
'Image must not be more than 2MB',
'payment_link_error':
'An error occurred while creating payment link',
'booking_already_paid':
'You have already paid for this flight',
'booking_expired':
'Your booking has expired, thus you cannot pay for this ticket',
'invalid_url':
'The `{}` field must be a valid URL with protocols `http` or `https`',
"invalid_url_field":
'This field must be a valid URL with protocols `http` or `https`',
'paystack_threw_error':
"There was an unexpected error while processing request. "
"Please raise this as an issue in at "
"https://github.com/chidioguejiofor/airtech-api/issues",
'empty_request':
'You did not specify any `{}` data in your request',
'paid_booking_cannot_be_deleted':
'You cannot delete this Booking because you have already paid for it',
'cannot_delete_expired_booking':
'You cannot delete an expired booking',
'cannot_delete_flight_with_bookings':
'You cannot delete this flight because users have started booking it',
'cannot_delete_flight_that_has_flown':
'You cannot delete this flight because the schedule date has been passed',
'cannot_update_flight_field_with_bookings':
'You cannot update the `{}` of this flight because it has already been booked',
'cannot_update_field':
'You cannot update a {} {}',
'regular_user_only':
'This endpoint is for only regular users',
'profile_not_updated':
'You need to update your profile picture before you can do this',
'only_alpha_and_numbers':
'This field can contain only alphabets and numbers'
} | none | 1 | 2.547548 | 3 |
|
blog/be/server/serialization/__init__.py | kamko/lnu_ht19_4ME310_final_project | 0 | 10092 | from .marshmallow import ma
from .schemas import ArticleSchema
__all__ = [
'ma',
'ArticleSchema'
]
| from .marshmallow import ma
from .schemas import ArticleSchema
__all__ = [
'ma',
'ArticleSchema'
]
| none | 1 | 1.056647 | 1 |
|
scripts/fast_queue.py | ourresearch/openalex-guts | 48 | 10093 | import argparse
from time import sleep, time
from collections import defaultdict
from sqlalchemy import orm, text, insert, delete
from sqlalchemy.orm import selectinload
import models
from app import db
from app import logger
from scripts.queue import JsonWorks, JsonAuthors, JsonConcepts, JsonInstitutions, JsonVenues
from util import elapsed
def run(**kwargs):
entity_type = kwargs.get("entity")
method_name = kwargs.get("method")
if entity_type == "work" and method_name == "add_everything":
queue_table = "queue.work_add_everything"
elif method_name == "store":
queue_table = f"queue.{entity_type.lower()}_store"
else:
queue_table = f"queue.{method_name.lower()}"
if single_id := kwargs.get('id'):
if objects := get_objects(entity_type, [single_id]):
logger.info(f'found object {objects[0]}')
store_objects(objects)
db.session.commit()
else:
logger.warn(f'found no object with id {single_id}')
else:
objects_updated = 0
limit = kwargs.get('limit')
chunk = kwargs.get('chunk')
total_count = 0
while limit is None or objects_updated < limit:
loop_start = time()
if object_ids := fetch_queue_chunk_ids(queue_table, chunk):
objects = get_objects(entity_type, object_ids)
for obj in objects:
method_start_time = time()
total_count += 1
print(f"*** #{total_count} starting {obj}.{method_name}() method")
method_to_run = getattr(obj, method_name)
method_to_run()
print(f">>> finished {obj}.{method_name}(). took {elapsed(method_start_time, 4)} seconds")
# print(1/0)
logger.info('committing')
start_time = time()
if method_name == "store":
store_json_objects(objects)
else:
db.session.commit() # fail loudly for now
logger.info(f'commit took {elapsed(start_time, 4)}s')
finish_object_ids(queue_table, object_ids)
objects_updated += len(objects)
logger.info(f'processed chunk of {chunk} objects in {elapsed(loop_start, 2)} seconds')
else:
logger.info('nothing ready in the queue, waiting 5 seconds...')
sleep(5)
def store_json_objects(objects):
delete_dict_all_objects = defaultdict(list)
insert_dict_all_objects = defaultdict(list)
for count, obj in enumerate(objects):
obj.delete_dict = defaultdict(list)
for row in obj.insert_dicts:
for table_name, insert_dict in row.items():
insert_dict_all_objects[table_name] += [insert_dict]
obj.delete_dict[table_name] += [insert_dict["id"]]
for table_name, ids in obj.delete_dict.items():
delete_dict_all_objects[table_name] += ids
start_time = time()
for table_name, delete_ids in delete_dict_all_objects.items():
my_table = globals()[table_name]
db.session.remove()
db.session.execute(delete(my_table).where(my_table.id.in_(delete_ids)))
db.session.commit()
print("delete done")
for table_name, all_insert_strings in insert_dict_all_objects.items():
my_table = globals()[table_name]
db.session.remove()
db.session.execute(insert(my_table).values(all_insert_strings))
db.session.commit()
print("insert and commit took {} seconds".format(elapsed(start_time, 2)))
def fetch_queue_chunk_ids(queue_table, chunk_size):
text_query = f"""
with chunk as (
select id
from {queue_table}
where started is null
order by
finished asc nulls first,
rand
limit :chunk
for update skip locked
)
update {queue_table}
set started = now()
from chunk
where {queue_table}.id = chunk.id
returning chunk.id;
"""
logger.info(f'getting {chunk_size} ids from the queue')
start_time = time()
ids = [
row[0] for row in
db.engine.execute(text(text_query).bindparams(chunk=chunk_size).execution_options(autocommit=True)).all()
]
logger.info(f'got {len(ids)} ids from the queue in {elapsed(start_time, 4)}s')
logger.info(f'got these ids: {ids}')
return ids
def finish_object_ids(queue_table, object_ids):
# logger.info(f'finishing queue chunk')
start_time = time()
query_text = f'''
update {queue_table}
set finished = now(), started=null
where id = any(:ids)
'''
db.session.execute(text(query_text).bindparams(ids=object_ids))
db.session.commit()
# logger.info(f'finished saving finish_objects in {elapsed(start_time, 4)}s')
def get_objects(entity_type, object_ids):
logger.info(f'getting {len(object_ids)} objects')
start_time = time()
if entity_type == "work":
objects = db.session.query(models.Work).options(
selectinload(models.Work.records).selectinload(models.Record.journals).raiseload('*'),
selectinload(models.Work.records).raiseload('*'),
selectinload(models.Work.locations),
selectinload(models.Work.journal).raiseload('*'),
selectinload(models.Work.references).raiseload('*'),
selectinload(models.Work.references_unmatched).raiseload('*'),
selectinload(models.Work.mesh),
selectinload(models.Work.counts_by_year).raiseload('*'),
selectinload(models.Work.abstract),
selectinload(models.Work.extra_ids).raiseload('*'),
selectinload(models.Work.related_works).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.author).selectinload(models.Author.orcids).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.author).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.institution).selectinload(models.Institution.ror).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.institution).raiseload('*'),
selectinload(models.Work.concepts).selectinload(models.WorkConcept.concept).raiseload('*'),
selectinload(models.Work.concepts_full).raiseload('*'),
orm.Load(models.Work).raiseload('*')
).filter(models.Work.paper_id.in_(object_ids)).all()
elif entity_type == "author":
objects = db.session.query(models.Author).options(
selectinload(models.Author.counts_by_year_papers),
selectinload(models.Author.counts_by_year_citations),
selectinload(models.Author.alternative_names),
selectinload(models.Author.author_concepts),
selectinload(models.Author.orcids).selectinload(models.AuthorOrcid.orcid_data),
selectinload(models.Author.last_known_institution).selectinload(models.Institution.ror).raiseload('*'),
selectinload(models.Author.last_known_institution).raiseload('*'),
orm.Load(models.Author).raiseload('*')
).filter(models.Author.author_id.in_(object_ids)).all()
elif entity_type == "venue":
objects = db.session.query(models.Venue).options(
selectinload(models.Venue.counts_by_year_papers),
selectinload(models.Venue.counts_by_year_citations),
orm.Load(models.Venue).raiseload('*')
).filter(models.Venue.journal_id.in_(object_ids)).all()
elif entity_type == "institution":
objects = db.session.query(models.Institution).filter(models.Institution.affiliation_id.in_(object_ids)).all()
elif entity_type == "concept":
objects = db.session.query(models.Concept).filter(models.Concept.field_of_study_id.in_(object_ids)).all()
logger.info(f'got {len(objects)} objects in {elapsed(start_time, 4)}s')
return objects
# python -m scripts.fast_queue --entity=work --method=add_everything --limit=3
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run fast queue.")
parser.add_argument('--entity', type=str, help="the entity type to run")
parser.add_argument('--method', type=str, help="the method to run")
parser.add_argument('--id', nargs="?", type=str, help="id of the one thing you want to update (case sensitive)")
parser.add_argument('--limit', "-l", nargs="?", type=int, help="how many objects to work on")
parser.add_argument(
'--chunk', "-ch", nargs="?", default=100, type=int, help="how many objects to take off the queue at once"
)
parsed_args = parser.parse_args()
run(**vars(parsed_args))
| import argparse
from time import sleep, time
from collections import defaultdict
from sqlalchemy import orm, text, insert, delete
from sqlalchemy.orm import selectinload
import models
from app import db
from app import logger
from scripts.queue import JsonWorks, JsonAuthors, JsonConcepts, JsonInstitutions, JsonVenues
from util import elapsed
def run(**kwargs):
entity_type = kwargs.get("entity")
method_name = kwargs.get("method")
if entity_type == "work" and method_name == "add_everything":
queue_table = "queue.work_add_everything"
elif method_name == "store":
queue_table = f"queue.{entity_type.lower()}_store"
else:
queue_table = f"queue.{method_name.lower()}"
if single_id := kwargs.get('id'):
if objects := get_objects(entity_type, [single_id]):
logger.info(f'found object {objects[0]}')
store_objects(objects)
db.session.commit()
else:
logger.warn(f'found no object with id {single_id}')
else:
objects_updated = 0
limit = kwargs.get('limit')
chunk = kwargs.get('chunk')
total_count = 0
while limit is None or objects_updated < limit:
loop_start = time()
if object_ids := fetch_queue_chunk_ids(queue_table, chunk):
objects = get_objects(entity_type, object_ids)
for obj in objects:
method_start_time = time()
total_count += 1
print(f"*** #{total_count} starting {obj}.{method_name}() method")
method_to_run = getattr(obj, method_name)
method_to_run()
print(f">>> finished {obj}.{method_name}(). took {elapsed(method_start_time, 4)} seconds")
# print(1/0)
logger.info('committing')
start_time = time()
if method_name == "store":
store_json_objects(objects)
else:
db.session.commit() # fail loudly for now
logger.info(f'commit took {elapsed(start_time, 4)}s')
finish_object_ids(queue_table, object_ids)
objects_updated += len(objects)
logger.info(f'processed chunk of {chunk} objects in {elapsed(loop_start, 2)} seconds')
else:
logger.info('nothing ready in the queue, waiting 5 seconds...')
sleep(5)
def store_json_objects(objects):
delete_dict_all_objects = defaultdict(list)
insert_dict_all_objects = defaultdict(list)
for count, obj in enumerate(objects):
obj.delete_dict = defaultdict(list)
for row in obj.insert_dicts:
for table_name, insert_dict in row.items():
insert_dict_all_objects[table_name] += [insert_dict]
obj.delete_dict[table_name] += [insert_dict["id"]]
for table_name, ids in obj.delete_dict.items():
delete_dict_all_objects[table_name] += ids
start_time = time()
for table_name, delete_ids in delete_dict_all_objects.items():
my_table = globals()[table_name]
db.session.remove()
db.session.execute(delete(my_table).where(my_table.id.in_(delete_ids)))
db.session.commit()
print("delete done")
for table_name, all_insert_strings in insert_dict_all_objects.items():
my_table = globals()[table_name]
db.session.remove()
db.session.execute(insert(my_table).values(all_insert_strings))
db.session.commit()
print("insert and commit took {} seconds".format(elapsed(start_time, 2)))
def fetch_queue_chunk_ids(queue_table, chunk_size):
text_query = f"""
with chunk as (
select id
from {queue_table}
where started is null
order by
finished asc nulls first,
rand
limit :chunk
for update skip locked
)
update {queue_table}
set started = now()
from chunk
where {queue_table}.id = chunk.id
returning chunk.id;
"""
logger.info(f'getting {chunk_size} ids from the queue')
start_time = time()
ids = [
row[0] for row in
db.engine.execute(text(text_query).bindparams(chunk=chunk_size).execution_options(autocommit=True)).all()
]
logger.info(f'got {len(ids)} ids from the queue in {elapsed(start_time, 4)}s')
logger.info(f'got these ids: {ids}')
return ids
def finish_object_ids(queue_table, object_ids):
# logger.info(f'finishing queue chunk')
start_time = time()
query_text = f'''
update {queue_table}
set finished = now(), started=null
where id = any(:ids)
'''
db.session.execute(text(query_text).bindparams(ids=object_ids))
db.session.commit()
# logger.info(f'finished saving finish_objects in {elapsed(start_time, 4)}s')
def get_objects(entity_type, object_ids):
logger.info(f'getting {len(object_ids)} objects')
start_time = time()
if entity_type == "work":
objects = db.session.query(models.Work).options(
selectinload(models.Work.records).selectinload(models.Record.journals).raiseload('*'),
selectinload(models.Work.records).raiseload('*'),
selectinload(models.Work.locations),
selectinload(models.Work.journal).raiseload('*'),
selectinload(models.Work.references).raiseload('*'),
selectinload(models.Work.references_unmatched).raiseload('*'),
selectinload(models.Work.mesh),
selectinload(models.Work.counts_by_year).raiseload('*'),
selectinload(models.Work.abstract),
selectinload(models.Work.extra_ids).raiseload('*'),
selectinload(models.Work.related_works).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.author).selectinload(models.Author.orcids).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.author).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.institution).selectinload(models.Institution.ror).raiseload('*'),
selectinload(models.Work.affiliations).selectinload(models.Affiliation.institution).raiseload('*'),
selectinload(models.Work.concepts).selectinload(models.WorkConcept.concept).raiseload('*'),
selectinload(models.Work.concepts_full).raiseload('*'),
orm.Load(models.Work).raiseload('*')
).filter(models.Work.paper_id.in_(object_ids)).all()
elif entity_type == "author":
objects = db.session.query(models.Author).options(
selectinload(models.Author.counts_by_year_papers),
selectinload(models.Author.counts_by_year_citations),
selectinload(models.Author.alternative_names),
selectinload(models.Author.author_concepts),
selectinload(models.Author.orcids).selectinload(models.AuthorOrcid.orcid_data),
selectinload(models.Author.last_known_institution).selectinload(models.Institution.ror).raiseload('*'),
selectinload(models.Author.last_known_institution).raiseload('*'),
orm.Load(models.Author).raiseload('*')
).filter(models.Author.author_id.in_(object_ids)).all()
elif entity_type == "venue":
objects = db.session.query(models.Venue).options(
selectinload(models.Venue.counts_by_year_papers),
selectinload(models.Venue.counts_by_year_citations),
orm.Load(models.Venue).raiseload('*')
).filter(models.Venue.journal_id.in_(object_ids)).all()
elif entity_type == "institution":
objects = db.session.query(models.Institution).filter(models.Institution.affiliation_id.in_(object_ids)).all()
elif entity_type == "concept":
objects = db.session.query(models.Concept).filter(models.Concept.field_of_study_id.in_(object_ids)).all()
logger.info(f'got {len(objects)} objects in {elapsed(start_time, 4)}s')
return objects
# python -m scripts.fast_queue --entity=work --method=add_everything --limit=3
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run fast queue.")
parser.add_argument('--entity', type=str, help="the entity type to run")
parser.add_argument('--method', type=str, help="the method to run")
parser.add_argument('--id', nargs="?", type=str, help="id of the one thing you want to update (case sensitive)")
parser.add_argument('--limit', "-l", nargs="?", type=int, help="how many objects to work on")
parser.add_argument(
'--chunk', "-ch", nargs="?", default=100, type=int, help="how many objects to take off the queue at once"
)
parsed_args = parser.parse_args()
run(**vars(parsed_args))
| en | 0.620966 | #{total_count} starting {obj}.{method_name}() method") # print(1/0) # fail loudly for now with chunk as ( select id from {queue_table} where started is null order by finished asc nulls first, rand limit :chunk for update skip locked ) update {queue_table} set started = now() from chunk where {queue_table}.id = chunk.id returning chunk.id; # logger.info(f'finishing queue chunk') update {queue_table} set finished = now(), started=null where id = any(:ids) # logger.info(f'finished saving finish_objects in {elapsed(start_time, 4)}s') # python -m scripts.fast_queue --entity=work --method=add_everything --limit=3 | 2.237686 | 2 |
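The worker above relies on Postgres' for update skip locked so that several processes can drain the same queue without claiming the same rows. One pass of the claim, process, release cycle, using the helpers defined in the script, looks roughly like this (queue table, entity and chunk size are illustrative):

# Sketch of a single worker pass over a hypothetical store queue.
queue_table = "queue.work_store"              # built as f"queue.{entity}_store" in run()
ids = fetch_queue_chunk_ids(queue_table, 50)  # marks the rows as started, returns their ids
if ids:
    works = get_objects("work", ids)
    for work in works:
        work.store()                          # the method named on the queue; run() calls it via getattr
    store_json_objects(works)                 # batched delete + insert of the JSON rows
    finish_object_ids(queue_table, ids)       # stamps finished and clears started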
cogs/memes.py | Code-Cecilia/botman-rewrite | 2 | 10094 | import json
import discord
from discord.ext import commands
from assets import internet_funcs
from assets.list_funcs import chunks
class Memes(commands.Cog, description="Memes from https://imgflip.com/"):
def __init__(self, bot):
self.bot = bot
with open("config.json") as configFile:
config = json.load(configFile)
self.username = config.get("imgflip_username")
self.password = config.get("imgflip_password")
self.memetemps = {}
@commands.Cog.listener()
async def on_ready(self):
result = json.loads(await internet_funcs.get_response("https://api.imgflip.com/get_memes"))
if result["success"] is not True:
return
result = result["data"]["memes"]
for k in result:
self.memetemps[k["id"]] = {"name": k["name"], "box_count": k["box_count"]}
@commands.command(name="memetemplates", aliases=["memetemps"])
async def meme_temps(self, ctx):
"""Fetches top 100 meme templates from imgflip.com"""
# TODO: pagination for meme templates
result = list(self.memetemps.items())
if not result:
await self.on_ready()
result = list(self.memetemps.items())
n = 0
split_entries = list(chunks(result, 25))
for entry in split_entries:
embed = discord.Embed(title="Meme Templates", color=0x00ff00)
for meme in entry:
n += 1
meme_id = meme[0]
meme_name = meme[1]["name"]
embed.add_field(name=f"{n}. {meme_name}", value=f"ID: `{meme_id}`", inline=False)
try:
await ctx.author.send(embed=embed)
except discord.Forbidden:
await ctx.send("I can't DM you! Please enable DMs and try again.")
return
@commands.command(name="memegen", aliases=["memegenerator"])
async def meme_gen(self, ctx, meme_id, *text):
"""Generates a meme from imgflip. For template IDs, see the `memetemplates` command"""
text = list(text)
if self.memetemps == {}:
await self.on_ready()
if len(text) > 20:
text = text[:20]
if not str(meme_id).isnumeric():
found = False
for k, v in self.memetemps.items():
if str(meme_id).lower() == str(v["name"]).lower():
meme_id = int(k)
found = True
break
if not found:
return await ctx.send("Meme not found. Please check the ID and try again.")
# clean up the number of boxes to send
if meme_id in self.memetemps.keys():
if len(text) > self.memetemps[meme_id]["box_count"]:
text = text[:int(self.memetemps[meme_id]["box_count"])]
if len(text) < self.memetemps[meme_id]["box_count"]:
text += [""] * int(self.memetemps[meme_id]["box_count"] - len(text))
# ready the text boxes
boxes_dict = {}
for box_count in range(len(text)):
boxes_dict[f"boxes[{box_count}][text]"] = text[box_count]
boxes_dict[f"boxes[{box_count}][color]"] = "#000000"
boxes_dict[f"boxes[{box_count}][outline_color]"] = "#FFFFFF"
# send the request
payload = {"template_id": meme_id, "username": self.username, "password": self.password}
payload.update(boxes_dict)
result = json.loads(await internet_funcs.post("https://api.imgflip.com/caption_image", data=payload))
if result["success"] is not True:
await ctx.send("An error occurred:" + " " + "**" + result["error_message"] + "**")
return
await ctx.send(result["data"]["url"])
def setup(bot):
bot.add_cog(Memes(bot))
| import json
import discord
from discord.ext import commands
from assets import internet_funcs
from assets.list_funcs import chunks
class Memes(commands.Cog, description="Memes from https://imgflip.com/"):
def __init__(self, bot):
self.bot = bot
with open("config.json") as configFile:
config = json.load(configFile)
self.username = config.get("imgflip_username")
self.password = config.get("imgflip_password")
self.memetemps = {}
@commands.Cog.listener()
async def on_ready(self):
result = json.loads(await internet_funcs.get_response("https://api.imgflip.com/get_memes"))
if result["success"] is not True:
return
result = result["data"]["memes"]
for k in result:
self.memetemps[k["id"]] = {"name": k["name"], "box_count": k["box_count"]}
@commands.command(name="memetemplates", aliases=["memetemps"])
async def meme_temps(self, ctx):
"""Fetches top 100 meme templates from imgflip.com"""
# TODO: pagination for meme templates
result = list(self.memetemps.items())
if not result:
await self.on_ready()
result = list(self.memetemps.items())
n = 0
split_entries = list(chunks(result, 25))
for entry in split_entries:
embed = discord.Embed(title="Meme Templates", color=0x00ff00)
for meme in entry:
n += 1
meme_id = meme[0]
meme_name = meme[1]["name"]
embed.add_field(name=f"{n}. {meme_name}", value=f"ID: `{meme_id}`", inline=False)
try:
await ctx.author.send(embed=embed)
except discord.Forbidden:
await ctx.send("I can't DM you! Please enable DMs and try again.")
return
@commands.command(name="memegen", aliases=["memegenerator"])
async def meme_gen(self, ctx, meme_id, *text):
"""Generates a meme from imgflip. For template IDs, see the `memetemplates` command"""
text = list(text)
if self.memetemps == {}:
await self.on_ready()
if len(text) > 20:
text = text[:20]
if not str(meme_id).isnumeric():
found = False
for k, v in self.memetemps.items():
if str(meme_id).lower() == str(v["name"]).lower():
meme_id = int(k)
found = True
break
if not found:
return await ctx.send("Meme not found. Please check the ID and try again.")
# clean up the number of boxes to send
if meme_id in self.memetemps.keys():
if len(text) > self.memetemps[meme_id]["box_count"]:
text = text[:int(self.memetemps[meme_id]["box_count"])]
if len(text) < self.memetemps[meme_id]["box_count"]:
text += [""] * int(self.memetemps[meme_id]["box_count"] - len(text))
# ready the text boxes
boxes_dict = {}
for box_count in range(len(text)):
boxes_dict[f"boxes[{box_count}][text]"] = text[box_count]
boxes_dict[f"boxes[{box_count}][color]"] = "#000000"
boxes_dict[f"boxes[{box_count}][outline_color]"] = "#FFFFFF"
# send the request
payload = {"template_id": meme_id, "username": self.username, "password": self.password}
payload.update(boxes_dict)
result = json.loads(await internet_funcs.post("https://api.imgflip.com/caption_image", data=payload))
if result["success"] is not True:
await ctx.send("An error occurred:" + " " + "**" + result["error_message"] + "**")
return
await ctx.send(result["data"]["url"])
def setup(bot):
bot.add_cog(Memes(bot))
| en | 0.4917 | Fetches top 100 meme templates from imgflip.com # TODO: pagination for meme templates Generates a meme from imgflip. For template IDs, see the `memetemplates` command # clean up the number of boxes to send # ready the text boxes # send the request | 2.57659 | 3 |
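For reference, the form-style payload that meme_gen above assembles for a two-box template ends up shaped like this (sketch; the template id and captions are hypothetical and the credentials are placeholders):

# Hypothetical caption_image payload mirroring the boxes loop in meme_gen above.
payload = {
    "template_id": 181913649,  # hypothetical template id
    "username": "<imgflip_username>",
    "password": "<imgflip_password>",
    "boxes[0][text]": "top caption",
    "boxes[0][color]": "#000000",
    "boxes[0][outline_color]": "#FFFFFF",
    "boxes[1][text]": "bottom caption",
    "boxes[1][color]": "#000000",
    "boxes[1][outline_color]": "#FFFFFF",
}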
examples/forest_fire/run.py | fire-suppression-abm/mesa | 1,704 | 10095 | <filename>examples/forest_fire/run.py
from forest_fire.server import server
server.launch()
| <filename>examples/forest_fire/run.py
from forest_fire.server import server
server.launch()
| none | 1 | 1.095216 | 1 |
|
scripts/collect_timelines1.py | tedhchen/twitter_timeline_tools | 0 | 10096 | <reponame>tedhchen/twitter_timeline_tools
# Prep
import json, configparser, pickle, csv, logging, os
import pandas as pd
from tweepy import AppAuthHandler, API, Cursor
# Reading in configuration
params = configparser.ConfigParser()
params.read('config.ini')
# Functions
# Takes config file and returns authenticated api object
def twitter_auth(config):
auth = AppAuthHandler(params['keys']['key'], params['keys']['secret'])
api = API(auth, wait_on_rate_limit = True, wait_on_rate_limit_notify = True)
return api
# Get relevant user ids
def get_ids(path, subset = None):
df = pd.read_csv(path, header = 0, dtype = {'user': 'object', 'subset': 'object'})
if subset != None:
df.user = df.user[df['subset'] == subset]
return list(df.user)
# takes user ids, and writes out a txt file with each user's status jsons
def get_timelines(users, api, outfolder):
i = 0
for user in users:
timeline = []
try:
for status in Cursor(api.user_timeline, user_id = user, include_rts = True, exclude_replies = False, count = 200, tweet_mode = 'extended').items():
timeline.append(status)
timeline = [json.dumps(line._json) for line in timeline]
filename = 'timeline_' + user + '.txt'
with open(os.path.join(outfolder, filename), 'a', encoding = 'utf-8', newline = '') as outfile:
for line in timeline:
outfile.write(line + '\n')
except Exception as e:
logging.exception("Exception occurred when working with user id: " + user + '.')
i += 1
if i % 100 == 0:
print('Finished ' + str(i) + ' users.')
return None
def retry_missed_users(log, api, outfolder):
missed = []
with open(log, 'r') as infile:
for line in infile:
if 'Exception occurred when working with user id:' in line:
missed.append(line[79:-2])
get_timelines(missed, api, outfolder)
# Running script
# Setting up logger
logging.basicConfig(filename = logfile, filemode = 'a', format = '(%(asctime)s) %(levelname)s: %(message)s', level = logging.INFO)
# Authenticating api
api = twitter_auth(params)
# Get users from pre-parsed data
# csv file with:
# user, subset
# ..., ...
# subset is just a way to subset users from the csv file
# if subset == None, then no subsetting is performed
users = get_ids(path, subset)
# Getting timelines
get_timelines(users, api, outpath)
# Double checking errors
retry_missed_users(logfile, api, outpath)
| # Prep
import json, configparser, pickle, csv, logging, os
import pandas as pd
from tweepy import AppAuthHandler, API, Cursor
# Reading in configuration
params = configparser.ConfigParser()
params.read('config.ini')
# Functions
# Takes config file and returns authenticated api object
def twitter_auth(config):
auth = AppAuthHandler(params['keys']['key'], params['keys']['secret'])
api = API(auth, wait_on_rate_limit = True, wait_on_rate_limit_notify = True)
return api
# Get relevant user ids
def get_ids(path, subset = None):
df = pd.read_csv(path, header = 0, dtype = {'user': 'object', 'subset': 'object'})
if subset != None:
df.user = df.user[df['subset'] == subset]
return list(df.user)
# takes user ids, and writes out a txt file with each user's status jsons
def get_timelines(users, api, outfolder):
i = 0
for user in users:
timeline = []
try:
for status in Cursor(api.user_timeline, user_id = user, include_rts = True, exclude_replies = False, count = 200, tweet_mode = 'extended').items():
timeline.append(status)
timeline = [json.dumps(line._json) for line in timeline]
filename = 'timeline_' + user + '.txt'
with open(os.path.join(outfolder, filename), 'a', encoding = 'utf-8', newline = '') as outfile:
for line in timeline:
outfile.write(line + '\n')
except Exception as e:
logging.exception("Exception occurred when working with user id: " + user + '.')
i += 1
if i % 100 == 0:
print('Finished ' + str(i) + ' users.')
return None
def retry_missed_users(log, api, outfolder):
missed = []
with open(log, 'r') as infile:
for line in infile:
if 'Exception occurred when working with user id:' in line:
missed.append(line[79:-2])
get_timelines(missed, api, outfolder)
# Running script
# Setting up logger
logging.basicConfig(filename = logfile, filemode = 'a', format = '(%(asctime)s) %(levelname)s: %(message)s', level = logging.INFO)
# Authenticating api
api = twitter_auth(params)
# Get users from pre-parsed data
# csv file with:
# user, subset
# ..., ...
# subset is just a way to subset users from the csv file
# if subset == None, then no subsetting is performed
users = get_ids(path, subset)
# Getting timelines
get_timelines(users, api, outpath)
# Double checking errors
retry_missed_users(logfile, api, outpath) | en | 0.780228 | # Prep # Reading in configuation # Functions # Takes config file and returns authenticated api object # Get relevant user ids # takes user ids, and writes out a txt file wiith each user's status jsons # Running script # Setting up logger # Authenticating api # Get users from pre-parsed data # csv file with: # user, subset # ..., ... # subset is just a way to subset users from the csv file # if subset == None, then no subsetting is performed # Getting timelines # Double checking errors | 2.788349 | 3 |
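The collection script above reads its Twitter credentials from config.ini and leaves path, subset, outpath and logfile to be supplied by the operator. A minimal set of assumptions (all values hypothetical) could look like this:

# Hypothetical values for the names the script above expects to exist.
# config.ini is assumed to hold the app credentials:
#
#   [keys]
#   key = <consumer_key>
#   secret = <consumer_secret>
#
path = "users.csv"       # csv with columns: user, subset (see get_ids)
subset = None            # or a subset label to filter on
outpath = "timelines"    # folder receiving timeline_<user>.txt files
logfile = "collect.log"  # used by logging.basicConfig and retry_missed_users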
aquarius/app/auth_util.py | oceanprotocol/provider-backend | 0 | 10097 | <gh_stars>0
#
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
from eth_utils import is_address
from web3 import Web3
def sanitize_addresses(addresses):
return [Web3.toChecksumAddress(a) for a in addresses if is_address(a)]
def compare_eth_addresses(address, checker, logger):
"""
Compare two addresses and return TRUE if there is a match
:param str address: Address
:param str checker: Address to compare with
:param logger: instance of logging
:return: boolean
"""
logger.debug("compare_eth_addresses address: %s" % address)
logger.debug("compare_eth_addresses checker: %s" % checker)
if not is_address(address):
logger.debug("Address is not web3 valid")
return False
if not is_address(checker):
logger.debug("Checker is not web3 valid")
return False
return Web3.toChecksumAddress(address) == Web3.toChecksumAddress(checker)
| #
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
from eth_utils import is_address
from web3 import Web3
def sanitize_addresses(addresses):
return [Web3.toChecksumAddress(a) for a in addresses if is_address(a)]
def compare_eth_addresses(address, checker, logger):
"""
Compare two addresses and return TRUE if there is a match
:param str address: Address
:param str checker: Address to compare with
:param logger: instance of logging
:return: boolean
"""
logger.debug("compare_eth_addresses address: %s" % address)
logger.debug("compare_eth_addresses checker: %s" % checker)
if not is_address(address):
logger.debug("Address is not web3 valid")
return False
if not is_address(checker):
logger.debug("Checker is not web3 valid")
return False
return Web3.toChecksumAddress(address) == Web3.toChecksumAddress(checker) | en | 0.647245 | # # Copyright 2021 Ocean Protocol Foundation # SPDX-License-Identifier: Apache-2.0 # Compare two addresses and return TRUE if there is a match :param str address: Address :param str checker: Address to compare with :param logger: instance of logging :return: boolean | 2.910401 | 3 |
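A small sketch of the auth helpers above in use. The two strings are hypothetical lowercase and checksummed spellings of the same account, and a standard logger satisfies the third argument:

# Sketch: the same address in lowercase and EIP-55 checksummed form should compare equal.
import logging

logger = logging.getLogger("auth_util_demo")
a = "0x52908400098527886e0f7030069857d2e4169ee7"  # hypothetical address, lowercase
b = "0x52908400098527886E0F7030069857D2E4169EE7"  # same address, checksummed

print(compare_eth_addresses(a, b, logger))        # True
print(sanitize_addresses([a, "not-an-address"]))  # [b] -- invalid entries are dropped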
oscar/lib/python2.7/site-packages/phonenumbers/data/region_DJ.py | sainjusajan/django-oscar | 0 | 10098 | """Auto-generated file, do not edit by hand. DJ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_DJ = PhoneMetadata(id='DJ', country_code=253, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[27]\\d{7}', possible_length=(8,)),
fixed_line=PhoneNumberDesc(national_number_pattern='2(?:1[2-5]|7[45])\\d{5}', example_number='21360003', possible_length=(8,)),
mobile=PhoneNumberDesc(national_number_pattern='77\\d{6}', example_number='77831001', possible_length=(8,)),
number_format=[NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4')])
| """Auto-generated file, do not edit by hand. DJ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_DJ = PhoneMetadata(id='DJ', country_code=253, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[27]\\d{7}', possible_length=(8,)),
fixed_line=PhoneNumberDesc(national_number_pattern='2(?:1[2-5]|7[45])\\d{5}', example_number='21360003', possible_length=(8,)),
mobile=PhoneNumberDesc(national_number_pattern='77\\d{6}', example_number='77831001', possible_length=(8,)),
number_format=[NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4')])
| en | 0.825878 | Auto-generated file, do not edit by hand. DJ metadata | 2.126104 | 2 |
llvm-7.0.0.src/utils/unicode-case-fold.py | sillywalk/grazz | 171 | 10099 | <filename>llvm-7.0.0.src/utils/unicode-case-fold.py<gh_stars>100-1000
#!/usr/bin/env python
"""
Unicode case folding database conversion utility
Parses the database and generates a C++ function which implements the case
folding algorithm. The database entries are of the form:
<code>; <status>; <mapping>; # <name>
<status> can be one of four characters:
C - Common mappings
S - mappings for Simple case folding
F - mappings for Full case folding
T - special case for Turkish I characters
Right now this generates a function which implements simple case folding (C+S
entries).
"""
import sys
import re
import urllib2
# This variable will hold the body of the generated foldCharSimple function
body = ""
# Reads file line-by-line, extracts Common and Simple case fold mappings and
# yields a (from_char, to_char, from_name) tuple for each.
def mappings(f):
previous_from = -1
expr = re.compile(r'^(.*); [CS]; (.*); # (.*)')
for line in f:
m = expr.match(line)
if not m: continue
from_char = int(m.group(1), 16)
to_char = int(m.group(2), 16)
from_name = m.group(3)
if from_char <= previous_from:
raise Exception("Duplicate or unsorted characters in input")
yield from_char, to_char, from_name
previous_from = from_char
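# Illustration (added; not in the original script): mappings() only needs an
# iterable of lines, so it can be exercised on a couple of hand-written lines
# in the CaseFolding.txt format without downloading the database:
#
#   sample = ["0041; C; 0061; # LATIN CAPITAL LETTER A",
#             "00B5; C; 03BC; # MICRO SIGN"]
#   list(mappings(sample))
#   # -> [(0x41, 0x61, 'LATIN CAPITAL LETTER A'), (0xB5, 0x3BC, 'MICRO SIGN')]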
# Computes the shift (to_char - from_char) in a mapping.
def shift(mapping):
return mapping[1] - mapping[0]
# Computes the stride (from_char2 - from_char1) of two mappings.
def stride2(mapping1, mapping2):
return mapping2[0] - mapping1[0]
# Computes the stride of a list of mappings. The list should have at least two
# mappings. All mappings in the list are assumed to have the same stride.
def stride(block):
return stride2(block[0], block[1])
# b is a list of mappings. All the mappings are assumed to have the same
# shift, and the stride between adjacent mappings (if any) is constant.
def dump_block(b):
global body
if len(b) == 1:
# Special case for handling blocks of length 1. We don't even need to
# emit the "if (C < X) return C" check below as all characters in this
# range will be caught by the "C < X" check emitted by the first
# non-trivial block.
body += " // {2}\n if (C == {0:#06x})\n return {1:#06x};\n".format(*b[0])
return
first = b[0][0]
last = first + stride(b) * (len(b)-1)
modulo = first % stride(b)
# All characters before this block map to themselves.
body += " if (C < {0:#06x})\n return C;\n".format(first)
body += " // {0} characters\n".format(len(b))
# Generic pattern: check upper bound (lower bound is checked by the "if"
# above) and modulo of C, return C+shift.
pattern = " if (C <= {0:#06x} && C % {1} == {2})\n return C + {3};\n"
if stride(b) == 2 and shift(b[0]) == 1 and modulo == 0:
# Special case:
# We can elide the modulo-check because the expression "C|1" will map
# the intervening characters to themselves.
pattern = " if (C <= {0:#06x})\n return C | 1;\n"
elif stride(b) == 1:
# Another special case: X % 1 is always zero, so don't emit the
# modulo-check.
pattern = " if (C <= {0:#06x})\n return C + {3};\n"
body += pattern.format(last, stride(b), modulo, shift(b[0]))
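# Illustration (added; not in the original script): for the plain ASCII block
# 'A'..'Z' (stride 1, shift +32) dump_block() appends roughly:
#
#   if (C < 0x0041)
#     return C;
#   // 26 characters
#   if (C <= 0x005a)
#     return C + 32;
#
# whereas an alternating upper/lower block (stride 2, shift +1, even start)
# takes the cheaper "return C | 1;" branch instead of a modulo check.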
current_block = []
f = urllib2.urlopen(sys.argv[1])
for m in mappings(f):
if len(current_block) == 0:
current_block.append(m)
continue
if shift(current_block[0]) != shift(m):
# Incompatible shift, start a new block.
dump_block(current_block)
current_block = [m]
continue
if len(current_block) == 1 or stride(current_block) == stride2(current_block[-1], m):
current_block.append(m)
continue
# Incompatible stride, start a new block.
dump_block(current_block)
current_block = [m]
f.close()
dump_block(current_block)
print '//===---------- Support/UnicodeCaseFold.cpp -------------------------------===//'
print '//'
print '// This file was generated by utils/unicode-case-fold.py from the Unicode'
print '// case folding database at'
print '// ', sys.argv[1]
print '//'
print '// To regenerate this file, run:'
print '// utils/unicode-case-fold.py \\'
print '// "{}" \\'.format(sys.argv[1])
print '// > lib/Support/UnicodeCaseFold.cpp'
print '//'
print '//===----------------------------------------------------------------------===//'
print ''
print '#include "llvm/Support/Unicode.h"'
print ''
print "int llvm::sys::unicode::foldCharSimple(int C) {"
print body
print " return C;"
print "}"
| en | 0.78973 | #!/usr/bin/env python Unicode case folding database conversion utility Parses the database and generates a C++ function which implements the case folding algorithm. The database entries are of the form: <code>; <status>; <mapping>; # <name> <status> can be one of four characters: C - Common mappings S - mappings for Simple case folding F - mappings for Full case folding T - special case for Turkish I characters Right now this generates a function which implements simple case folding (C+S entries). # This variable will body of the mappings function # Reads file line-by-line, extracts Common and Simple case fold mappings and # returns a (from_char, to_char, from_name) tuple. # (.*)') # Computes the shift (to_char - from_char) in a mapping. # Computes the stride (from_char2 - from_char1) of two mappings. # Computes the stride of a list of mappings. The list should have at least two # mappings. All mappings in the list are assumed to have the same stride. # b is a list of mappings. All the mappings are assumed to have the same # shift and the stride between adjecant mappings (if any) is constant. # Special case for handling blocks of length 1. We don't even need to # emit the "if (C < X) return C" check below as all characters in this # range will be caught by the "C < X" check emitted by the first # non-trivial block. #06x})\n return {1:#06x};\n".format(*b[0]) # All characters before this block map to themselves. #06x})\n return C;\n".format(first) # Generic pattern: check upper bound (lower bound is checked by the "if" # above) and modulo of C, return C+shift. #06x} && C % {1} == {2})\n return C + {3};\n" # Special case: # We can elide the modulo-check because the expression "C|1" will map # the intervening characters to themselves. #06x})\n return C | 1;\n" # Another special case: X % 1 is always zero, so don't emit the # modulo-check. #06x})\n return C + {3};\n" # Incompatible shift, start a new block. # Incompatible stride, start a new block. | 2.872822 | 3 |